| hexsha (string, len 40) | size (int64, 4 to 1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4 to 209) | max_stars_repo_name (string, len 5 to 121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (list, len 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4 to 209) | max_issues_repo_name (string, len 5 to 121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (list, len 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4 to 209) | max_forks_repo_name (string, len 5 to 121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (list, len 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4 to 1.02M) | avg_line_length (float64, 1.07 to 66.1k) | max_line_length (int64, 4 to 266k) | alphanum_fraction (float64, 0.01 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
588720a8fbd2b194538c95af29d2bebc57a110ae
| 4,101
|
py
|
Python
|
custom_components/xiaomi_miot/device_tracker.py
|
ss109/hass-xiaomi-miot
|
a69c8e0e44400b9aa0f94f1003d3c6f3de4996fd
|
[
"Apache-2.0"
] | 1
|
2021-12-10T12:30:34.000Z
|
2021-12-10T12:30:34.000Z
|
custom_components/xiaomi_miot/device_tracker.py
|
ss109/hass-xiaomi-miot
|
a69c8e0e44400b9aa0f94f1003d3c6f3de4996fd
|
[
"Apache-2.0"
] | null | null | null |
custom_components/xiaomi_miot/device_tracker.py
|
ss109/hass-xiaomi-miot
|
a69c8e0e44400b9aa0f94f1003d3c6f3de4996fd
|
[
"Apache-2.0"
] | null | null | null |
"""Support for Xiaomi device tracker."""
import logging
from datetime import timedelta
from homeassistant.const import * # noqa: F401
from homeassistant.components.device_tracker import (
DOMAIN as ENTITY_DOMAIN,
)
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from . import (
DOMAIN,
CONF_MODEL,
XIAOMI_CONFIG_SCHEMA as PLATFORM_SCHEMA, # noqa: F401
MiotEntity,
async_setup_config_entry,
bind_services_to_entries,
)
from .core.miot_spec import (
MiotSpec,
MiotService,
)
from .binary_sensor import MiotBinarySensorSubEntity
_LOGGER = logging.getLogger(__name__)
DATA_KEY = f'{ENTITY_DOMAIN}.{DOMAIN}'
SCAN_INTERVAL = timedelta(seconds=60)
SERVICE_TO_METHOD = {}
async def async_setup_entry(hass, config_entry, async_add_entities):
await async_setup_config_entry(hass, config_entry, async_setup_platform, async_add_entities, ENTITY_DOMAIN)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
hass.data.setdefault(DATA_KEY, {})
hass.data[DOMAIN]['add_entities'][ENTITY_DOMAIN] = async_add_entities
model = str(config.get(CONF_MODEL) or '')
if model.find('mirror') >= 0:
_LOGGER.debug('Setup device_tracker: %s', config)
entities = []
miot = config.get('miot_type')
if miot:
spec = await MiotSpec.async_from_type(hass, miot)
for srv in spec.get_services('watch', 'rearview_mirror'):
if not srv.get_property('latitude', 'longitude'):
continue
entities.append(MiotTrackerEntity(config, srv))
for entity in entities:
hass.data[DOMAIN]['entities'][entity.unique_id] = entity
async_add_entities(entities, update_before_add=True)
bind_services_to_entries(hass, SERVICE_TO_METHOD)
class MiotTrackerEntity(MiotEntity, TrackerEntity):
def __init__(self, config, miot_service: MiotService):
super().__init__(miot_service, config=config, logger=_LOGGER)
async def async_update(self):
await super().async_update()
if not self._available:
return
add_binary_sensors = self._add_entities.get('binary_sensor')
for p in self._miot_service.get_properties('driving_status'):
if p.full_name in self._subs:
self._subs[p.full_name].update()
elif add_binary_sensors and p.format == 'bool':
self._subs[p.full_name] = MiotBinarySensorSubEntity(self, p)
add_binary_sensors([self._subs[p.full_name]])
@property
def should_poll(self):
"""No polling for entities that have location pushed."""
return True
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
return 'gps'
@property
def latitude(self):
"""Return latitude value of the device."""
prop = self._miot_service.get_property('latitude')
if prop:
return prop.from_dict(self._state_attrs)
        return None
@property
def longitude(self):
"""Return longitude value of the device."""
prop = self._miot_service.get_property('longitude')
if prop:
return prop.from_dict(self._state_attrs)
        return None
@property
def location_name(self):
"""Return a location name for the current location of the device."""
prop = self._miot_service.get_property('current_address')
if prop:
return prop.from_dict(self._state_attrs)
return None
@property
def location_accuracy(self):
"""Return the location accuracy of the device.
Value in meters.
"""
return 0
@property
def battery_level(self):
"""Return the battery level of the device."""
sls = [self._miot_service, *self._miot_service.spec.get_services('battery')]
for srv in sls:
prop = srv.get_property('battery_level')
if prop:
return prop.from_dict(self._state_attrs)
return None
| 33.614754
| 111
| 0.674957
|
cd0bbfd868baf70536ea06d852d8d0548ddac6a8
| 5,279
|
py
|
Python
|
tools/split_dataset.py
|
by-liu/RetinalApp
|
53173b2b20dfcf613a3a22d6caa5178771d14225
|
[
"MIT"
] | null | null | null |
tools/split_dataset.py
|
by-liu/RetinalApp
|
53173b2b20dfcf613a3a22d6caa5178771d14225
|
[
"MIT"
] | null | null | null |
tools/split_dataset.py
|
by-liu/RetinalApp
|
53173b2b20dfcf613a3a22d6caa5178771d14225
|
[
"MIT"
] | null | null | null |
import os.path as osp
import math
import random
import numpy as np
from tqdm import tqdm
from retinal.data.retinal_lesions import RetinalLesions
from retinal.utils import load_list, save_list
def stats():
data_root = "./data/retinal-lesions/"
dataset = RetinalLesions(
data_root,
osp.join(data_root, "all.txt"),
return_id=True
)
lesions = {}
for i in tqdm(range(len(dataset))):
img, target, sample_name = dataset[i]
target = np.sum(target, axis=(0, 1))
lesions[sample_name] = []
for j in range(target.shape[0]):
if target[j] > 0:
lesions[sample_name].append(str(j))
out_path = osp.join(data_root, "lesions.txt")
with open(out_path, "w") as f:
for sample_name in lesions:
f.write("{} {}\n".format(sample_name, ",".join(lesions[sample_name])))
def save_samples(samples, path) -> None:
with open(path, "w") as f:
for s in samples:
f.write("{}\n".format(s))
def split_by_class(path):
data_root = "./data/retinal-lesions/"
classes_path = osp.join(data_root, "classes.txt")
classes = load_list(classes_path)
classes = [x.split(",")[0] for x in classes]
classes.append("void")
#classes_samples = [[] for _ in range(len(classes))]
classes_samples = dict(zip(classes, [[] for _ in range(len(classes))]))
with open(path, "r") as f:
for line in f:
fields = line.strip().split(" ")
sample_name = fields[0]
ids = "" if len(fields) == 1 else fields[1]
# sample_name, ids = line.strip().split(" ")
if ids == "":
classes_samples["void"].append(sample_name)
continue
ids = [int(x) for x in ids.split(",")]
for i in ids:
classes_samples[classes[i]].append(sample_name)
sorted_classes_samples = sorted(classes_samples.items(), key=lambda item: len(item[1]))
train, val = set(), set()
for class_name, samples in sorted_classes_samples:
random.shuffle(samples)
all_number = len(samples)
val_number = min(math.ceil(all_number * 0.20), 200)
train_number = all_number - val_number
print("========")
print("{} - train {} val {}".format(class_name, train_number, val_number))
index = 0
cnt = 0
while cnt < val_number and index < min(len(samples), val_number * 2):
if samples[index] not in train and samples[index] not in val:
val.add(samples[index])
cnt += 1
index += 1
else:
index += 1
print("val {}".format(cnt))
index = 0
cnt = 0
# while cnt < train_number and index < len(samples):
while index < len(samples):
if samples[index] not in train and samples[index] not in val:
train.add(samples[index])
cnt += 1
index += 1
else:
index += 1
print("train {}".format(cnt))
print("After {} - train {} val {}".format(class_name, len(train), len(val)))
print("train : {} {}".format(len(train), len(set(train))))
print("val : {} {}".format(len(val), len(set(val))))
save_samples(list(train), osp.join(data_root, "train.txt"))
save_samples(list(val), osp.join(data_root, "val.txt"))
def lesion_dist(split):
data_root = "./data/retinal-lesions/"
classes_path = osp.join(data_root, "classes.txt")
classes = load_list(classes_path)
classes = [x.split(",")[0] for x in classes]
classes.append("void")
samples = {}
lesion_path = osp.join(data_root, "lesions.txt")
with open(lesion_path, "r") as f:
for line in f:
fields = line.strip().split(" ")
sample_name = fields[0]
ids = "" if len(fields) == 1 else fields[1]
if ids == "":
samples[sample_name] = [-1]
continue
ids = [int(x) for x in ids.split(",")]
samples[sample_name] = ids
classes_count = {}
for i in range(len(classes)):
classes_count[classes[i]] = 0
split_path = osp.join(data_root, "{}.txt".format(split))
with open(split_path, "r") as f:
for line in f:
sample_name = line.strip()
ids = samples[sample_name]
for i in ids:
classes_count[classes[i]] += 1
print(split)
print("========")
for i in range(len(classes)):
print("{} {} {}".format(i, classes[i], classes_count[classes[i]]))
print("========")
# stats()
data_root = "./data/retinal-lesions/"
lesion_dist("train")
lesion_dist("val")
# split_by_class(osp.join(data_root, "lesions.txt"))
# data_dir = "./data/retinal-lesions/"
# all_list = load_list(osp.join(data_dir, "all.txt"))
# random.shuffle(all_list)
# train_num = int(len(all_list) * 0.8)
# val_num = len(all_list) - train_num
# train_list = all_list[:train_num]
# val_list = all_list[train_num:]
# print("{} + {} = {}".format(len(train_list), len(val_list), len(all_list)))
# save_list(osp.join(data_dir, "train.txt"), train_list)
# save_list(osp.join(data_dir, "val.txt"), val_list)
# print("done")
| 32.189024
| 91
| 0.569426
|
9ddd0dd416af3ab28883b1871f8423817c06ec73
| 14,652
|
py
|
Python
|
pynndescent/distances.py
|
AvantiShri/pynndescent
|
e44b848148f0845eeb9fa762418055bb30c71267
|
[
"BSD-2-Clause"
] | null | null | null |
pynndescent/distances.py
|
AvantiShri/pynndescent
|
e44b848148f0845eeb9fa762418055bb30c71267
|
[
"BSD-2-Clause"
] | null | null | null |
pynndescent/distances.py
|
AvantiShri/pynndescent
|
e44b848148f0845eeb9fa762418055bb30c71267
|
[
"BSD-2-Clause"
] | null | null | null |
# Author: Leland McInnes <leland.mcinnes@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import numba
_mock_identity = np.eye(2, dtype=np.float32)
_mock_ones = np.ones(2, dtype=np.float32)
@numba.njit(fastmath=True, cache=True)
def euclidean(x, y):
r"""Standard euclidean distance.
.. math::
        D(x, y) = \sqrt{\sum_i (x_i - y_i)^2}
"""
result = 0.0
for i in range(x.shape[0]):
result += (x[i] - y[i]) ** 2
return np.sqrt(result)
@numba.njit(
"f4(f4[::1],f4[::1])",
fastmath=True,
locals={
"result": numba.types.float32,
"diff": numba.types.float32,
"dim": numba.types.uint32,
"i": numba.types.uint16,
},
)
def squared_euclidean(x, y):
r"""Squared euclidean distance.
.. math::
D(x, y) = \sum_i (x_i - y_i)^2
"""
result = 0.0
dim = x.shape[0]
for i in range(dim):
diff = x[i] - y[i]
result += diff * diff
return result
@numba.njit(fastmath=True, cache=True)
def standardised_euclidean(x, y, sigma=_mock_ones):
r"""Euclidean distance standardised against a vector of standard
deviations per coordinate.
.. math::
        D(x, y) = \sqrt{\sum_i \frac{(x_i - y_i)^2}{v_i}}
"""
result = 0.0
for i in range(x.shape[0]):
result += ((x[i] - y[i]) ** 2) / sigma[i]
return np.sqrt(result)
@numba.njit(fastmath=True, cache=True)
def manhattan(x, y):
r"""Manhattan, taxicab, or l1 distance.
.. math::
D(x, y) = \sum_i |x_i - y_i|
"""
result = 0.0
for i in range(x.shape[0]):
result += np.abs(x[i] - y[i])
return result
@numba.njit(fastmath=True, cache=True)
def chebyshev(x, y):
r"""Chebyshev or l-infinity distance.
.. math::
D(x, y) = \max_i |x_i - y_i|
"""
result = 0.0
for i in range(x.shape[0]):
result = max(result, np.abs(x[i] - y[i]))
return result
@numba.njit(fastmath=True, cache=True)
def minkowski(x, y, p=2):
r"""Minkowski distance.
.. math::
D(x, y) = \left(\sum_i |x_i - y_i|^p\right)^{\frac{1}{p}}
This is a general distance. For p=1 it is equivalent to
manhattan distance, for p=2 it is Euclidean distance, and
for p=infinity it is Chebyshev distance. In general it is better
to use the more specialised functions for those distances.
"""
result = 0.0
for i in range(x.shape[0]):
result += (np.abs(x[i] - y[i])) ** p
return result ** (1.0 / p)
@numba.njit(fastmath=True, cache=True)
def weighted_minkowski(x, y, w=_mock_ones, p=2):
r"""A weighted version of Minkowski distance.
.. math::
        D(x, y) = \left(\sum_i (w_i |x_i - y_i|)^p\right)^{\frac{1}{p}}
    If the weights w_i are inverse standard deviations of the data in each
    dimension, then this represents a standardised Minkowski distance (and is
    equivalent to standardised Euclidean distance for p=2).
"""
result = 0.0
for i in range(x.shape[0]):
result += (w[i] * np.abs(x[i] - y[i])) ** p
return result ** (1.0 / p)
@numba.njit(fastmath=True, cache=True)
def mahalanobis(x, y, vinv=_mock_identity):
result = 0.0
diff = np.empty(x.shape[0], dtype=np.float32)
for i in range(x.shape[0]):
diff[i] = x[i] - y[i]
for i in range(x.shape[0]):
tmp = 0.0
for j in range(x.shape[0]):
tmp += vinv[i, j] * diff[j]
result += tmp * diff[i]
return np.sqrt(result)
@numba.njit(fastmath=True, cache=True)
def hamming(x, y):
result = 0.0
for i in range(x.shape[0]):
if x[i] != y[i]:
result += 1.0
return float(result) / x.shape[0]
@numba.njit(fastmath=True, cache=True)
def canberra(x, y):
result = 0.0
for i in range(x.shape[0]):
denominator = np.abs(x[i]) + np.abs(y[i])
if denominator > 0:
result += np.abs(x[i] - y[i]) / denominator
return result
@numba.njit(fastmath=True, cache=True)
def bray_curtis(x, y):
numerator = 0.0
denominator = 0.0
for i in range(x.shape[0]):
numerator += np.abs(x[i] - y[i])
denominator += np.abs(x[i] + y[i])
if denominator > 0.0:
return float(numerator) / denominator
else:
return 0.0
@numba.njit(fastmath=True, cache=True)
def jaccard(x, y):
num_non_zero = 0.0
num_equal = 0.0
for i in range(x.shape[0]):
x_true = x[i] != 0
y_true = y[i] != 0
num_non_zero += x_true or y_true
num_equal += x_true and y_true
if num_non_zero == 0.0:
return 0.0
else:
return float(num_non_zero - num_equal) / num_non_zero
@numba.njit(fastmath=True, cache=True)
def matching(x, y):
num_not_equal = 0.0
for i in range(x.shape[0]):
x_true = x[i] != 0
y_true = y[i] != 0
num_not_equal += x_true != y_true
return float(num_not_equal) / x.shape[0]
@numba.njit(fastmath=True, cache=True)
def dice(x, y):
num_true_true = 0.0
num_not_equal = 0.0
for i in range(x.shape[0]):
x_true = x[i] != 0
y_true = y[i] != 0
num_true_true += x_true and y_true
num_not_equal += x_true != y_true
if num_not_equal == 0.0:
return 0.0
else:
return num_not_equal / (2.0 * num_true_true + num_not_equal)
@numba.njit(fastmath=True, cache=True)
def kulsinski(x, y):
num_true_true = 0.0
num_not_equal = 0.0
for i in range(x.shape[0]):
x_true = x[i] != 0
y_true = y[i] != 0
num_true_true += x_true and y_true
num_not_equal += x_true != y_true
if num_not_equal == 0:
return 0.0
else:
return float(num_not_equal - num_true_true + x.shape[0]) / (
num_not_equal + x.shape[0]
)
@numba.njit(fastmath=True, cache=True)
def rogers_tanimoto(x, y):
num_not_equal = 0.0
for i in range(x.shape[0]):
x_true = x[i] != 0
y_true = y[i] != 0
num_not_equal += x_true != y_true
return (2.0 * num_not_equal) / (x.shape[0] + num_not_equal)
@numba.njit(fastmath=True, cache=True)
def russellrao(x, y):
num_true_true = 0.0
for i in range(x.shape[0]):
x_true = x[i] != 0
y_true = y[i] != 0
num_true_true += x_true and y_true
if num_true_true == np.sum(x != 0) and num_true_true == np.sum(y != 0):
return 0.0
else:
return float(x.shape[0] - num_true_true) / (x.shape[0])
@numba.njit(fastmath=True, cache=True)
def sokal_michener(x, y):
num_not_equal = 0.0
for i in range(x.shape[0]):
x_true = x[i] != 0
y_true = y[i] != 0
num_not_equal += x_true != y_true
return (2.0 * num_not_equal) / (x.shape[0] + num_not_equal)
@numba.njit(fastmath=True, cache=True)
def sokal_sneath(x, y):
num_true_true = 0.0
num_not_equal = 0.0
for i in range(x.shape[0]):
x_true = x[i] != 0
y_true = y[i] != 0
num_true_true += x_true and y_true
num_not_equal += x_true != y_true
if num_not_equal == 0.0:
return 0.0
else:
return num_not_equal / (0.5 * num_true_true + num_not_equal)
@numba.njit(fastmath=True, cache=True)
def haversine(x, y):
if x.shape[0] != 2:
raise ValueError("haversine is only defined for 2 dimensional graph_data")
sin_lat = np.sin(0.5 * (x[0] - y[0]))
sin_long = np.sin(0.5 * (x[1] - y[1]))
result = np.sqrt(sin_lat ** 2 + np.cos(x[0]) * np.cos(y[0]) * sin_long ** 2)
return 2.0 * np.arcsin(result)
@numba.njit(fastmath=True, cache=True)
def yule(x, y):
num_true_true = 0.0
num_true_false = 0.0
num_false_true = 0.0
for i in range(x.shape[0]):
x_true = x[i] != 0
y_true = y[i] != 0
num_true_true += x_true and y_true
num_true_false += x_true and (not y_true)
num_false_true += (not x_true) and y_true
num_false_false = x.shape[0] - num_true_true - num_true_false - num_false_true
if num_true_false == 0.0 or num_false_true == 0.0:
return 0.0
else:
return (2.0 * num_true_false * num_false_true) / (
num_true_true * num_false_false + num_true_false * num_false_true
)
@numba.njit(fastmath=True, cache=True)
def cosine(x, y):
result = 0.0
norm_x = 0.0
norm_y = 0.0
for i in range(x.shape[0]):
result += x[i] * y[i]
norm_x += x[i] ** 2
norm_y += y[i] ** 2
if norm_x == 0.0 and norm_y == 0.0:
return 0.0
elif norm_x == 0.0 or norm_y == 0.0:
return 1.0
else:
return 1.0 - (result / np.sqrt(norm_x * norm_y))
@numba.njit(
"f4(f4[::1],f4[::1])",
fastmath=True,
locals={
"result": numba.types.float32,
"norm_x": numba.types.float32,
"norm_y": numba.types.float32,
"dim": numba.types.uint32,
"i": numba.types.uint16,
},
)
def alternative_cosine(x, y):
result = 0.0
norm_x = 0.0
norm_y = 0.0
dim = x.shape[0]
for i in range(dim):
result += x[i] * y[i]
norm_x += x[i] * x[i]
norm_y += y[i] * y[i]
if norm_x == 0.0 and norm_y == 0.0:
return 0.0
elif norm_x == 0.0 or norm_y == 0.0:
return np.inf
elif result <= 0.0:
return np.inf
else:
return 0.5 * (np.log(norm_x) + np.log(norm_y)) - np.log(result)
@numba.vectorize(fastmath=True, cache=True)
def correct_alternative_cosine(d):
return 1.0 - np.exp(-d)
@numba.njit(fastmath=True, cache=True)
def correlation(x, y):
mu_x = 0.0
mu_y = 0.0
norm_x = 0.0
norm_y = 0.0
dot_product = 0.0
for i in range(x.shape[0]):
mu_x += x[i]
mu_y += y[i]
mu_x /= x.shape[0]
mu_y /= x.shape[0]
for i in range(x.shape[0]):
shifted_x = x[i] - mu_x
shifted_y = y[i] - mu_y
norm_x += shifted_x ** 2
norm_y += shifted_y ** 2
dot_product += shifted_x * shifted_y
if norm_x == 0.0 and norm_y == 0.0:
return 0.0
elif dot_product == 0.0:
return 1.0
else:
return 1.0 - (dot_product / np.sqrt(norm_x * norm_y))
@numba.njit(
"f4(f4[::1],f4[::1])",
fastmath=True,
locals={
"result": numba.types.float32,
"l1_norm_x": numba.types.float32,
"l1_norm_y": numba.types.float32,
"dim": numba.types.uint32,
"i": numba.types.uint16,
},
)
def hellinger(x, y):
result = 0.0
l1_norm_x = 0.0
l1_norm_y = 0.0
dim = x.shape[0]
for i in range(dim):
result += np.sqrt(x[i] * y[i])
l1_norm_x += x[i]
l1_norm_y += y[i]
if l1_norm_x == 0 and l1_norm_y == 0:
return 0.0
elif l1_norm_x == 0 or l1_norm_y == 0:
return 1.0
else:
return np.sqrt(1 - result / np.sqrt(l1_norm_x * l1_norm_y))
@numba.njit(
"f4(f4[::1],f4[::1])",
fastmath=True,
locals={
"result": numba.types.float32,
"l1_norm_x": numba.types.float32,
"l1_norm_y": numba.types.float32,
"dim": numba.types.uint32,
"i": numba.types.uint16,
},
)
def alternative_hellinger(x, y):
result = 0.0
l1_norm_x = 0.0
l1_norm_y = 0.0
dim = x.shape[0]
for i in range(dim):
result += np.sqrt(x[i] * y[i])
l1_norm_x += x[i]
l1_norm_y += y[i]
if l1_norm_x == 0 and l1_norm_y == 0:
return 0.0
elif l1_norm_x == 0 or l1_norm_y == 0:
return np.inf
elif result <= 0:
return np.inf
else:
return 0.5 * (np.log(l1_norm_x) + np.log(l1_norm_y)) - np.log(result)
@numba.vectorize(fastmath=True, cache=True)
def correct_alternative_hellinger(d):
return np.arccos(np.exp(-d))
@numba.njit()
def rankdata(a, method="average"):
arr = np.ravel(np.asarray(a))
if method == "ordinal":
sorter = arr.argsort(kind="mergesort")
else:
sorter = arr.argsort(kind="quicksort")
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size)
if method == "ordinal":
return (inv + 1).astype(np.float64)
arr = arr[sorter]
obs = np.ones(arr.size, np.bool_)
obs[1:] = arr[1:] != arr[:-1]
dense = obs.cumsum()[inv]
if method == "dense":
return dense.astype(np.float64)
# cumulative counts of each unique value
nonzero = np.nonzero(obs)[0]
count = np.concatenate((nonzero, np.array([len(obs)], nonzero.dtype)))
if method == "max":
return count[dense].astype(np.float64)
if method == "min":
return (count[dense - 1] + 1).astype(np.float64)
# average method
return 0.5 * (count[dense] + count[dense - 1] + 1)
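# A worked example (not part of the library) of the tie-handling methods above,
# assuming the input a = np.array([1., 2., 2., 3.]):
#   rankdata(a, "average") -> [1.0, 2.5, 2.5, 4.0]   (ties share the mean rank)
#   rankdata(a, "min")     -> [1.0, 2.0, 2.0, 4.0]
#   rankdata(a, "max")     -> [1.0, 3.0, 3.0, 4.0]
#   rankdata(a, "dense")   -> [1.0, 2.0, 2.0, 3.0]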
@numba.njit(fastmath=True)
def spearmanr(x, y):
a = np.column_stack((x, y))
n_vars = a.shape[1]
for i in range(n_vars):
a[:, i] = rankdata(a[:, i])
rs = np.corrcoef(a, rowvar=0)
return rs[1, 0]
named_distances = {
# general minkowski distances
"euclidean": euclidean,
"l2": euclidean,
"manhattan": manhattan,
"taxicab": manhattan,
"l1": manhattan,
"chebyshev": chebyshev,
"linfinity": chebyshev,
"linfty": chebyshev,
"linf": chebyshev,
"minkowski": minkowski,
# Standardised/weighted distances
"seuclidean": standardised_euclidean,
"standardised_euclidean": standardised_euclidean,
"wminkowski": weighted_minkowski,
"weighted_minkowski": weighted_minkowski,
"mahalanobis": mahalanobis,
# Other distances
"canberra": canberra,
"cosine": cosine,
"correlation": correlation,
"hellinger": hellinger,
"haversine": haversine,
"braycurtis": bray_curtis,
"spearmanr": spearmanr,
# Binary distances
"hamming": hamming,
"jaccard": jaccard,
"dice": dice,
"matching": matching,
"kulsinski": kulsinski,
"rogerstanimoto": rogers_tanimoto,
"russellrao": russellrao,
"sokalsneath": sokal_sneath,
"sokalmichener": sokal_michener,
"yule": yule,
}
# Some distances have a faster to compute alternative that
# retains the same ordering of distances. We can compute with
# this instead, and then correct the final distances when complete.
# This provides a list of distances that have such an alternative
# along with the alternative distance function and the correction
# function to be applied.
fast_distance_alternatives = {
"euclidean": {"dist": squared_euclidean, "correction": np.sqrt},
"l2": {"dist": squared_euclidean, "correction": np.sqrt},
"cosine": {"dist": alternative_cosine, "correction": correct_alternative_cosine},
"hellinger": {
"dist": alternative_hellinger,
"correction": correct_alternative_hellinger,
},
}
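# A minimal usage sketch (not part of the library) of the alternative/correction
# pairing above, using cosine as the example: the alternative preserves the
# ordering of neighbours, and the vectorized correction recovers the true value.
#
#   x = np.random.rand(16).astype(np.float32)
#   y = np.random.rand(16).astype(np.float32)
#   d_fast = correct_alternative_cosine(alternative_cosine(x, y))
#   # d_fast agrees with cosine(x, y) up to floating point error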
| 25.218589
| 85
| 0.582105
|
082be12429b5455e54b7f5a468201147d3a1bf97
| 1,524
|
py
|
Python
|
fboss/py/fboss/cli/commands/agent.py
|
midopooler/fboss
|
c8d08dd4255e97e5977f53712e7c91a7d045a0cb
|
[
"BSD-3-Clause"
] | 1
|
2020-03-20T22:47:21.000Z
|
2020-03-20T22:47:21.000Z
|
fboss/py/fboss/cli/commands/agent.py
|
phshaikh/fboss
|
05e6ed1e9d62bf7db45a770886b1761e046c1722
|
[
"BSD-3-Clause"
] | null | null | null |
fboss/py/fboss/cli/commands/agent.py
|
phshaikh/fboss
|
05e6ed1e9d62bf7db45a770886b1761e046c1722
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2004-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
import json
from fboss.cli.commands import commands as cmds
from fboss.cli.utils.utils import KEYWORD_CONFIG_RELOAD, KEYWORD_CONFIG_SHOW
from neteng.fboss.ttypes import FbossBaseError
class AgentConfigCmd(cmds.FbossCmd):
def _print_config(self):
with self._create_agent_client() as client:
resp = client.getRunningConfig()
if not resp:
print("No Agent Config Info Found")
return
parsed = json.loads(resp)
print(json.dumps(parsed, indent=4, sort_keys=True, separators=(",", ": ")))
def _reload_config(self):
""" Reload agent config without restarting """
with self._create_agent_client() as client:
try:
client.reloadConfig()
print("Config reloaded")
return 0
except FbossBaseError as e:
print("Fboss Error: " + e)
return 2
def run(self, cmd_type):
if cmd_type == KEYWORD_CONFIG_SHOW:
self._print_config()
elif cmd_type == KEYWORD_CONFIG_RELOAD:
self._reload_config()
else:
raise Exception("Unknown command `{}`".format(cmd_type))
| 32.425532
| 83
| 0.636483
|
68831bf840d50483ad1e14a7b2106c8c66a620f9
| 628
|
py
|
Python
|
GT_forms/manage.py
|
10K-Linesofcode/Glowing-Tribble
|
be0e17ce5391b589792e4ae6b02156d7ee4ce145
|
[
"MIT"
] | null | null | null |
GT_forms/manage.py
|
10K-Linesofcode/Glowing-Tribble
|
be0e17ce5391b589792e4ae6b02156d7ee4ce145
|
[
"MIT"
] | null | null | null |
GT_forms/manage.py
|
10K-Linesofcode/Glowing-Tribble
|
be0e17ce5391b589792e4ae6b02156d7ee4ce145
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'GT_forms.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.545455
| 73
| 0.683121
|
9e5068b034cab2859b5d560d71be1c75f717cbf6
| 400
|
py
|
Python
|
learn-python/variable/nesting/list_nest.py
|
Moazzam125/learn-python
|
a0a92a5f4d1a031d0f66a7d10682c1844b1da80d
|
[
"MIT"
] | 2
|
2020-12-25T06:42:13.000Z
|
2020-12-25T10:25:55.000Z
|
learn-python/variable/nesting/list_nest.py
|
Moazzam125/learn-python
|
a0a92a5f4d1a031d0f66a7d10682c1844b1da80d
|
[
"MIT"
] | null | null | null |
learn-python/variable/nesting/list_nest.py
|
Moazzam125/learn-python
|
a0a92a5f4d1a031d0f66a7d10682c1844b1da80d
|
[
"MIT"
] | 2
|
2021-12-27T06:15:40.000Z
|
2022-01-05T15:08:29.000Z
|
''' List Nesting '''
# List in string
list_str = ['list in string', 'list']
str_nest = list_str
print(str_nest)
# List in List
list0 = ['list', 'in']
list1 = ['list']
list_nest = [list0, list1]
print(list_nest)
# List in Dictionary
list_dict0 = ['list', 'dictionary']
list_dict1 = ['in']
dict_nest = {
'sector': list_dict0[0] + " " + list_dict1[0] + " " + list_dict0[1]
}
print(dict_nest)
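# Indexing into nested lists (an extra illustration, not part of the original
# exercise): the first index selects the inner list, the second selects its item
print(list_nest[0][1])  # prints: in
print(list_nest[1][0])  # prints: list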
| 16
| 71
| 0.6325
|
28c6753f38de553d127ab549385d5af9443c8ee8
| 2,825
|
py
|
Python
|
cortex/secondary/data_quality.py
|
BIDMCDigitalPsychiatry/LAMP-cortex
|
805813e13843b0a76dad060a17b0e02756babf4d
|
[
"BSD-3-Clause"
] | 4
|
2021-03-24T16:46:12.000Z
|
2022-02-09T15:55:11.000Z
|
cortex/secondary/data_quality.py
|
BIDMCDigitalPsychiatry/LAMP-cortex
|
805813e13843b0a76dad060a17b0e02756babf4d
|
[
"BSD-3-Clause"
] | 15
|
2021-04-16T15:55:14.000Z
|
2022-03-04T16:42:27.000Z
|
cortex/secondary/data_quality.py
|
BIDMCDigitalPsychiatry/LAMP-cortex
|
805813e13843b0a76dad060a17b0e02756babf4d
|
[
"BSD-3-Clause"
] | 3
|
2021-04-13T15:04:46.000Z
|
2021-06-21T11:50:16.000Z
|
""" Module to compute the data quality from raw data """
import pandas as pd
import numpy as np
from ..feature_types import secondary_feature, log
from ..raw.accelerometer import accelerometer
from ..raw.gps import gps
MS_IN_A_DAY = 86400000
@secondary_feature(
name='cortex.feature.data_quality',
dependencies=[accelerometer, gps]
)
def data_quality(feature, bin_size=-1, **kwargs):
"""Compute the data quality of raw data over time.
Supported features: accelerometer, gps
Args:
feature (string): The feature to compute quality.
bin_size (int): How to split up time in ms.
Default: -1 will result in default settings
for accelerometer: 1000 (1 Hz, every 1s)
for gps: 1000 * 10 * 60 (every 10min)
**kwargs:
id (string): The participant's LAMP id. Required.
start (int): The initial UNIX timestamp (in ms) of the window for which the feature
is being generated. Required.
end (int): The last UNIX timestamp (in ms) of the window for which the feature
is being generated. Required.
Returns:
A dict consisting:
timestamp (int): The beginning of the window (same as kwargs['start']).
value (float): The percent of the time that there was at least one
data point in each time window of size "bin_size".
"""
_data_quality = 0
bin_width = bin_size
if feature == "accelerometer":
_data = accelerometer(**kwargs)['data']
if bin_size == -1:
bin_width = 1000
elif feature == "gps":
_data = gps(**kwargs)['data']
if bin_size == -1:
bin_width = 1000 * 10 * 60
else:
log.info("This feature is not yet supported.")
return {'timestamp':kwargs['start'], 'value': None}
if len(_data) == 0:
return {'timestamp':kwargs['start'], 'value': 0}
_data_quality = _get_quality(pd.DataFrame(_data)[["timestamp"]],
bin_width,
kwargs["start"],
kwargs["end"])
return {'timestamp':kwargs['start'], 'value': _data_quality}
def _get_quality(_data, bin_size, start, end):
""" Returns the data quality (percent of bins with one or more data points).
Args:
_data - the timestamps
bin_size - the size of the bins in ms
start - the start time in ms
end - the end time in ms
Returns:
the data quality
"""
count = 0
total_bins = (end - start) / bin_size
    arr = _data["timestamp"].to_numpy()
    for i in range(start, end, bin_size):
        if np.any((arr < i + bin_size) & (arr >= i)):
count += 1
return count / total_bins
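# A worked example (synthetic timestamps, not LAMP data) of _get_quality:
# with bin_size=1000 over [0, 4000) there are 4 bins, and the timestamps
# [0, 500, 1200, 3500] fall into 3 of them, giving a quality score of 0.75.
#
#   _get_quality(pd.DataFrame({"timestamp": [0, 500, 1200, 3500]}),
#                bin_size=1000, start=0, end=4000)   # -> 0.75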
| 35.3125
| 95
| 0.585841
|
3d3da13f918971e3f84874aa13298b495901f62c
| 722
|
py
|
Python
|
252_meeting_room.py
|
ojhaanshu87/LeetCode
|
e6e4a15c4bb9f01b67c50689be3ead6c6d5df155
|
[
"Unlicense"
] | null | null | null |
252_meeting_room.py
|
ojhaanshu87/LeetCode
|
e6e4a15c4bb9f01b67c50689be3ead6c6d5df155
|
[
"Unlicense"
] | null | null | null |
252_meeting_room.py
|
ojhaanshu87/LeetCode
|
e6e4a15c4bb9f01b67c50689be3ead6c6d5df155
|
[
"Unlicense"
] | null | null | null |
'''
Given an array of meeting time intervals where intervals[i] = [starti, endi], determine if a person could attend all meetings.
Example 1:
Input: intervals = [[0,30],[5,10],[15,20]]
Output: false
Example 2:
Input: intervals = [[7,10],[2,4]]
Output: true
'''
'''
The idea here is to sort the meetings by starting time. Then, go through the meetings one by one and make sure that each meeting ends before the next one starts.
'''
class Solution(object):
def canAttendMeetings(self, intervals):
intervals = sorted(intervals, key=lambda elem:elem[0])
for elem in range(len(intervals) - 1):
if intervals[elem][1] > intervals[elem + 1][0]:
return False
return True
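# Quick check against the two examples quoted above (illustrative usage only):
#   Solution().canAttendMeetings([[0, 30], [5, 10], [15, 20]])  # -> False
#   Solution().canAttendMeetings([[7, 10], [2, 4]])             # -> True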
| 30.083333
| 161
| 0.66482
|
a99cb07093797b16305f61ac6260fc61857c489a
| 6,420
|
py
|
Python
|
src/kerassurgeon/examples/inception_flowers_prune.py
|
audatic/keras-surgeon
|
241ca31784cac59ec6ad31c0dfd723ce0e1643a3
|
[
"MIT"
] | null | null | null |
src/kerassurgeon/examples/inception_flowers_prune.py
|
audatic/keras-surgeon
|
241ca31784cac59ec6ad31c0dfd723ce0e1643a3
|
[
"MIT"
] | null | null | null |
src/kerassurgeon/examples/inception_flowers_prune.py
|
audatic/keras-surgeon
|
241ca31784cac59ec6ad31c0dfd723ce0e1643a3
|
[
"MIT"
] | null | null | null |
"""Prunes channels from Inception V3 fine tuned on a small flowers data set.
see setup instructions in inception_flowers_tune.py
inception_flowers_tune.py must be run first
"""
import math
from keras.applications import inception_v3
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.models import Model, load_model
from keras.layers import Dense
from keras.callbacks import CSVLogger
import keras.backend as K
import pandas as pd
import numpy as np
import tensorflow as tf
from kerassurgeon.identify import get_apoz
from kerassurgeon import Surgeon
# dimensions of our images.
img_width, img_height = 299, 299
output_dir = "inception_flowers/"
train_data_dir = output_dir + "data/train/"
validation_data_dir = output_dir + "data/validation/"
tuned_weights_path = output_dir + "tuned_weights.h5"
epochs = 15
batch_size = 16
val_batch_size = 16
percent_pruning = 2
total_percent_pruning = 50
def iterative_prune_model():
# build the inception v3 network
base_model = inception_v3.InceptionV3(
include_top=False, weights="imagenet", pooling="avg", input_shape=(299, 299, 3)
)
print("Model loaded.")
top_output = Dense(5, activation="softmax")(base_model.output)
# add the model on top of the convolutional base
model = Model(base_model.inputs, top_output)
del base_model
model.load_weights(tuned_weights_path)
# compile the model with a SGD/momentum optimizer
# and a very slow learning rate.
model.compile(
loss="categorical_crossentropy",
optimizer=SGD(lr=1e-4, momentum=0.9),
metrics=["accuracy"],
)
# Set up data generators
train_datagen = ImageDataGenerator(
preprocessing_function=inception_v3.preprocess_input,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode="categorical",
)
train_steps = train_generator.n // train_generator.batch_size
test_datagen = ImageDataGenerator(
preprocessing_function=inception_v3.preprocess_input
)
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_height, img_width),
batch_size=val_batch_size,
class_mode="categorical",
)
val_steps = validation_generator.n // validation_generator.batch_size
# Evaluate the model performance before pruning
loss = model.evaluate_generator(
validation_generator, validation_generator.n // validation_generator.batch_size
)
print("original model validation loss: ", loss[0], ", acc: ", loss[1])
total_channels = get_total_channels(model)
n_channels_delete = int(math.floor(percent_pruning / 100 * total_channels))
# Incrementally prune the network, retraining it each time
percent_pruned = 0
# If percent_pruned > 0, continue pruning from previous checkpoint
if percent_pruned > 0:
checkpoint_name = "inception_flowers_pruning_" + str(percent_pruned) + "percent"
model = load_model(output_dir + checkpoint_name + ".h5")
while percent_pruned <= total_percent_pruning:
# Prune the model
apoz_df = get_model_apoz(model, validation_generator)
percent_pruned += percent_pruning
print("pruning up to ", str(percent_pruned), "% of the original model weights")
model = prune_model(model, apoz_df, n_channels_delete)
# Clean up tensorflow session after pruning and re-load model
checkpoint_name = "inception_flowers_pruning_" + str(percent_pruned) + "percent"
model.save(output_dir + checkpoint_name + ".h5")
del model
K.clear_session()
tf.reset_default_graph()
model = load_model(output_dir + checkpoint_name + ".h5")
# Re-train the model
model.compile(
loss="categorical_crossentropy",
optimizer=SGD(lr=1e-4, momentum=0.9),
metrics=["accuracy"],
)
checkpoint_name = "inception_flowers_pruning_" + str(percent_pruned) + "percent"
csv_logger = CSVLogger(output_dir + checkpoint_name + ".csv")
model.fit_generator(
train_generator,
steps_per_epoch=train_steps,
epochs=epochs,
validation_data=validation_generator,
validation_steps=val_steps,
workers=4,
callbacks=[csv_logger],
)
# Evaluate the final model performance
loss = model.evaluate_generator(
validation_generator, validation_generator.n // validation_generator.batch_size
)
print("pruned model loss: ", loss[0], ", acc: ", loss[1])
def prune_model(model, apoz_df, n_channels_delete):
    # Identify the n_channels_delete channels with the highest APoZ in the model
sorted_apoz_df = apoz_df.sort_values("apoz", ascending=False)
high_apoz_index = sorted_apoz_df.iloc[0:n_channels_delete, :]
# Create the Surgeon and add a 'delete_channels' job for each layer
# whose channels are to be deleted.
surgeon = Surgeon(model, copy=True)
for name in high_apoz_index.index.unique().values:
channels = list(
pd.Series(high_apoz_index.loc[name, "index"], dtype=np.int64).values
)
surgeon.add_job("delete_channels", model.get_layer(name), channels=channels)
# Delete channels
return surgeon.operate()
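# A minimal sketch (hypothetical layer name "conv_x") of driving Surgeon
# directly, mirroring the add_job call above: delete two channels from a single
# layer and get back the rebuilt, smaller model.
#
#   surgeon = Surgeon(model, copy=True)
#   surgeon.add_job("delete_channels", model.get_layer("conv_x"), channels=[0, 3])
#   smaller_model = surgeon.operate()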
def get_total_channels(model):
start = None
end = None
channels = 0
for layer in model.layers[start:end]:
if layer.__class__.__name__ == "Conv2D":
channels += layer.filters
return channels
def get_model_apoz(model, generator):
# Get APoZ
start = None
end = None
apoz = []
for layer in model.layers[start:end]:
if layer.__class__.__name__ == "Conv2D":
print(layer.name)
apoz.extend(
[
(layer.name, i, value)
for (i, value) in enumerate(get_apoz(model, layer, generator))
]
)
layer_name, index, apoz_value = zip(*apoz)
apoz_df = pd.DataFrame({"layer": layer_name, "index": index, "apoz": apoz_value})
apoz_df = apoz_df.set_index("layer")
return apoz_df
if __name__ == "__main__":
iterative_prune_model()
| 33.612565
| 88
| 0.684424
|
79e767de37a9b40dc316237ae6d4a2b879f80056
| 361
|
py
|
Python
|
game/migrations/0003_auto_20180311_2100.py
|
mingaleg/yakubovich
|
95398c78eaffbd6ff69f8fdbedfc847531219d8a
|
[
"MIT"
] | 5
|
2018-12-12T16:24:42.000Z
|
2020-02-29T18:45:30.000Z
|
game/migrations/0003_auto_20180311_2100.py
|
mingaleg/yakubovich
|
95398c78eaffbd6ff69f8fdbedfc847531219d8a
|
[
"MIT"
] | 3
|
2020-06-05T17:47:13.000Z
|
2022-02-11T03:39:54.000Z
|
game/migrations/0003_auto_20180311_2100.py
|
mingaleg/yakubovich
|
95398c78eaffbd6ff69f8fdbedfc847531219d8a
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.3 on 2018-03-11 21:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('game', '0002_auto_20180311_2050'),
]
operations = [
migrations.AlterModelOptions(
name='game',
options={'permissions': (('start', 'Start game'),)},
),
]
| 20.055556
| 64
| 0.584488
|
37b38306f49233c139da5ccaba916fe565db1c4e
| 4,863
|
py
|
Python
|
gpMgmt/bin/gppylib/operations/utils.py
|
darthunix/gpdb
|
ef7e33315d46586a4b511846531f4973c23187dd
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/operations/utils.py
|
darthunix/gpdb
|
ef7e33315d46586a4b511846531f4973c23187dd
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/operations/utils.py
|
darthunix/gpdb
|
ef7e33315d46586a4b511846531f4973c23187dd
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
import os
import sys
import pickle
from gppylib import gplog
from gppylib.commands.base import OperationWorkerPool, Command, REMOTE
from gppylib.operations import Operation
DEFAULT_NUM_WORKERS = 64
logger = gplog.get_default_logger()
class RemoteOperation(Operation):
# TODO: The Operation that is run remotely cannot return Exceptions.
# This can be resolved easily with a class that wraps the exception: ExceptionCapsule. (Thank you, Pyro.)
# TODO: Remote traceback is lost. Again, this can be solved by embedding remote traceback in an ExceptionCapsule.
"""
RemoteOperation communicates w/ gpoperation.py on the remote end, with the following assumptions.
1) gppylib exists
2) gpoperation.py can see gppylib as a top-level module
3) obj is defined at the top level of its module
This requirement actually arises out of an underlying pickle issue, which in turn, appears
to result from a python class oddity. If class B is defined within class A, it does not consider
A to be its module. B is merely a class that is an attribute of A. For this reason, once instantiated,
B cannot be rebuilt from its module name and class name alone. Its outer class A is a missing piece
of information that gppickle cannot attain from python internals.
4) Most importantly, the operation provided here must be imported into the gppylib... namespace. Otherwise,
gpoperation.py will be unable to deserialize and import it on the remote end.
In the normal gppylib use case, some bin/ level script will use an absolute import to bring in something
from gppylib. In this manner, any ensuing imports (even if they're relative) will still be imported into the
gppylib namespace. Thus, pickling such objects over ssh to gpoperation.py will succeed, because such objects
will be immediately importable on the remote end.
However, there is exactly one edge case: unit testing. If a unit test is invoked directly through CLI, its objects
reside in the __main__ module as opposed to gppylib.test_something. Again, this can be circumvented by invoking unit tests
through PyUnit or python -m unittest, etc.
"""
def __init__(self, operation, host, msg_ctx=""):
super(RemoteOperation, self).__init__()
self.operation = operation
self.host = host
self.msg_ctx = msg_ctx
def execute(self):
execname = os.path.split(sys.argv[0])[-1]
pickled_execname = pickle.dumps(execname)
pickled_operation = pickle.dumps(self.operation)
cmd = Command('pickling an operation', '$GPHOME/sbin/gpoperation.py',
ctxt=REMOTE, remoteHost=self.host, stdin = pickled_execname + pickled_operation)
cmd.run(validateAfter=True)
msg = "on host %s: %s" % (self.host, cmd.get_results().stdout)
if self.msg_ctx:
msg = "Output for %s %s" % (self.msg_ctx, msg)
else:
msg = "Output %s" %(msg)
logger.debug(msg)
ret = self.operation.ret = pickle.loads(cmd.get_results().stdout)
if isinstance(ret, Exception):
raise ret
return ret
def __str__(self):
return "Remote(%s)" % str(self.operation)
class ParallelOperation(Operation):
"""
Caveat: execute returns None. It is the caller's responsibility to introspect operations.
"""
def __init__(self, operations, max_parallelism=DEFAULT_NUM_WORKERS):
super(ParallelOperation, self).__init__()
self.operations = operations
self.parallelism = min(len(operations), max_parallelism)
def execute(self):
if not self.operations or len(self.operations) == 0:
return
pool = OperationWorkerPool(numWorkers=self.parallelism, operations=self.operations)
pool.join()
pool.haltWork()
def __str__(self):
return "Parallel(%d)" % len(self.operations)
class SerialOperation(Operation):
"""
Caveat: All operations must succeed. SerialOperation will raise first exception encountered.
"""
def __init__(self, operations):
super(SerialOperation, self).__init__()
self.operations = operations
def execute(self):
return [operation.run() for operation in self.operations]
def __str__(self):
return "Serial(%d)" % len(self.operations)
class MasterOperation(Operation):
def __init__(self, operation):
super(MasterOperation, self).__init__()
self.operation = operation
def execute(self):
# TODO: check that we're running on master
pass
if __name__ == "__main__":
import sys
from unix import CheckFile, CheckRemoteFile
    print(RemoteOperation(CheckFile(sys.argv[1]), "localhost").run())
    print(CheckRemoteFile(sys.argv[1], "localhost").run())
| 44.614679
| 129
| 0.691754
|
cba5a871d6c5b8647c0e25943133c0a6dcf78dd8
| 232
|
py
|
Python
|
lemon/__init__.py
|
trilan/lemon
|
7f55b72a84fcae2c4b6372279dda2a05332f1e4c
|
[
"BSD-3-Clause"
] | 2
|
2018-11-11T16:15:21.000Z
|
2021-07-30T20:02:31.000Z
|
lemon/__init__.py
|
trilan/lemon
|
7f55b72a84fcae2c4b6372279dda2a05332f1e4c
|
[
"BSD-3-Clause"
] | null | null | null |
lemon/__init__.py
|
trilan/lemon
|
7f55b72a84fcae2c4b6372279dda2a05332f1e4c
|
[
"BSD-3-Clause"
] | 2
|
2016-10-07T06:42:03.000Z
|
2021-07-30T20:02:34.000Z
|
from .options import ModelAdmin, AppAdmin
from .options import StackedInline, TabularInline
from .sites import AdminSite, site
__all__ = [
'ModelAdmin', 'AppAdmin', 'StackedInline', 'TabularInline', 'AdminSite',
'site',
]
| 23.2
| 76
| 0.732759
|
cbf04dca5354cf0176909738de7d63cfe84e01cd
| 41,450
|
py
|
Python
|
cardinal_pythonlib/sqlalchemy/merge_db.py
|
bopopescu/pythonlib
|
9c2187d6092ba133342ca3374eb7c86f9d296c30
|
[
"Apache-2.0"
] | null | null | null |
cardinal_pythonlib/sqlalchemy/merge_db.py
|
bopopescu/pythonlib
|
9c2187d6092ba133342ca3374eb7c86f9d296c30
|
[
"Apache-2.0"
] | null | null | null |
cardinal_pythonlib/sqlalchemy/merge_db.py
|
bopopescu/pythonlib
|
9c2187d6092ba133342ca3374eb7c86f9d296c30
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# cardinal_pythonlib/sqlalchemy/merge_db.py
"""
===============================================================================
Original code copyright (C) 2009-2020 Rudolf Cardinal (rudolf@pobox.com).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Function "merge_db" to merge two databases via SQLAlchemy.**
*Notes:*
Note in passing: there is no common base class for SQLAlchemy ORM instances
(it's not :class:`DeclarativeMeta`). For example, in CamCOPS:
.. code-block:: none
> Phq9.__bases__
(<class 'camcops_server.cc_modules.cc_task.TaskHasPatientMixin'>,
<class 'camcops_server.cc_modules.cc_task.Task'>,
<class 'sqlalchemy.ext.declarative.api.Base'>)
... and that last :class:`Base` isn't a permanent class, just a newly named
thing; see :func:`sqlalchemy.ext.declarative.api.declarative_base`.
Again, with the CamCOPS classes:
.. code-block:: none
> issubclass(Phq9, Base)
True
> issubclass(Base, DeclarativeMeta)
False
> Base.__bases__
(<class 'object'>,)
So the best type hints we have are:
.. code-block:: none
class: Type
instance: object
"""
import sys
from typing import Any, Callable, Dict, List, Tuple, Type
import unittest
from sqlalchemy.engine import create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import lazyload, load_only
from sqlalchemy.orm import relationship
# noinspection PyProtectedMember
from sqlalchemy.orm.session import make_transient, Session, sessionmaker
from sqlalchemy.schema import sort_tables
from sqlalchemy.sql.schema import Column, ForeignKey, MetaData, Table
from sqlalchemy.sql.sqltypes import Integer, Text
from cardinal_pythonlib.dicts import map_keys_to_values
from cardinal_pythonlib.logs import (
get_brace_style_log_with_null_handler,
main_only_quicksetup_rootlogger,
)
from cardinal_pythonlib.sqlalchemy.dump import dump_database_as_insert_sql
from cardinal_pythonlib.sqlalchemy.orm_inspect import (
rewrite_relationships,
colname_to_attrname_dict,
copy_sqla_object,
get_orm_classes_by_table_name_from_base,
get_pk_attrnames,
)
from cardinal_pythonlib.sqlalchemy.schema import (
get_column_names,
get_table_names,
)
from cardinal_pythonlib.sqlalchemy.session import (
get_engine_from_session,
get_safe_url_from_engine,
get_safe_url_from_session,
SQLITE_MEMORY_URL,
)
from cardinal_pythonlib.sqlalchemy.table_identity import TableIdentity
log = get_brace_style_log_with_null_handler(__name__)
# =============================================================================
# TableDependency; get_all_dependencies
# =============================================================================
class TableDependency(object):
"""
Stores a table dependency for use in functions such as
:func:`sqlalchemy.schema.sort_tables`, which requires a tuple of two
:class:`Table` objects, in the order ``(parent, child)``, where ``child``
depends on ``parent`` (e.g. a field like ``child.parent_id`` refers to
``parent.id``).
"""
def __init__(self,
parent_table_id: TableIdentity = None,
child_table_id: TableIdentity = None,
parent_table: Table = None,
child_table: Table = None,
parent_tablename: str = None,
child_tablename: str = None,
metadata: MetaData = None) -> None:
"""
The parent and child tables can be specified by name, :class:`Table`
object, or our :class:`TableIdentity` descriptor class.
"""
overspecified = "Don't specify table with both TableIdentity and " \
"Table/tablename"
if parent_table_id:
self._parent = parent_table_id
assert parent_table is None and not parent_tablename, overspecified
else:
self._parent = TableIdentity(table=parent_table,
tablename=parent_tablename,
metadata=metadata)
if child_table_id:
self._child = child_table_id
assert child_table is None and not child_tablename, overspecified
else:
self._child = TableIdentity(table=child_table,
tablename=child_tablename,
metadata=metadata)
def __str__(self) -> str:
return f"{self.child_tablename} -> {self.parent_tablename}"
def __repr__(self) -> str:
return (
f"TableDependency({self.child_tablename!r} "
f"depends on {self.parent_tablename!r})"
)
def set_metadata(self, metadata: MetaData) -> None:
"""
Sets the metadata for the parent and child tables.
"""
self._parent.set_metadata(metadata)
self._child.set_metadata(metadata)
def set_metadata_if_none(self, metadata: MetaData) -> None:
"""
Sets the metadata for the parent and child tables, unless they were
set already.
"""
self._parent.set_metadata_if_none(metadata)
self._child.set_metadata_if_none(metadata)
@property
def parent_table(self) -> Table:
"""
Returns the parent table as a :class:`Table`.
"""
return self._parent.table
@property
def child_table(self) -> Table:
"""
Returns the child table as a :class:`Table`.
"""
return self._child.table
@property
def parent_tablename(self) -> str:
"""
Returns the parent table's string name.
"""
return self._parent.tablename
@property
def child_tablename(self) -> str:
"""
Returns the child table's string name.
"""
return self._child.tablename
def sqla_tuple(self) -> Tuple[Table, Table]:
"""
Returns the tuple ``(parent_table, child_table)``, both as
:class:`Table` objects.
"""
return self.parent_table, self.child_table
def get_all_dependencies(metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependency]:
"""
Describes how the tables found in the metadata depend on each other.
(If table B contains a foreign key to table A, for example, then B depends
on A.)
Args:
metadata: the metadata to inspect
extra_dependencies: additional table dependencies to specify manually
sort: sort into alphabetical order of (parent, child) table names?
Returns:
a list of :class:`TableDependency` objects
See :func:`sort_tables_and_constraints` for method.
"""
extra_dependencies = extra_dependencies or [] # type: List[TableDependency] # noqa
for td in extra_dependencies:
td.set_metadata_if_none(metadata)
dependencies = set([td.sqla_tuple() for td in extra_dependencies])
tables = list(metadata.tables.values()) # type: List[Table]
for table in tables:
for fkc in table.foreign_key_constraints:
if fkc.use_alter is True:
# http://docs.sqlalchemy.org/en/latest/core/constraints.html#sqlalchemy.schema.ForeignKeyConstraint.params.use_alter # noqa
continue
dependent_on = fkc.referred_table
if dependent_on is not table:
dependencies.add((dependent_on, table))
if hasattr(table, "_extra_dependencies"):
# noinspection PyProtectedMember
dependencies.update(
(parent, table) for parent in table._extra_dependencies
)
dependencies = [
TableDependency(parent_table=parent, child_table=child)
for parent, child in dependencies
]
if sort:
dependencies.sort(key=lambda td_: (td_.parent_tablename,
td_.child_tablename))
return dependencies
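# A minimal sketch (illustrative table names only) of how a foreign key is
# reported: a column child.parent_id referencing parent.id yields a single
# TableDependency saying that 'child' depends on 'parent'.
#
#   meta = MetaData()
#   parent = Table("parent", meta, Column("id", Integer, primary_key=True))
#   child = Table("child", meta,
#                 Column("id", Integer, primary_key=True),
#                 Column("parent_id", Integer, ForeignKey("parent.id")))
#   get_all_dependencies(meta)
#   # -> [TableDependency('child' depends on 'parent')]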
# =============================================================================
# TableDependencyClassification; classify_tables_by_dependency_type
# =============================================================================
class TableDependencyClassification(object):
"""
Class to describe/classify a table in terms of its dependencies.
"""
def __init__(self,
table: Table,
children: List[Table] = None,
parents: List[Table] = None) -> None:
"""
Args:
table: the table in question
children: its children (things that depend on it)
parents: its parents (things that it depends on)
"""
self.table = table
self.children = children or [] # type: List[Table]
self.parents = parents or [] # type: List[Table]
self.circular = False
self.circular_chain = [] # type: List[Table]
@property
def is_child(self) -> bool:
"""
Is this table a child?
"""
return bool(self.parents)
@property
def is_parent(self) -> bool:
"""
Is this table a parent?
"""
return bool(self.children)
@property
def standalone(self) -> bool:
"""
Is this table standalone (neither a child nor a parent)?
"""
return not self.is_child and not self.is_parent
@property
def tablename(self) -> str:
"""
Returns the table's name.
"""
return self.table.name
@property
def parent_names(self) -> List[str]:
"""
Returns the names of this table's parents.
"""
return [t.name for t in self.parents]
@property
def child_names(self) -> List[str]:
"""
Returns the names of this table's children.
"""
return [t.name for t in self.children]
def set_circular(self, circular: bool, chain: List[Table] = None) -> None:
"""
Mark this table as circular (or not).
Args:
circular: is it circular?
chain: if it's circular, this should be the list of tables
participating in the circular chain
"""
self.circular = circular
self.circular_chain = chain or [] # type: List[Table]
@property
def circular_description(self) -> str:
"""
Description of the circular chain.
"""
return " -> ".join(t.name for t in self.circular_chain)
@property
def description(self) -> str:
"""
Short description.
"""
if self.is_parent and self.is_child:
desc = "parent+child"
elif self.is_parent:
desc = "parent"
elif self.is_child:
desc = "child"
else:
desc = "standalone"
if self.circular:
desc += f"+CIRCULAR({self.circular_description})"
return desc
def __str__(self) -> str:
return f"{self.tablename}:{self.description}"
def __repr__(self) -> str:
return (
f"TableDependencyClassification("
f"{self.tablename!r}:{self.description})"
)
def classify_tables_by_dependency_type(
metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependencyClassification]:
"""
Inspects a metadata object (optionally adding other specified dependencies)
and returns a list of objects describing their dependencies.
Args:
metadata: the :class:`MetaData` to inspect
extra_dependencies: additional dependencies
sort: sort the results by table name?
Returns:
list of :class:`TableDependencyClassification` objects, one for each
table
"""
tables = list(metadata.tables.values()) # type: List[Table]
all_deps = get_all_dependencies(metadata, extra_dependencies)
tdcmap = {} # type: Dict[Table, TableDependencyClassification]
for table in tables:
parents = [td.parent_table for td in all_deps
if td.child_table == table]
children = [td.child_table for td in all_deps
if td.parent_table == table]
tdcmap[table] = TableDependencyClassification(
table, parents=parents, children=children
)
# Check for circularity
def parents_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.parents:
return True, [start, probe]
for parent in tdc_.parents:
contains_, chain_ = parents_contain(start=parent, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
def children_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.children:
return True, [start, probe]
for child in tdc_.children:
contains_, chain_ = children_contain(start=child, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
for table in tables:
tdc = tdcmap[table]
contains, chain = parents_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
contains, chain = children_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
tdc.set_circular(False)
classifications = list(tdcmap.values())
if sort:
classifications.sort(key=lambda c: c.tablename)
return classifications
# =============================================================================
# TranslationContext (for merge_db)
# =============================================================================
class TranslationContext(object):
"""
Information-passing object for user callbacks from :func:`merge_db`.
Args:
oldobj:
The old SQLAlchemy ORM object from the source session.
newobj:
The framework's go at building a new SQLAlchemy ORM object, which
will be inserted into the destination session.
The sequence is:
1. ``newobj`` is created
2. a :class:`TranslationContext` is created, referring to
``newobj``
3. The ``translate_fn`` parameter to :func:`merge_db` will be
called with the :class:`TranslationContext` as its parameter
- the user-suppled :func:`translate_fn` function can, at this
point, modify the ``newobj`` attribute
- if the user function sets the ``newobj`` attribute to
``None``, this object will be skipped
4. If the :class:`TranslationContext`'s ``newobj`` member is not
``None``, the new object is inserted into the destination
session.
objmap:
A dictionary mapping old to new objects, for objects in tables
other than standalone tables.
table:
SQLAlchemy ``Table`` object from the metadata. (Not necessarily
bound to any session, but will reflect the structure of the
destination, not necessarily the source, since the merge operation
assumes that the metadata describes the destination.)
tablename:
Table name that corresponds to ``table``.
src_session:
The SQLAlchemy :class:`Session` object for the source.
dst_session:
The SQLAlchemy :class:`Session` object for the destination.
src_engine:
The SQLAlchemy :class:`Engine` object for the source.
dst_engine:
The SQLAlchemy :class:`Engine` object for the destination.
missing_src_columns:
Names of columns known to be present in the destination but absent
from the source.
info:
Extra dictionary for additional user-specified information.
It is possible that ``oldobj`` and ``newobj`` are the SAME OBJECT.
"""
def __init__(self,
oldobj: object,
newobj: object,
objmap: Dict[object, object],
table: Table,
tablename: str,
src_session: Session,
dst_session: Session,
src_engine: Engine,
dst_engine: Engine,
src_table_names: List[str],
missing_src_columns: List[str] = None,
info: Dict[str, Any] = None) -> None:
self.oldobj = oldobj
self.newobj = newobj
self.objmap = objmap
self.table = table
self.tablename = tablename
self.src_session = src_session
self.dst_session = dst_session
self.src_engine = src_engine
self.dst_engine = dst_engine
self.src_table_names = src_table_names
self.missing_src_columns = missing_src_columns or [] # type: List[str]
self.info = info or {} # type: Dict[str, Any]
# =============================================================================
# merge_db
# =============================================================================
def merge_db(base_class: Type,
src_engine: Engine,
dst_session: Session,
allow_missing_src_tables: bool = True,
allow_missing_src_columns: bool = True,
translate_fn: Callable[[TranslationContext], None] = None,
skip_tables: List[TableIdentity] = None,
only_tables: List[TableIdentity] = None,
tables_to_keep_pks_for: List[TableIdentity] = None,
extra_table_dependencies: List[TableDependency] = None,
dummy_run: bool = False,
info_only: bool = False,
report_every: int = 1000,
flush_per_table: bool = True,
flush_per_record: bool = False,
commit_with_flush: bool = False,
commit_at_end: bool = True,
prevent_eager_load: bool = True,
trcon_info: Dict[str, Any] = None) -> None:
"""
Copies an entire database as far as it is described by ``metadata`` and
``base_class``, from SQLAlchemy ORM session ``src_session`` to
``dst_session``, and in the process:
- creates new primary keys at the destination, or raises an error if it
doesn't know how (typically something like: ``Field 'name' doesn't have a
default value``)
- maintains relationships, or raises an error if it doesn't know how
Basic method:
- Examines the metadata for the SQLAlchemy ORM base class you provide.
- Assumes that the tables exist (in the destination).
- For each table/ORM class found in the metadata:
- Queries (via the ORM) from the source.
- For each ORM instance retrieved:
- Writes information to the destination SQLAlchemy session.
- If that ORM object has relationships, process them too.
If a table is missing in the source, then that's OK if and only if
``allow_missing_src_tables`` is set. (Similarly with columns and
``allow_missing_src_columns``; we ask the ORM to perform a partial load,
of a subset of attributes only.)
Args:
base_class:
your ORM base class, e.g. from ``Base = declarative_base()``
src_engine:
            SQLAlchemy :class:`Engine` for the source database
dst_session:
SQLAlchemy :class:`Session` for the destination database
allow_missing_src_tables:
proceed if tables are missing from the source (allowing you to
import from older, incomplete databases)
allow_missing_src_columns:
proceed if columns are missing from the source (allowing you to
import from older, incomplete databases)
translate_fn:
optional function called with each instance, so you can modify
instances in the pipeline. Signature:
.. code-block:: python
def my_translate_fn(trcon: TranslationContext) -> None:
# We can modify trcon.newobj, or replace it (including
# setting trcon.newobj = None to omit this object).
pass
skip_tables:
tables to skip (specified as a list of :class:`TableIdentity`)
only_tables:
tables to restrict the processor to (specified as a list of
:class:`TableIdentity`)
tables_to_keep_pks_for:
tables for which PKs are guaranteed to be safe to insert into the
destination database, without modification (specified as a list of
:class:`TableIdentity`)
extra_table_dependencies:
optional list of :class:`TableDependency` objects (q.v.)
dummy_run:
don't alter the destination database
info_only:
show info, then stop
report_every:
provide a progress report every *n* records
flush_per_table:
flush the session after every table (reasonable)
flush_per_record:
flush the session after every instance (AVOID this if tables may
refer to themselves)
commit_with_flush:
``COMMIT`` with each flush?
commit_at_end:
``COMMIT`` when finished?
prevent_eager_load:
disable any eager loading (use lazy loading instead)
trcon_info:
additional dictionary passed to ``TranslationContext.info``
(see :class:`.TranslationContext`)
"""
log.info("merge_db(): starting")
if dummy_run:
log.warning("Dummy run only; destination will not be changed")
# Check parameters before we modify them
if only_tables is not None and not only_tables:
log.warning("... only_tables == []; nothing to do")
return
# Finalize parameters
skip_tables = skip_tables or [] # type: List[TableIdentity]
only_tables = only_tables or [] # type: List[TableIdentity]
tables_to_keep_pks_for = tables_to_keep_pks_for or [] # type: List[TableIdentity] # noqa
extra_table_dependencies = extra_table_dependencies or [] # type: List[TableDependency] # noqa
trcon_info = trcon_info or {} # type: Dict[str, Any]
# We need both Core and ORM for the source.
# noinspection PyUnresolvedReferences
metadata = base_class.metadata # type: MetaData
src_session = sessionmaker(bind=src_engine)() # type: Session
dst_engine = get_engine_from_session(dst_session)
tablename_to_ormclass = get_orm_classes_by_table_name_from_base(base_class)
# Tell all TableIdentity objects about their metadata
for tilist in [skip_tables, only_tables, tables_to_keep_pks_for]:
for ti in tilist:
ti.set_metadata_if_none(metadata)
for td in extra_table_dependencies:
td.set_metadata_if_none(metadata)
# Get all lists of tables as their names
skip_table_names = [ti.tablename for ti in skip_tables]
only_table_names = [ti.tablename for ti in only_tables]
tables_to_keep_pks_for = [ti.tablename for ti in tables_to_keep_pks_for] # type: List[str] # noqa
# ... now all are of type List[str]
# Safety check: this is an imperfect check for source == destination, but
# it is fairly easy to pass in the wrong URL, so let's try our best:
_src_url = get_safe_url_from_engine(src_engine)
_dst_url = get_safe_url_from_session(dst_session)
assert _src_url != _dst_url or _src_url == SQLITE_MEMORY_URL, (
"Source and destination databases are the same!"
)
# Check the right tables are present.
src_tables = sorted(get_table_names(src_engine))
dst_tables = sorted(list(tablename_to_ormclass.keys()))
log.debug("Source tables: {!r}", src_tables)
log.debug("Destination tables: {!r}", dst_tables)
if not allow_missing_src_tables:
missing_tables = sorted(
d for d in dst_tables
if d not in src_tables and d not in skip_table_names
)
if missing_tables:
raise RuntimeError("The following tables are missing from the "
"source database: " + repr(missing_tables))
table_num = 0
overall_record_num = 0
tables = list(metadata.tables.values()) # type: List[Table]
# Very helpfully, MetaData.sorted_tables produces tables in order of
# relationship dependency ("each table is preceded by all tables which
# it references");
# http://docs.sqlalchemy.org/en/latest/core/metadata.html
# HOWEVER, it only works if you specify ForeignKey relationships
# explicitly.
# We can also add in user-specified dependencies, and therefore can do the
# sorting in one step with sqlalchemy.schema.sort_tables:
ordered_tables = sort_tables(
tables,
extra_dependencies=[td.sqla_tuple() for td in extra_table_dependencies]
)
# Note that the ordering is NOT NECESSARILY CONSISTENT, though (in that
# the order of stuff it doesn't care about varies across runs).
all_dependencies = get_all_dependencies(metadata, extra_table_dependencies)
dep_classifications = classify_tables_by_dependency_type(
metadata, extra_table_dependencies)
circular = [tdc for tdc in dep_classifications if tdc.circular]
assert not circular, f"Circular dependencies! {circular!r}"
log.debug("All table dependencies: {}",
"; ".join(str(td) for td in all_dependencies))
log.debug("Table dependency classifications: {}",
"; ".join(str(c) for c in dep_classifications))
log.info("Processing tables in the order: {!r}",
[table.name for table in ordered_tables])
objmap = {}
def flush() -> None:
if not dummy_run:
log.debug("Flushing session")
dst_session.flush()
if commit_with_flush:
log.debug("Committing...")
dst_session.commit()
def translate(oldobj_: object, newobj_: object) -> object:
if translate_fn is None:
return newobj_
tc = TranslationContext(oldobj=oldobj_,
newobj=newobj_,
objmap=objmap,
table=table,
tablename=tablename,
src_session=src_session,
dst_session=dst_session,
src_engine=src_engine,
dst_engine=dst_engine,
missing_src_columns=missing_columns,
src_table_names=src_tables,
info=trcon_info)
translate_fn(tc)
if tc.newobj is None:
log.debug("Instance skipped by user-supplied translate_fn")
return tc.newobj
# -------------------------------------------------------------------------
# Now, per table/ORM class...
# -------------------------------------------------------------------------
for table in ordered_tables:
tablename = table.name
if tablename in skip_table_names:
log.info("... skipping table {!r} (as per skip_tables)", tablename)
continue
if only_table_names and tablename not in only_table_names:
log.info("... ignoring table {!r} (as per only_tables)", tablename)
continue
if allow_missing_src_tables and tablename not in src_tables:
log.info("... ignoring table {!r} (not in source database)",
tablename)
continue
table_num += 1
table_record_num = 0
src_columns = sorted(get_column_names(src_engine, tablename))
dst_columns = sorted([column.name for column in table.columns])
missing_columns = sorted(list(set(dst_columns) - set(src_columns)))
if not allow_missing_src_columns and missing_columns:
raise RuntimeError(
f"The following columns are missing from source table "
f"{tablename!r}: {missing_columns!r}")
orm_class = tablename_to_ormclass[tablename]
pk_attrs = get_pk_attrnames(orm_class)
c2a = colname_to_attrname_dict(orm_class)
missing_attrs = map_keys_to_values(missing_columns, c2a)
tdc = [tdc for tdc in dep_classifications if tdc.table == table][0]
log.info("Processing table {!r} via ORM class {!r}",
tablename, orm_class)
log.debug("PK attributes: {!r}", pk_attrs)
log.debug("Table: {!r}", table)
log.debug("Dependencies: parents = {!r}; children = {!r}",
tdc.parent_names, tdc.child_names)
if info_only:
log.debug("info_only; skipping table contents")
continue
def wipe_primary_key(inst: object) -> None:
for attrname in pk_attrs:
setattr(inst, attrname, None)
query = src_session.query(orm_class)
if allow_missing_src_columns and missing_columns:
src_attrs = map_keys_to_values(src_columns, c2a)
log.info("Table {} is missing columns {} in the source",
tablename, missing_columns)
log.debug("... using only columns {} via attributes {}",
src_columns, src_attrs)
query = query.options(load_only(*src_attrs))
# PROBLEM: it will not ignore the PK.
if prevent_eager_load:
query = query.options(lazyload("*"))
wipe_pk = tablename not in tables_to_keep_pks_for
# How best to deal with relationships?
#
# This doesn't work:
# - process tables in order of dependencies, eager-loading
# relationships with
# for relationship in insp.mapper.relationships: # type: RelationshipProperty # noqa
# related_col = getattr(orm_class, relationship.key)
# query = query.options(joinedload(related_col))
# - expunge from old session / make_transient / wipe_primary_key/ add
# to new session
# ... get errors like
# sqlalchemy.exc.InvalidRequestError: Object '<Parent at
# 0x7f99492440b8>' is already attached to session '7' (this is
# '6')
#
# ... at the point of dst_session.add(instance)
# ... when adding the object on the child side of the relationship
# ... I suspect that we move the Parent from session S to session D,
# but when we eager-load the Parent from the Child, that makes
# another in session S, so when we add the Child to session D, its
# parent is in session S, which is wrong.
#
# We must, therefore, take a more interventional approach, in which we
# maintain a copy of the old object, make a copy using
# copy_sqla_object, and re-assign relationships accordingly.
for instance in query.all():
# log.debug("Source instance: {!r}", instance)
table_record_num += 1
overall_record_num += 1
if table_record_num % report_every == 0:
log.info("... progress{}: on table {} ({}); record {} this "
"table; overall record {}",
" (DUMMY RUN)" if dummy_run else "",
table_num, tablename,
table_record_num, overall_record_num)
if tdc.standalone:
# Our table has neither parents nor children. We can therefore
# simply move the instance from one session to the other,
# blanking primary keys.
# https://stackoverflow.com/questions/14636192/sqlalchemy-modification-of-detached-object # noqa
src_session.expunge(instance)
make_transient(instance)
if wipe_pk:
wipe_primary_key(instance)
instance = translate(instance, instance)
if not instance:
continue # translate_fn elected to skip it
if not dummy_run:
dst_session.add(instance)
# new PK will be created when session is flushed
else:
# Our table has either parents or children. We therefore make
# a copy and place the COPY in the destination session. If
# this object may be a parent, we maintain a log (in objmap)
# of the old-to-new mapping. If this object is a child, we
# re-assign its relationships based on the old-to-new mapping
# (since we will have processed the parent table first, having
# carefully ordered them in advance).
oldobj = instance # rename for clarity
newobj = copy_sqla_object(
oldobj, omit_pk=wipe_pk, omit_fk=True,
omit_attrs=missing_attrs, debug=False
)
rewrite_relationships(oldobj, newobj, objmap, debug=False,
skip_table_names=skip_table_names)
newobj = translate(oldobj, newobj)
if not newobj:
continue # translate_fn elected to skip it
if not dummy_run:
dst_session.add(newobj)
# new PK will be created when session is flushed
if tdc.is_parent:
objmap[oldobj] = newobj # for its children's benefit
if flush_per_record:
flush()
if flush_per_table:
flush()
flush()
if commit_at_end:
log.debug("Committing...")
dst_session.commit()
log.info("merge_db(): finished")
# =============================================================================
# Unit tests
# =============================================================================
class MergeTestMixin(object):
"""
Mixin to create source/destination databases as in-memory SQLite databases
for unit testing purposes.
"""
def __init__(self, *args, echo: bool = False, **kwargs) -> None:
self.src_engine = create_engine(SQLITE_MEMORY_URL, echo=echo) # type: Engine # noqa
self.dst_engine = create_engine(SQLITE_MEMORY_URL, echo=echo) # type: Engine # noqa
self.src_session = sessionmaker(bind=self.src_engine)() # type: Session # noqa
self.dst_session = sessionmaker(bind=self.dst_engine)() # type: Session # noqa
# log.critical("SRC SESSION: {}", self.src_session)
# log.critical("DST SESSION: {}", self.dst_session)
self.Base = declarative_base()
# noinspection PyArgumentList
super().__init__(*args, **kwargs)
def dump_source(self) -> None:
log.warning("Dumping source")
dump_database_as_insert_sql(
engine=self.src_engine,
fileobj=sys.stdout,
include_ddl=True,
multirow=True
)
def dump_destination(self) -> None:
log.warning("Dumping destination")
dump_database_as_insert_sql(
engine=self.dst_engine,
fileobj=sys.stdout,
include_ddl=True,
multirow=True
)
def do_merge(self, dummy_run: bool = False) -> None:
merge_db(
base_class=self.Base,
src_engine=self.src_engine,
dst_session=self.dst_session,
allow_missing_src_tables=False,
allow_missing_src_columns=True,
translate_fn=None,
skip_tables=None,
only_tables=None,
extra_table_dependencies=None,
dummy_run=dummy_run,
report_every=1000
)
class MergeTestPlain(MergeTestMixin, unittest.TestCase):
"""
Unit tests for a simple merge operation.
*Notes re unit testing:*
- tests are found by virtue of the fact that their names start with
"test"; see
https://docs.python.org/3.6/library/unittest.html#basic-example
- A separate instance of the class is created for each test, and in each
case is called with:
.. code-block:: python
setUp()
testSOMETHING()
tearDown()
... see https://docs.python.org/3.6/library/unittest.html#test-cases
- If you use mixins, they go AFTER :class:`unittest.TestCase`; see
https://stackoverflow.com/questions/1323455/python-unit-test-with-base-and-sub-class
""" # noqa
def setUp(self) -> None:
# log.info('In setUp()')
class Parent(self.Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Text)
class Child(self.Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Text)
parent_id = Column(Integer, ForeignKey("parent.id"))
parent = relationship(Parent)
self.Base.metadata.create_all(self.src_engine)
self.Base.metadata.create_all(self.dst_engine)
p1 = Parent(name="Parent 1")
p2 = Parent(name="Parent 2")
c1 = Child(name="Child 1")
c2 = Child(name="Child 2")
c1.parent = p1
c2.parent = p2
self.src_session.add_all([p1, p2, c1, c2])
self.src_session.commit()
def tearDown(self) -> None:
pass
# log.info('In tearDown()')
def test_source(self) -> None:
self.dump_source()
def test_dummy(self) -> None:
log.info("Testing merge_db() in dummy run mode")
self.do_merge(dummy_run=True)
self.dst_session.commit()
self.dump_destination()
def test_merge_to_empty(self) -> None:
log.info("Testing merge_db() to empty database")
self.do_merge(dummy_run=False)
self.dst_session.commit()
self.dump_destination()
# @unittest.skip
def test_merge_to_existing(self) -> None:
log.info("Testing merge_db() to pre-populated database")
self.do_merge(dummy_run=False)
self.dst_session.commit()
self.do_merge(dummy_run=False)
self.dst_session.commit()
self.dump_destination()
class MergeTestCircular(MergeTestMixin, unittest.TestCase):
"""
Unit tests including a circular dependency, which will fail.
"""
@unittest.expectedFailure
def test_setup_circular(self):
class Parent(self.Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Text)
child_id = Column(Integer, ForeignKey("child.id"))
child = relationship("Child", foreign_keys=[child_id])
class Child(self.Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(Text)
parent_id = Column(Integer, ForeignKey("parent.id"))
parent = relationship(Parent, foreign_keys=[parent_id])
self.Base.metadata.create_all(self.src_engine)
self.Base.metadata.create_all(self.dst_engine)
p1 = Parent(name="Parent 1")
p2 = Parent(name="Parent 2")
c1 = Child(name="Child 1")
c2 = Child(name="Child 2")
c1.parent = p1
c2.parent = p2
p1.child = c1
p2.child = c2
self.src_session.add_all([p1, p2, c1, c2])
self.src_session.commit() # will raise sqlalchemy.exc.CircularDependencyError # noqa
@unittest.expectedFailure
def test_circular(self) -> None:
self.test_setup_circular() # fails here
log.info("Testing merge_db() with circular relationship")
self.do_merge(dummy_run=False) # would fail here, but fails earlier!
self.dst_session.commit()
self.dump_destination()
# =============================================================================
# main
# =============================================================================
# run with "python merge_db.py -v" to be verbose
if __name__ == "__main__":
main_only_quicksetup_rootlogger()
unittest.main()
| 36.359649
| 140
| 0.595585
|
507167017c37a56d341bec21304559a189be2c37
| 2,603
|
py
|
Python
|
analysis/r_omega_two_star/r_contraction.py
|
yamauchi1132/Research_Codes
|
c9e104f8592277cb4aa5c479b014c78c702a0939
|
[
"FSFAP"
] | null | null | null |
analysis/r_omega_two_star/r_contraction.py
|
yamauchi1132/Research_Codes
|
c9e104f8592277cb4aa5c479b014c78c702a0939
|
[
"FSFAP"
] | null | null | null |
analysis/r_omega_two_star/r_contraction.py
|
yamauchi1132/Research_Codes
|
c9e104f8592277cb4aa5c479b014c78c702a0939
|
[
"FSFAP"
] | null | null | null |
import sys, os
sys.path.append(os.pardir)
import numpy as np
import operator
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
from common import *
dirname = "data/sph_t0000.dat"
def calc_r_v_ratio(p1, p2, pos1_cg, vel1_cg, pos2_cg, vel2_cg):
r_x = []
v_x = []
r_y = []
v_y = []
r_z = []
v_z = []
for i in range(len(p1)):
vx = p1[i].vel[0] - vel1_cg[0]
vy = p1[i].vel[1] - vel1_cg[1]
vz = p1[i].vel[2] - vel1_cg[2]
vx2 = vx * vx
vy2 = vy * vy
vz2 = vz * vz
v2 = vx2 + vy2 + vz2
ene = 0.5*v2 + p1[i].pot + p1[i].uene
if(ene < 0):
r_x.append(p1[i].pos[0]-pos1_cg[0])
v_x.append(vx)
r_y.append(p1[i].pos[1]-pos1_cg[1])
v_y.append(vy)
r_z.append(p1[i].pos[2]-pos1_cg[2])
v_z.append(vz)
for i in range(len(p2)):
vx = p2[i].vel[0] - vel2_cg[0]
vy = p2[i].vel[1] - vel2_cg[1]
vz = p2[i].vel[2] - vel2_cg[2]
vx2 = vx * vx
vy2 = vy * vy
vz2 = vz * vz
v2 = vx2 + vy2 + vz2
ene = 0.5*v2 + p2[i].pot + p2[i].uene
if(ene < 0):
r_x.append(p2[i].pos[0]-pos2_cg[0])
v_x.append(vx)
r_y.append(p2[i].pos[1]-pos2_cg[1])
v_y.append(vy)
r_z.append(p2[i].pos[2]-pos2_cg[2])
v_z.append(vz)
return r_x, v_x, r_y, v_y, r_z, v_z
if __name__ == '__main__':
args = sys.argv
data = np.loadtxt(dirname)
p = [Particle() for i in range(len(data))]
readfile(data, p)
p.sort(key=operator.attrgetter("p_id"))
p1 = [Particle() for i in range(len(data)//2)]
p2 = [Particle() for i in range(len(data)//2)]
for i in range(len(p)):
if i < len(p)//2:
p1[i] = p[i]
else:
      p2[i-(len(p)//2)] = p[i]
pos1_cg = np.array([0.,0.,0.])
vel1_cg = np.array([0.,0.,0.])
pos2_cg = np.array([0.,0.,0.])
vel2_cg = np.array([0.,0.,0.])
pos1_cg, vel1_cg = calc_center_of_gravity(p1)
pos2_cg, vel2_cg = calc_center_of_gravity(p2)
r_x, v_x, r_y, v_y, r_z, v_z = calc_r_v_ratio(p1, p2, pos1_cg, vel1_cg, pos2_cg, vel2_cg)
#plot(r1, omega1, r2, omega2)
fx = open('r_contraction_x.data', 'w')
fy = open('r_contraction_y.data', 'w')
fz = open('r_contraction_z.data', 'w')
# f = open('axis_1.3.data', 'w')
for i in range(len(r_x)//2):
fx.write("%e %e %e %e\n"%(r_x[i], v_x[i], r_x[i+(len(r_x)//2)], v_x[i+(len(r_x)//2)]))
for i in range(len(r_y)//2):
fy.write("%e %e %e %e\n"%(r_y[i], v_y[i], r_y[i+(len(r_y)//2)], v_y[i+(len(r_y)//2)]))
for i in range(len(r_z)//2):
fz.write("%e %e %e %e\n"%(r_z[i], v_z[i], r_z[i+(len(r_z)//2)], v_z[i+(len(r_z)//2)]))
fx.close()
fy.close()
fz.close()
| 26.561224
| 91
| 0.557434
|
39a641195b0929989264a20388d5cb86436d83ff
| 370
|
py
|
Python
|
Program/QT_Line.py
|
EleKoPhi/GBCT_new
|
660c41a009a4d9db0a872369cb9519d8e37a9c5d
|
[
"CECILL-B"
] | null | null | null |
Program/QT_Line.py
|
EleKoPhi/GBCT_new
|
660c41a009a4d9db0a872369cb9519d8e37a9c5d
|
[
"CECILL-B"
] | null | null | null |
Program/QT_Line.py
|
EleKoPhi/GBCT_new
|
660c41a009a4d9db0a872369cb9519d8e37a9c5d
|
[
"CECILL-B"
] | null | null | null |
from PyQt5.QtWidgets import *
class QHLine(QFrame):
def __init__(self):
super(QHLine, self).__init__()
self.setFrameShape(QFrame.HLine)
self.setFrameShadow(QFrame.Sunken)
class QVLine(QFrame):
def __init__(self):
super(QVLine, self).__init__()
self.setFrameShape(QFrame.VLine)
self.setFrameShadow(QFrame.Sunken)
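# Minimal usage sketch (editor's addition); the surrounding widgets and layout
# are illustrative only and not part of the original module.
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    window = QWidget()
    layout = QVBoxLayout(window)
    layout.addWidget(QLabel("Above the separator"))
    layout.addWidget(QHLine())  # horizontal sunken rule
    layout.addWidget(QLabel("Below the separator"))
    window.show()
    sys.exit(app.exec_())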
| 26.428571
| 42
| 0.675676
|
b2608621c012f722b2831025cc1520c0b929eedf
| 3,558
|
py
|
Python
|
iscc_bench/imageid/ssc.py
|
coblo/isccbench
|
daf22cdd740fa99c62c4a56233593159e398596b
|
[
"BSD-2-Clause"
] | 3
|
2019-07-07T19:30:26.000Z
|
2020-09-17T10:17:36.000Z
|
iscc_bench/imageid/ssc.py
|
coblo/isccbench
|
daf22cdd740fa99c62c4a56233593159e398596b
|
[
"BSD-2-Clause"
] | 3
|
2019-04-15T13:00:48.000Z
|
2021-12-13T19:44:12.000Z
|
iscc_bench/imageid/ssc.py
|
coblo/isccbench
|
daf22cdd740fa99c62c4a56233593159e398596b
|
[
"BSD-2-Clause"
] | 1
|
2021-02-25T16:13:14.000Z
|
2021-02-25T16:13:14.000Z
|
# -*- coding: utf-8 -*-
"""
Original Code (MIT License) from: https://github.com/BAILOOL/ANMS-Codes
"""
import math
def SSC(keypoints, num_ret_points, tolerance, cols, rows):
exp1 = rows + cols + 2 * num_ret_points
exp2 = (
4 * cols
+ 4 * num_ret_points
+ 4 * rows * num_ret_points
+ rows * rows
+ cols * cols
- 2 * rows * cols
+ 4 * rows * cols * num_ret_points
)
exp3 = math.sqrt(exp2)
exp4 = 2 * (num_ret_points - 1)
sol1 = -round(float(exp1 + exp3) / exp4) # first solution
sol2 = -round(float(exp1 - exp3) / exp4) # second solution
high = (
sol1 if (sol1 > sol2) else sol2
) # binary search range initialization with positive solution
low = math.floor(math.sqrt(len(keypoints) / num_ret_points))
prevWidth = -1
selected_keypoints = []
ResultVec = []
result = []
complete = False
K = num_ret_points
Kmin = round(K - (K * tolerance))
Kmax = round(K + (K * tolerance))
    while not complete:  # was "while ~complete", which is always truthy
width = low + (high - low) / 2
if (
width == prevWidth or low > high
): # needed to reassure the same radius is not repeated again
ResultVec = result # return the keypoints from the previous iteration
break
c = width / 2
# initializing Grid
numCellCols = int(math.floor(cols / c))
numCellRows = int(math.floor(rows / c))
        coveredVec = [
            [False for i in range(numCellCols + 1)] for j in range(numCellRows + 1)
        ]
result = []
for i in range(len(keypoints)):
row = int(
math.floor(keypoints[i].pt[1] / c)
) # get position of the cell current point is located at
col = int(math.floor(keypoints[i].pt[0] / c))
if coveredVec[row][col] == False: # if the cell is not covered
result.append(i)
rowMin = int(
(row - math.floor(width / c))
if ((row - math.floor(width / c)) >= 0)
else 0
) # get range which current radius is covering
rowMax = int(
(row + math.floor(width / c))
if ((row + math.floor(width / c)) <= numCellRows)
else numCellRows
)
colMin = int(
(col - math.floor(width / c))
if ((col - math.floor(width / c)) >= 0)
else 0
)
colMax = int(
(col + math.floor(width / c))
if ((col + math.floor(width / c)) <= numCellCols)
else numCellCols
)
for rowToCov in range(rowMin, rowMax + 1):
for colToCov in range(colMin, colMax + 1):
                        if not coveredVec[rowToCov][colToCov]:
coveredVec[rowToCov][
colToCov
] = True # cover cells within the square bounding box with width w
if len(result) >= Kmin and len(result) <= Kmax: # solution found
ResultVec = result
complete = True
elif len(result) < Kmin:
high = width - 1 # update binary search range
else:
low = width + 1
prevWidth = width
for i in range(len(ResultVec)):
selected_keypoints.append(keypoints[ResultVec[i]])
return selected_keypoints
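# Minimal usage sketch (editor's addition). SSC() only needs objects exposing a
# cv2.KeyPoint-like ``pt`` attribute; in a real pipeline the keypoints would
# typically be pre-sorted by detector response (an assumption carried over from
# the upstream ANMS examples).
if __name__ == "__main__":
    import random

    class _FakeKeypoint:
        def __init__(self, x, y):
            self.pt = (x, y)

    cols, rows = 640, 480
    random.seed(0)
    kps = [
        _FakeKeypoint(random.uniform(0, cols - 1), random.uniform(0, rows - 1))
        for _ in range(500)
    ]
    kept = SSC(kps, num_ret_points=100, tolerance=0.1, cols=cols, rows=rows)
    print("kept %d of %d keypoints" % (len(kept), len(kps)))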
| 34.543689
| 95
| 0.497752
|
6a13e8a5d86376c8e8432dd9568bb1797c23a786
| 491
|
py
|
Python
|
Python3/0087-Scramble-String/soln-1.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0087-Scramble-String/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0087-Scramble-String/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
class Solution:
def isScramble(self, s1: str, s2: str) -> bool:
if len(s1) != len(s2):
return False
if s1 == s2:
return True
if sorted(s1) != sorted(s2):
return False
n = len(s1)
for i in range(1, n):
if (self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:], s2[:-i])) or (self.isScramble(s1[:i], s2[:i]) and self.isScramble(s1[i:], s2[i:])):
return True
return False
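# Quick illustrative check (editor's addition); the inputs are the classic
# LeetCode examples ("great"/"rgeat" is a valid scramble, "abcde"/"caebd" is not).
if __name__ == "__main__":
    solver = Solution()
    print(solver.isScramble("great", "rgeat"))  # expected: True
    print(solver.isScramble("abcde", "caebd"))  # expected: False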
| 35.071429
| 160
| 0.492872
|
43ab2ee8a025745f55a9ab0ab119316b867a1072
| 2,272
|
py
|
Python
|
numba_dppy/examples/debug/side-by-side-2.py
|
akharche/numba-dppy
|
f12dac64b149bd72f305f341ff64b796bbb648c1
|
[
"Apache-2.0"
] | 22
|
2020-11-25T12:13:33.000Z
|
2022-03-10T14:26:14.000Z
|
numba_dppy/examples/debug/side-by-side-2.py
|
akharche/numba-dppy
|
f12dac64b149bd72f305f341ff64b796bbb648c1
|
[
"Apache-2.0"
] | 439
|
2020-11-17T14:48:38.000Z
|
2022-03-31T10:09:47.000Z
|
numba_dppy/examples/debug/side-by-side-2.py
|
akharche/numba-dppy
|
f12dac64b149bd72f305f341ff64b796bbb648c1
|
[
"Apache-2.0"
] | 11
|
2020-11-24T14:29:46.000Z
|
2022-03-10T05:50:27.000Z
|
# Copyright 2020, 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import dpctl
import numba
import numpy as np
import numba_dppy as dppy
def common_loop_body(i, a, b):
param_a = a[i]
param_b = b[i]
param_c = param_a + 10 # Set breakpoint here
param_d = param_b * 0.5
result = param_c + param_d
return result
def scenario(api):
print("Using API:", api)
global_size = 10
a, b, c = arguments(global_size)
if api == "numba-dppy-kernel":
dppy_func_driver(a, b, c)
else:
numba_func_driver(a, b, c)
print(a, b, c, sep="\n")
def arguments(N, dtype=np.float32):
a = np.arange(N, dtype=dtype)
b = np.arange(N, dtype=dtype)
c = np.empty_like(a)
return a, b, c
@numba.njit(debug=True)
def numba_func_driver(a, b, c):
for i in range(len(c)):
c[i] = numba_loop_body(i, a, b)
def dppy_func_driver(a, b, c):
device = dpctl.select_default_device()
with dpctl.device_context(device):
dppy_kernel[len(c), dppy.DEFAULT_LOCAL_SIZE](a, b, c)
@dppy.kernel(debug=True)
def dppy_kernel(a_in_kernel, b_in_kernel, c_in_kernel):
i = dppy.get_global_id(0)
c_in_kernel[i] = dppy_loop_body(i, a_in_kernel, b_in_kernel)
numba_loop_body = numba.njit(debug=True)(common_loop_body)
dppy_loop_body = dppy.func(debug=True)(common_loop_body)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--api",
required=False,
default="numba",
choices=["numba", "numba-dppy-kernel"],
help="Start the version of functions using numba or numba-dppy API",
)
args = parser.parse_args()
scenario(args.api)
print("Done...")
if __name__ == "__main__":
main()
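# Usage sketch (editor's addition), derived from the argparse setup above:
#
#   python side-by-side-2.py                          # numba.njit version
#   python side-by-side-2.py --api numba-dppy-kernel  # numba-dppy kernel version
#
# The debug=True flags suggest the example is intended to be stepped through in
# a debugger; the exact debugger workflow is not shown here.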
| 23.915789
| 76
| 0.673856
|
96b9d10d0fa3b66dc3f0332d443b25e9744b44e2
| 43,987
|
py
|
Python
|
script_gravidade_bif.py
|
jpra2/Presto2.1
|
e2a3e3121280b011a6be2a59be708623bdc0b482
|
[
"CNRI-Python"
] | 1
|
2018-12-04T19:32:27.000Z
|
2018-12-04T19:32:27.000Z
|
script_gravidade_bif.py
|
jpra2/Presto2.1
|
e2a3e3121280b011a6be2a59be708623bdc0b482
|
[
"CNRI-Python"
] | null | null | null |
script_gravidade_bif.py
|
jpra2/Presto2.1
|
e2a3e3121280b011a6be2a59be708623bdc0b482
|
[
"CNRI-Python"
] | null | null | null |
from test34_bif import Msclassic_bif
import shutil
import time
import numpy as np
from PyTrilinos import Epetra, AztecOO, EpetraExt
class gravidade_bif(Msclassic_bif):
def __init__(self):
super().__init__()
self.run_grav()
def create_flux_vector_pf_gr(self):
"""
    Creates a container that stores the flux of each fine-mesh volume.
    Fluxes are stored by direction (six directions per volume),
    adding the effect of gravity.
"""
t0 = time.time()
verif_local = 1
lim4 = 1e-4
soma = 0
soma2 = 0
soma3 = 0
store_flux_pf = {}
for volume in self.all_fine_vols:
#1
flux = {}
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
centroid_volume = self.mesh_topo_util.get_average_position([volume])
z_vol = self.tz - centroid_volume[2]
adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
pvol = self.mb.tag_get_data(self.pf_tag, volume, flat=True)[0]
for adj in adjs_vol:
#2
gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
padj = self.mb.tag_get_data(self.pf_tag, adj, flat=True)[0]
kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
centroid_adj = self.mesh_topo_util.get_average_position([adj])
z_adj = self.tz - centroid_adj[2]
direction = centroid_adj - centroid_volume
altura = centroid_adj[2]
unit = direction/np.linalg.norm(direction)
        #unit = unit vector in the direction of "direction"
        uni = self.unitary(direction)
        z = uni[2]
        # uni = unit vector with its components taken as positive values
kvol = np.dot(np.dot(kvol,uni),uni)
kadj = np.dot(np.dot(kadj,uni),uni)
keq = self.kequiv(kvol, kadj)
keq = keq*(np.dot(self.A, uni))/(self.mi)
grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))
grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))
q = (grad_p)*keq - grad_z*keq*self.gama
flux[tuple(unit)] = q
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
#1
store_flux_pf[volume] = flux
flt = sum(flux.values())
# print(gid_vol)
# print(flt)
# print(store_flux_pf)
# print('\n')
# import pdb; pdb.set_trace()
self.mb.tag_set_data(self.flux_fine_pf_tag, volume, flt)
soma += flt
if abs(flt) > lim4 and volume not in self.wells:
verif_local = 0
print('nao esta dando conservativo na malha fina')
print(gid_vol)
print(flt)
import pdb; pdb.set_trace()
soma_prod = []
soma_inj = []
with open('fluxo_malha_fina_gr.txt', 'w') as arq:
for volume in self.wells:
gid = self.mb.tag_get_data(self.global_id_tag, volume, flat = True)[0]
values = store_flux_pf[volume].values()
arq.write('gid:{0} , fluxo:{1}\n'.format(gid, sum(values)))
# print('gid:{0}'.format(gid))
# print('valor:{0}'.format(sum(values)))
if volume in self.wells_inj:
soma_inj.append(sum(values))
else:
soma_prod.append(sum(values))
# print('\n')
soma2 += sum(values)
arq.write('\n')
arq.write('soma_inj:{0}\n'.format(sum(soma_inj)))
arq.write('soma_prod:{0}'.format(sum(soma_prod)))
print('soma_inj:{0}'.format(sum(soma_inj)))
print('soma_prod:{0}'.format(sum(soma_prod)))
print('soma2 : {0}'.format(soma2))
if abs(soma2) > lim4:
print('nao esta dando conservativo globalmente')
import pdb; pdb.set_trace()
# print('saiu de def create_flux_vector_pf')
print('\n')
tf = time.time()
# import pdb; pdb.set_trace()
return store_flux_pf
def create_flux_vector_pf_gr_bif_1(self):
"""
    Creates a container that stores the flux of each fine-mesh volume.
    Fluxes are stored by direction (six directions per volume),
    adding the effect of gravity.
"""
# volumes_in_primal_set = self.mb.tag_get_data(self.volumes_in_primal_tag, 0, flat=True)[0]
# volumes_in_primal_set = self.mb.get_entities_by_handle(volumes_in_primal_set)
lim = 1e-4
self.dfdsmax = 0
self.fimin = 10
self.qmax = 0
self.store_velocity_pf = {}
store_flux_pf = {}
for primal in self.primals:
#1
primal_id1 = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
primal_id = self.ident_primal[primal_id1]
fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
for volume in fine_elems_in_primal:
#2
list_keq = []
list_p = []
list_gid = []
list_keq3 = []
list_gidsadj = []
list_qw = []
qw3 = []
qw = 0
flux = {}
velocity = {}
fi = self.mb.tag_get_data(self.fi_tag, volume, flat=True)[0]
if fi < self.fimin:
self.fimin = fi
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume, flat=True)[0]
lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume, flat=True)[0]
lbt_vol = lamb_w_vol + lamb_o_vol
fw_vol = self.mb.tag_get_data(self.fw_tag, volume, flat=True)[0]
sat_vol = self.mb.tag_get_data(self.sat_tag, volume, flat=True)[0]
centroid_volume = self.mesh_topo_util.get_average_position([volume])
z_vol = self.tz - centroid_volume[2]
adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
pvol = self.mb.tag_get_data(self.pf_tag, volume, flat=True)[0]
for adj in adjs_vol:
#3
gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
sat_adj = self.mb.tag_get_data(self.sat_tag, adj, flat=True)[0]
padj = self.mb.tag_get_data(self.pf_tag, adj, flat=True)[0]
kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
centroid_adj = self.mesh_topo_util.get_average_position([adj])
z_adj = self.tz - centroid_adj[2]
direction = centroid_adj - centroid_volume
unit = direction/np.linalg.norm(direction)
          #unit = unit vector in the direction of "direction"
          uni = self.unitary(direction)
          # uni = unit vector with its components taken as positive values
kvol = np.dot(np.dot(kvol,uni),uni)
kadj = np.dot(np.dot(kadj,uni),uni)
lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj, flat=True)[0]
lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj, flat=True)[0]
lbt_adj = lamb_w_adj + lamb_o_adj
fw_adj = self.mb.tag_get_data(self.fw_tag, adj, flat=True)[0]
keq3 = (kvol*lamb_w_vol + kadj*lamb_w_adj)/2.0
# kvol = kvol*(lamb_w_vol + lamb_o_vol)
# kadj = kadj*(lamb_w_adj + lamb_o_adj)
keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)
grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))
grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))
q = ((grad_p) - grad_z*self.gama)*(np.dot(self.A, uni))*keq
list_keq.append(keq)
list_p.append(padj)
list_gid.append(gid_adj)
keq2 = keq
qw += q*(fw_adj + fw_vol)/2.0
#keq = keq*(np.dot(self.A, uni))
#pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
#padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
#grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))
#q = (grad_p)*keq
#qw3.append(grad_p*keq3*(np.dot(self.A, uni)))
# if grad_p < 0:
# #4
# fw = fw_vol
# qw += (fw*grad_p*kvol*(np.dot(self.A, uni)))
# list_qw.append(fw*grad_p*kvol*(np.dot(self.A, uni)))
#
# else:
# fw = fw_adj
# qw += (fw*grad_p*kadj*(np.dot(self.A, uni)))
# list_qw.append(fw*grad_p*kadj*(np.dot(self.A, uni)))
# if gid_adj > gid_vol:
# v = -(grad_p)*keq2
# else:
# v = (grad_p)*keq2
flux[tuple(unit)] = q
#velocity[tuple(unit)] = v
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
if abs(sat_adj - sat_vol) < lim or abs(fw_adj -fw_vol) < lim:
continue
dfds = abs((fw_adj - fw_vol)/(sat_adj - sat_vol))
# print('aqui')
# print(gid_vol)
# print(gid_adj)
# print(fw_adj - fw_vol)
# print(sat_adj - sat_vol)
# print(dfds)
if dfds > self.dfdsmax:
self.dfdsmax = dfds
#2
# list_keq.append(-sum(list_keq))
# list_p.append(pvol)
# list_gid.append(gid_vol)
#
# list_keq = np.array(list_keq)
# list_p = np.array(list_p)
# resultado = sum(list_keq*list_p)
# print(gid_vol)
# print(velocity)
# print('\n')
# import pdb; pdb.set_trace()
#self.store_velocity_pf[volume] = velocity
store_flux_pf[volume] = flux
flt = sum(flux.values())
print('gid')
print(gid_vol)
print('flux')
print(flt)
print('\n')
import pdb; pdb.set_trace()
self.mb.tag_set_data(self.flux_fine_pf_tag, volume, flt)
if abs(sum(flux.values())) > lim and volume not in self.wells:
print('nao esta dando conservativo na malha fina')
print(gid_vol)
print(sum(flux.values()))
import pdb; pdb.set_trace()
qmax = max(list(map(abs, flux.values())))
if qmax > self.qmax:
self.qmax = qmax
if volume in self.wells_prod:
qw_out = sum(flux.values())*fw_vol
#qw3.append(-qw_out)
qo_out = sum(flux.values())*(1 - fw_vol)
self.prod_o.append(qo_out)
self.prod_w.append(qw_out)
qw = qw - qw_out
if abs(qw) < lim and qw < 0.0:
qw = 0.0
elif qw < 0 and volume not in self.wells_inj:
print('gid')
print(gid_vol)
print('qw < 0')
print(qw)
import pdb; pdb.set_trace()
else:
pass
# if (qw < 0.0 or sum(qw3) < 0.0) and volume not in self.wells_inj:
# print('qw3')
# print(sum(qw3))
# print('qw')
# print(qw)
# import pdb; pdb.set_trace()
self.mb.tag_set_data(self.flux_w_tag, volume, qw)
# print(self.dfdsmax)
# print(sum(flux.values()))
# print(sum(qw))
# print(sum(qw3))
# print('\n')
soma_inj = []
soma_prod = []
soma2 = 0
with open('fluxo_malha_fina_bif_gr{0}.txt'.format(self.loop), 'w') as arq:
for volume in self.wells:
gid = self.mb.tag_get_data(self.global_id_tag, volume, flat = True)[0]
        values = store_flux_pf[volume].values()  # local dict built above
arq.write('gid:{0} , fluxo:{1}\n'.format(gid, sum(values)))
# print('gid:{0}'.format(gid))
# print('valor:{0}'.format(sum(values)))
if volume in self.wells_inj:
soma_inj.append(sum(values))
else:
soma_prod.append(sum(values))
# print('\n')
soma2 += sum(values)
arq.write('\n')
arq.write('soma_inj:{0}\n'.format(sum(soma_inj)))
arq.write('soma_prod:{0}\n'.format(sum(soma_prod)))
arq.write('tempo:{0}'.format(self.tempo))
return store_flux_pf
def create_flux_vector_pms_gr(self):
"""
    Creates a container that stores the flux of each fine-mesh volume.
    Fluxes are stored by direction (six directions per volume),
    adding the effect of gravity.
"""
soma_prod = 0
soma_inj = 0
lim4 = 1e-4
store_velocity = {}
store_flux = {}
for primal in self.primals:
#1
primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
primal_id = self.ident_primal[primal_id]
fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
for volume in fine_elems_in_primal:
#2
flux = {}
velocity = {}
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
centroid_volume = self.mesh_topo_util.get_average_position([volume])
z_vol = self.tz - centroid_volume[2]
adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
for adj in adjs_vol:
#3
gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
if adj not in fine_elems_in_primal:
#4
pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
#3
else:
#4
pvol = self.mb.tag_get_data(self.pcorr_tag, volume, flat=True)[0]
padj = self.mb.tag_get_data(self.pcorr_tag, adj, flat=True)[0]
#3
kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
centroid_adj = self.mesh_topo_util.get_average_position([adj])
z_adj = self.tz - centroid_adj[2]
direction = centroid_adj - centroid_volume
unit = direction/np.linalg.norm(direction)
          #unit = unit vector in the direction of "direction"
          uni = self.unitary(direction)
          # uni = unit vector with its components taken as positive values
kvol = np.dot(np.dot(kvol,uni),uni)
kadj = np.dot(np.dot(kadj,uni),uni)
keq = self.kequiv(kvol, kadj)/(self.mi)
keq2 = keq
keq = keq*(np.dot(self.A, uni))
pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))
grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))
grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))
q = (grad_p)*keq - grad_z*keq*self.gama
print((grad_p)*keq)
print(- grad_z*keq*self.gama)
print(q)
print(self.store_flux_pf_gr[volume][tuple(unit)])
print('\n')
import pdb; pdb.set_trace()
if gid_adj > gid_vol:
v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)
else:
v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)
flux[tuple(unit)] = q
velocity[tuple(unit)] = v
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
#2
# print(gid_vol)
# print(velocity)
# print('\n')
# import pdb; pdb.set_trace()
store_flux[volume] = flux
self.mb.tag_set_data(self.flux_fine_pms_tag, volume, sum(flux.values()))
# flt = sum(flux.values())
# if volume not in self.wells_inj and volume not in self.wells_prod:
# lim4 = 1e-7
# if abs(flt) > lim4:
# print(gid_vol)
# print(flt)
# import pdb; pdb.set_trace()
# flt = sum(flux.values())
store_velocity[volume] = velocity
for volume in set(self.all_fine_vols) - set(self.wells):
gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
values = store_flux[volume].values()
if sum(values) > lim4:
print('fluxo multiescala nao esta dando conservativo')
print('gid:{0}'.format(gid))
print(sum(values))
import pdb; pdb.set_trace()
with open('fluxo_multiescala_gr.txt', 'w') as arq:
for volume in self.wells:
gid = self.mb.tag_get_data(self.global_id_tag, volume, flat= True)[0]
values = store_flux[volume].values()
if volume in self.wells_inj:
soma_inj += sum(values)
else:
soma_prod += sum(values)
arq.write('gid:{0} , fluxo:{1}\n'.format(gid, sum(values)))
arq.write('\n')
arq.write('soma_inj:{0}\n'.format(soma_inj))
arq.write('soma_prod:{0}\n'.format(soma_prod))
return store_flux
def mount_lines_5_gr(self, volume, map_id):
"""
    Assembles the rows of the matrix.
    Returns the values temp_k and the mapping temp_ids.
    map_id = local mapping of the elements
    Adds the effect of gravity.
    temp_ids = []  # list of ids given by the mapping
    temp_k = []    # list of equivalent permeabilities
    temp_kgr = []  # list of equivalent permeabilities multiplied by gamma
    temp_hs = []   # list of element height differences
"""
#0
soma2 = 0.0
soma3 = 0.0
temp_ids = []
temp_k = []
temp_kgr = []
temp_hs = []
gid1 = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
volume_centroid = self.mesh_topo_util.get_average_position([volume])
adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
for adj in adj_volumes:
#2
gid2 = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
#temp_ps.append(padj)
adj_centroid = self.mesh_topo_util.get_average_position([adj])
direction = adj_centroid - volume_centroid
altura = adj_centroid[2]
uni = self.unitary(direction)
z = uni[2]
kvol = np.dot(np.dot(kvol,uni),uni)
kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
kadj = np.dot(np.dot(kadj,uni),uni)
keq = self.kequiv(kvol, kadj)
keq = keq*(np.dot(self.A, uni))/(self.mi*abs(np.dot(direction, uni)))
keq2 = keq*self.gama
temp_kgr.append(-keq2)
temp_hs.append(self.tz - altura)
temp_ids.append(map_id[adj])
temp_k.append(-keq)
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
#1
# soma2 = soma2*(self.tz-volume_centroid[2])
# soma2 = -(soma2 + soma3)
temp_hs.append(self.tz-volume_centroid[2])
temp_kgr.append(-sum(temp_kgr))
temp_k.append(-sum(temp_k))
temp_ids.append(map_id[volume])
#temp_ps.append(pvol)
return temp_k, temp_ids, temp_hs, temp_kgr
def Neuman_problem_6_gr(self):
# self.set_of_collocation_points_elems = set()
#0
"""
map_volumes[volume]
map_volumes[adj]
"""
volumes_in_primal_set = self.mb.tag_get_data(self.volumes_in_primal_tag, 0, flat=True)[0]
volumes_in_primal_set = self.mb.get_entities_by_handle(volumes_in_primal_set)
for primal in self.primals:
#1
primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
volumes_in_primal = set(fine_elems_in_primal) & set(volumes_in_primal_set)
#all_volumes = list(fine_elems_in_primal)
dim = len(fine_elems_in_primal)
map_volumes = dict(zip(fine_elems_in_primal, range(len(fine_elems_in_primal))))
std_map = Epetra.Map(len(fine_elems_in_primal), 0, self.comm)
b = Epetra.Vector(std_map)
A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
# b_np = np.zeros(dim)
# A_np = np.zeros((dim, dim))
for volume in fine_elems_in_primal:
#2
soma = 0
centroid_volume = self.mesh_topo_util.get_average_position([volume])
z_vol = self.tz - centroid_volume[2]
pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
k_vol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
temp_k = []
temp_id = []
if (volume in self.wells_d) or (volume in self.set_of_collocation_points_elems):
#3
# value = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
temp_k.append(1.0)
temp_id.append(map_volumes[volume])
b[map_volumes[volume]] = pvol
# b_np[map_volumes[volume]] = value
#2
elif volume in volumes_in_primal:
#3
for adj in adjs_vol:
#4
gid2 = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
centroid_adj = self.mesh_topo_util.get_average_position([adj])
z_adj = self.tz - centroid_adj[2]
direction = centroid_adj - centroid_volume
uni = self.unitary(direction)
# h = abs(np.dot(direction, uni))
k_vol = np.dot(np.dot(k_vol,uni),uni)
k_adj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
k_adj = np.dot(np.dot(k_adj,uni),uni)
keq = self.kequiv(k_vol, k_adj)
keq = keq*(np.dot(self.A, uni))/(self.mi*abs(np.dot(direction, uni)))
keq2 = keq*self.gama
if adj in fine_elems_in_primal:
#5
# soma += keq
temp_k.append(-keq)
temp_id.append(map_volumes[adj])
b[map_volumes[volume]] += -(z_adj - z_vol)*keq2
#4
else:
#5
q_in = (padj - pvol)*(keq) - (z_adj - z_vol)*keq2
b[map_volumes[volume]] += q_in
# b_np[map_volumes[volume]] += q_in
k_vol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
#3
temp_k.append(-sum(temp_k))
temp_id.append(map_volumes[volume])
if volume in self.wells_n:
#4
index = self.wells_n.index(volume)
if volume in self.wells_inj:
#5
b[map_volumes[volume]] += self.set_q[index]
# b_np[map_volumes[volume]] += self.set_q[index]
#4
else:
#5
b[map_volumes[volume]] -= self.set_q[index]
# b_np[map_volumes[volume]] -= self.set_q[index]
#2
else:
#3
temp_k, temp_id, temp_hs, temp_kgr = self.mount_lines_5_gr(volume, map_volumes)
temp_hs = np.array(temp_hs)
temp_kgr = np.array(temp_kgr)
b[map_volumes[volume]] += - (np.dot(temp_hs, temp_kgr))
print(- (np.dot(temp_hs, temp_kgr)))
print(temp_hs)
print(temp_kgr)
print('\n')
import pdb; pdb.set_trace()
if volume in self.wells_n:
#4
index = self.wells_n.index(volume)
if volume in self.wells_inj:
#5
b[map_volumes[volume]] += self.set_q[index]
# b_np[map_volumes[volume]] += self.set_q[index]
#4
else:
#5
b[map_volumes[volume]] -= self.set_q[index]
# b_np[map_volumes[volume]] -= self.set_q[index]
#2
A.InsertGlobalValues(map_volumes[volume], temp_k, temp_id)
# A_np[map_volumes[volume], temp_id] = temp_k
# print('primal_id')
# print(self.ident_primal[primal_id])
# print('gid: {0}'.format(gid1))
# print('temp_id:{0}'.format(temp_id))
# print('temp_k:{0}'.format(temp_k))
# print(A_np[map_volumes[volume]])
# print('b_np:{0}'.format(b_np[map_volumes[volume]]))
#1
A.FillComplete()
x = self.solve_linear_problem(A, b, dim)
# x_np = np.linalg.solve(A_np, b_np)
# print(x_np)
for volume in fine_elems_in_primal:
#2
self.mb.tag_set_data(self.pcorr_tag, volume, x[map_volumes[volume]])
# self.mb.tag_set_data(self.pms3_tag, volume, x_np[map_volumes[volume]])
def run_grav(self):
"""
    Runs the whole program, adding the effect of gravity.
"""
    # Direct solution
self.prod_w = []
self.prod_o = []
t0 = time.time()
# self.set_volumes_in_primal()
self.set_sat_in()
self.set_lamb_2()
self.set_global_problem_vf_3_gr1_bif()
self.Pf = self.solve_linear_problem(self.trans_fine, self.b, len(self.all_fine_vols_ic))
self.organize_Pf()
del self.Pf
self.mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf_all))
del self.Pf_all
self.test_conservation_fine()
# self.store_flux_pf_gr_bif = self.create_flux_vector_pf_gr_bif_1()
"""
################################################################
# Solucao Multiescala
self.calculate_restriction_op_2()
self.calculate_prolongation_op_het()
self.organize_op()
self.Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(self.trilOR, self.trans_fine, self.nf_ic), self.trilOP, self.nf_ic), self.nc, self.nc)
self.Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf_ic, self.b), self.nc)
self.Pc = self.solve_linear_problem(self.Tc, self.Qc, self.nc)
self.set_Pc()
self.Pms = self.multimat_vector(self.trilOP, self.nf_ic, self.Pc)
del self.trilOP
del self.trilOR
del self.Tc
del self.Qc
del self.Pc
self.organize_Pms()
del self.Pms
self.mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms_all))
del self.Pms_all
self.erro()
self.test_conservation_coarse_gr()
# self.Neuman_problem_6_gr()
# self.store_flux_pms_gr = self.create_flux_vector_pms_gr()
####################################################################
"""
print('acaboooou')
self.mb.write_file('new_out_bif_gr.vtk')
shutil.copytree(self.caminho1, self.pasta)
def set_global_problem_vf_3_gr1_bif(self):
"""
    Fine-mesh transmissibility, excluding the volumes with prescribed pressure,
    using the average mobility.
    Adds the effect of gravity.
"""
#0
std_map = Epetra.Map(len(self.all_fine_vols_ic),0,self.comm)
self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
self.b = Epetra.Vector(std_map)
for volume in self.all_fine_vols_ic - set(self.neigh_wells_d):
#1
soma = 0.0
soma2 = 0.0
soma3 = 0.0
volume_centroid = self.mesh_topo_util.get_average_position([volume])
adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
lbt_vol = lamb_w_vol + lamb_o_vol
z_vol = self.tz - volume_centroid[2]
soma = 0.0
temp_glob_adj = []
temp_k = []
flux_gr = []
for adj in adj_volumes:
#2
global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
adj_centroid = self.mesh_topo_util.get_average_position([adj])
z_adj = self.tz - adj_centroid[2]
altura = adj_centroid[2]
direction = adj_centroid - volume_centroid
uni = self.unitary(direction)
kvol = np.dot(np.dot(kvol,uni),uni)
#kvol = kvol*(lamb_w_vol + lamb_o_vol)
kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
kadj = np.dot(np.dot(kadj,uni),uni)
lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
lbt_adj = lamb_w_adj + lamb_o_adj
#kadj = kadj*(lamb_w_adj + lamb_o_adj)
keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)
keq = keq*(np.dot(self.A, uni)/float(abs(np.dot(direction, uni))))
grad_z = (z_adj - z_vol)
q_grad_z = grad_z*self.gama*keq
flux_gr.append(q_grad_z)
temp_glob_adj.append(self.map_vols_ic[adj])
temp_k.append(-keq)
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
#1
soma2 = -sum(flux_gr)
temp_k.append(-sum(temp_k))
temp_glob_adj.append(self.map_vols_ic[volume])
self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)
if volume in self.wells_n:
#2
index = self.wells_n.index(volume)
# tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)
if volume in self.wells_inj:
#3
self.b[self.map_vols_ic[volume]] += self.set_q[index] + soma2
#2
else:
#3
self.b[self.map_vols_ic[volume]] += -self.set_q[index] + soma2
#1
else:
#2
self.b[self.map_vols_ic[volume]] += soma2
#0
for volume in self.neigh_wells_d:
#1
soma2 = 0.0
soma3 = 0.0
volume_centroid = self.mesh_topo_util.get_average_position([volume])
z_vol = self.tz - volume_centroid[2]
adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
lbt_vol = lamb_w_vol + lamb_o_vol
soma = 0.0
temp_glob_adj = []
temp_k = []
flux_gr = []
for adj in adj_volumes:
#2
global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
adj_centroid = self.mesh_topo_util.get_average_position([adj])
z_adj = self.tz - adj_centroid[2]
altura = adj_centroid[2]
direction = adj_centroid - volume_centroid
uni = self.unitary(direction)
z = uni[2]
kvol = np.dot(np.dot(kvol,uni),uni)
#kvol = kvol*(lamb_w_vol + lamb_o_vol)
kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
kadj = np.dot(np.dot(kadj,uni),uni)
lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
        lbt_adj = lamb_w_adj + lamb_o_adj  # total mobility (water + oil)
#kadj = kadj*(lamb_w_adj + lamb_o_adj)
keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)
keq = keq*(np.dot(self.A, uni)/(abs(np.dot(direction, uni))))
grad_z = (z_adj - z_vol)
q_grad_z = grad_z*self.gama*keq
flux_gr.append(q_grad_z)
#2
if adj in self.wells_d:
#3
soma = soma + keq
index = self.wells_d.index(adj)
self.b[self.map_vols_ic[volume]] += self.set_p[index]*(keq)
#2
else:
#3
temp_glob_adj.append(self.map_vols_ic[adj])
temp_k.append(-keq)
soma = soma + keq
#2
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
#1
soma2 = -sum(flux_gr)
temp_k.append(soma)
temp_glob_adj.append(self.map_vols_ic[volume])
self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)
if volume in self.wells_n:
#2
index = self.wells_n.index(volume)
# tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)
if volume in self.wells_inj:
#3
self.b[self.map_vols_ic[volume]] += self.set_q[index] + soma2
#2
else:
#3
self.b[self.map_vols_ic[volume]] += -self.set_q[index] + soma2
#1
else:
#2
self.b[self.map_vols_ic[volume]] += soma2
#0
self.trans_fine.FillComplete()
def test_conservation_coarse_gr(self):
"""
verifica se o fluxo é conservativo nos volumes da malha grossa
utilizando a pressao multiescala para calcular os fluxos na interface dos mesmos
"""
#0
volumes_in_primal_set = self.mb.tag_get_data(self.volumes_in_primal_tag, 0, flat=True)[0]
volumes_in_primal_set = self.mb.get_entities_by_handle(volumes_in_primal_set)
lim = 10**(-6)
soma = 0
Qc2 = []
prim = []
for primal in self.primals:
#1
Qc = 0
# my_adjs = set()
primal_id1 = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
primal_id = self.ident_primal[primal_id1]
fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
volumes_in_primal = set(fine_elems_in_primal) & set(volumes_in_primal_set)
for volume in volumes_in_primal:
#2
adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
for adj in adjs_vol:
#3
if adj in fine_elems_in_primal:
continue
# my_adjs.add(adj)
pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
centroid_volume = self.mesh_topo_util.get_average_position([volume])
centroid_adj = self.mesh_topo_util.get_average_position([adj])
z_vol = self.tz - centroid_volume[2]
z_adj = self.tz - centroid_adj[2]
direction = centroid_adj - centroid_volume
uni = self.unitary(direction)
kvol = np.dot(np.dot(kvol,uni),uni)
kadj = np.dot(np.dot(kadj,uni),uni)
keq = self.kequiv(kvol, kadj)
keq = keq*(np.dot(self.A, uni))/(self.mi) #*np.dot(self.h, uni))
grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))
grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))
q = (grad_p)*keq - grad_z*keq*self.gama
Qc += q
#1
# print('Primal:{0} ///// Qc: {1}'.format(primal_id, Qc))
Qc2.append(Qc)
prim.append(primal_id)
self.mb.tag_set_data(self.flux_coarse_tag, fine_elems_in_primal, np.repeat(Qc, len(fine_elems_in_primal)))
# if Qc > lim:
# print('Qc nao deu zero')
# import pdb; pdb.set_trace()
with open('Qc_gr.txt', 'w') as arq:
for i,j in zip(prim, Qc2):
arq.write('Primal:{0} ///// Qc: {1}\n'.format(i, j))
arq.write('\n')
arq.write('sum Qc:{0}'.format(sum(Qc2)))
def test_conservation_fine(self):
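        # Debug helper: prints, volume by volume, the fine-scale pressure (pf_tag), elevations and
        # face transmissibilities, and accumulates the net flux qt (pressure plus gravity terms)
        # so local conservation can be inspected interactively.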
for volume in self.all_fine_vols:
gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
pvol = self.mb.tag_get_data(self.pf_tag, volume, flat=True)[0]
sat_vol = self.mb.tag_get_data(self.sat_tag, volume, flat=True)[0]
volume_centroid = self.mesh_topo_util.get_average_position([volume])
adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
lbt_vol = lamb_w_vol + lamb_o_vol
z_vol = self.tz - volume_centroid[2]
print('gidvol')
print(gid_vol)
print('pvol')
print(pvol)
print('zvol')
print(z_vol)
flux_gr = []
qt = 0
print('\n')
for adj in adj_volumes:
padj = self.mb.tag_get_data(self.pf_tag, adj, flat=True)[0]
sat_adj = self.mb.tag_get_data(self.sat_tag, adj, flat=True)[0]
gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
sat_adj = self.mb.tag_get_data(self.sat_tag, adj, flat=True)[0]
padj = self.mb.tag_get_data(self.pf_tag, adj, flat=True)[0]
kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
centroid_adj = self.mesh_topo_util.get_average_position([adj])
z_adj = self.tz - centroid_adj[2]
print('gidadj')
print(gid_adj)
print('padj')
print(padj)
print('zadj')
print(z_adj)
print('\n')
direction = centroid_adj - volume_centroid
# unit = direction/np.linalg.norm(direction)
                # unit = unit vector in the direction of 'direction'
uni = self.unitary(direction)
                # uni = positive (absolute) value of the unit vector components
kvol = np.dot(np.dot(kvol,uni),uni)
kadj = np.dot(np.dot(kadj,uni),uni)
lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj, flat=True)[0]
lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj, flat=True)[0]
lbt_adj = lamb_w_adj + lamb_o_adj
keq = self.kequiv(kvol, kadj)*(lbt_vol + lbt_adj)/2.0
keq = keq*(np.dot(self.A, uni)/float(abs(np.dot(direction, uni))))
grad_p = (padj - pvol)
grad_z = (z_adj - z_vol)
print('keq')
print(keq)
print('gradp')
print(grad_p)
print('grad_z')
print(grad_z)
print('grad_p*keq')
print(grad_p*keq)
print('gradz*gama*keq')
print(grad_z*self.gama*keq)
print('gradp*keq - gradz*gama*keq')
print(grad_p*keq - grad_z*self.gama*keq)
print('\n')
qt += (grad_p - grad_z*self.gama)*keq
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
print(gid_vol)
print(qt)
print('\n')
import pdb; pdb.set_trace()
sim_grav_bif = gravidade_bif()
| 45.022518
| 158
| 0.510992
|
913bf0a308c89940898763aacb693c97d4e43e50
| 18,994
|
py
|
Python
|
autocertkit/ack_cli.py
|
zhenzhenc/auto-cert-kit
|
82602c58899e2855078a3ccd03718c68fc38c42b
|
[
"BSD-2-Clause"
] | null | null | null |
autocertkit/ack_cli.py
|
zhenzhenc/auto-cert-kit
|
82602c58899e2855078a3ccd03718c68fc38c42b
|
[
"BSD-2-Clause"
] | null | null | null |
autocertkit/ack_cli.py
|
zhenzhenc/auto-cert-kit
|
82602c58899e2855078a3ccd03718c68fc38c42b
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) Citrix Systems Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""CLI for generating auto cert kit configuration file"""
# @PRODUCT_VERSION@
# @BUILD_NUMBER@
import utils
import sys
import os
import ConfigParser
import testbase
import inspect
import operator
import itertools
from test_generators import *
from status import check_for_process
import test_report
import test_runner
from optparse import OptionParser
from exceptions import *
MIN_VLAN = 0
MAX_VLAN = 4096
INSTALL_DIR = '/opt/xensource/packages/files/auto-cert-kit'
def get_xapi_session(config):
# Future improvement, implement remote login. For now, just return local
return utils.get_local_xapi_session()
def parse_cmd_args():
parser = OptionParser(
usage="%prog [options]", version="%prog @KIT_VERSION@")
parser.add_option("-d", "--debug",
dest="debug",
action="store_const",
const=True,
default=False,
help="Run in debug mode, exit on failure")
parser.add_option("-t", "--vlantag",
dest="vlan",
default="trunk",
help="Specify a VLAN tag ID for which your switches have been configured")
parser.add_option("-g", "--generate",
dest="generate",
action="store_const",
const=True,
default=False,
help="Generate the config file only. Do not run the tests yet.")
parser.add_option("-l", "--list",
dest="list_tests",
action="store_const",
const=True,
default=False,
help="List all of the test methods")
parser.add_option("-i", "--info",
dest="info",
help="Print out information about a specified test name.")
parser.add_option("-m", "--mode",
dest="mode",
default="ALL",
help="Specify the type of certification you wish to perform. (ALL (default) | NET | LSTOR | CPU | OPS).")
parser.add_option("-e", "--exclude",
dest="exclude",
action="append",
default=[],
help="Exclude one or multiple set of tests. (OVS | BRIDGE | LSTOR | CPU | OPS | CRASH).")
parser.add_option("-n", "--netconf",
dest="netconf",
help="Specify the network config file.")
# The option string is an extension, allowing users to specify KVPs
# e.g. optionstr = "dhcp=True,key1=val1,..."
parser.add_option("-o", "--option",
dest="optionstr",
help="Specify extra options.")
(options, _) = parser.parse_args()
config = {}
config['debug'] = options.debug
if options.vlan:
config['vlan_id'] = options.vlan
if options.generate:
config['generate'] = True
if options.info:
print_documentation(options.info)
if options.netconf:
assert_file_exists(options.netconf, 'Network config')
config['netconf'] = parse_netconf_file(options.netconf)
else:
raise utils.ArgumentError(
"You must specify a network configuration file. %s" % options.mode)
config['mode'] = options.mode
config['exclude'] = options.exclude
utils.log.debug("Test Mode: %s" % options.netconf)
if options.list_tests:
print_all_test_classes()
if options.optionstr:
kvp_rec = kvp_string_to_rec(options.optionstr)
for k, v in kvp_rec.iteritems():
config[k] = v
# Check if files exist
file_opts = [("vpx_dlvm_file", "VPX DLVM file")]
for opt, label in file_opts:
if opt in config.keys():
assert_file_exists(os.path.join(INSTALL_DIR, config[opt]), label)
for key, value in config['netconf'].iteritems():
if key.startswith('eth'):
vf_driver_pkg = value['vf_driver_pkg']
if vf_driver_pkg:
assert_file_exists(os.path.join(
INSTALL_DIR, vf_driver_pkg), "VF driver rpm package")
return config
def kvp_string_to_rec(string):
"""Take an input string 'a=b,c=d,e=f' and return the record
{'a':'b','c':'d','e':'f'}"""
rec = {}
for kvp in string.split(','):
arr = kvp.split('=')
if len(arr) > 2:
raise Exception("Cannot convert %s to KVP" % string)
rec[arr[0]] = arr[1]
return rec
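# Example (hypothetical input): kvp_string_to_rec("dhcp=True,key1=val1") would return
# {'dhcp': 'True', 'key1': 'val1'}; note that all values are kept as plain strings.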
def parse_netconf_file(filename):
"""Parse network config file in ini format
E.g.
[eth0]
network_id = 0
vlan_ids = 200,204,240
vf_driver_name = igbvf
vf_driver_pkg = igbvf-2.3.9.6-1.x86_64.rpm
max_vf_num = 8
[static_0_200]
ip_start = 192.168.0.2
ip_end = 192.168.0.10
netmask = 255.255.255.0
gw = 192.168.0.1
[static_management]
# similar to static_0_200
"""
utils.log.debug("Parse network config file: %s" % filename)
cp = ConfigParser.ConfigParser()
cp.read(filename)
rec = {}
for section in cp.sections():
if section.startswith('eth'):
# Ethernet Interface
utils.log.debug("Ethernet Interface: '%s'" % section)
# Network ID is a label of the physical network the adapter has been connected to
# and should be uniform across all adapters.
network_id = cp.get(section, 'network_id')
utils.log.debug("Network IDs: '%s'" % network_id)
try:
network_id = int(network_id)
except:
raise utils.InvalidArgument('Network IDs for %s' % section, network_id,
'should be integer')
# Parse VLAN IDs
vlan_ids = ""
if cp.has_option(section, 'vlan_ids'):
vlan_ids = cp.get(section, 'vlan_ids')
utils.log.debug("VLAN IDs: '%s'" % vlan_ids)
try:
vlan_ids = [int(id.strip()) for id in vlan_ids.split(',')]
except:
raise utils.InvalidArgument('VLAN IDs for %s' % section, vlan_ids,
'should be integer with comma as delimiter if multiple')
# Ensure that the specified VLAN is valid
for vlan_id in vlan_ids:
if vlan_id > MAX_VLAN or vlan_id < MIN_VLAN:
raise utils.InvalidArgument('VLAN ID for %s' % section, vlan_id, '%d < x < %d' %
(MIN_VLAN, MAX_VLAN))
# VF driver info for SR-IOV test
vf_driver_name = ""
if cp.has_option(section, 'vf_driver_name'):
vf_driver_name = cp.get(section, 'vf_driver_name')
vf_driver_pkg = ""
if cp.has_option(section, 'vf_driver_pkg'):
vf_driver_pkg = cp.get(section, 'vf_driver_pkg')
utils.log.debug("VF Driver Name: '%s'" % vf_driver_name)
utils.log.debug("VF Driver Pkg: '%s'" % vf_driver_pkg)
            # The user may specify the maximum number of VFs per PF to test
max_vf_num = ""
if cp.has_option(section, 'max_vf_num'):
max_vf_num = cp.get(section, 'max_vf_num')
if max_vf_num:
try:
max_vf_num = int(max_vf_num)
except:
                    raise utils.InvalidArgument('Maximum VF number for %s' % section, max_vf_num,
'should be integer')
if max_vf_num <= 1:
                    raise utils.InvalidArgument('Maximum VF number for %s' % section, max_vf_num,
'should be greater than 1')
max_vf_num = str(max_vf_num)
utils.log.debug(
"Maxinum VF number per PF to test: '%s'" % max_vf_num)
rec[section] = {'network_id': network_id, 'vlan_ids': vlan_ids,
'vf_driver_name': vf_driver_name, 'vf_driver_pkg': vf_driver_pkg,
'max_vf_num': max_vf_num}
elif section == "static_management":
rec[section] = parse_static_config(cp, section)
elif section.startswith('static'):
# Definition of network properties (e.g. dhcp/static)
arr = section.split('_')
if len(arr) != 3:
raise utils.InvalidArgument('static addressing section', section,
'should be in format of "static_<network_id>_<vlan_id>"')
net = arr[1]
vlan = arr[2]
if not unicode(net.strip()).isdecimal() or not unicode(vlan.strip()).isdecimal():
raise utils.InvalidArgument('static addressing section', section,
'should be valid network and/or vlan to determine')
rec[section] = parse_static_config(cp, section)
else:
raise Exception("Error: Unknown section: '%s'" % section)
return rec
def assert_file_exists(file_name, label):
"""Check whether a file exists, if it doesn't, raise an exception"""
if not os.path.isfile(file_name):
raise utils.ConfigFileNotFound(file_name, label)
def validate_param(value, possibles, arg_name):
"""Ensure that the provided value is one of values in the possibles list"""
if value.upper() not in [string.upper() for string in possibles]:
raise utils.InvalidArgument(arg_name, value, possibles)
def parse_static_config(configParser, section):
"""Parse a ini section specifying static networking config for droid VMs to use."""
utils.log.debug("Read section '%s'" % section)
config = {}
for option in ['ip_start', 'ip_end', 'netmask', 'gw']:
config[option] = configParser.get(section, option)
utils.log.debug("Get option %s = '%s'" % (option, config[option]))
if not config[option]:
raise utils.InvalidArgument(
option, config[option], "Should not be empty!")
return config
def network_interfaces_to_test(session, config):
"""Return a list of all the ethernet devices that must be tested by the
auto cert kit. In turn, each device must be the 'primary' interface,
upon which we run our cert tests."""
# Extract from netconf the network interfaces that the user
# has specified.
ifaces_to_test = [iface.strip() for iface in config['netconf'].keys()
if iface.startswith('eth')]
devices = utils.get_master_network_devices(session)
# Filter the list of devices available on the master by the interfaces
# specified by the caller in their netconf file.
devices_to_test = [dev for dev in devices
if dev['Kernel_name'] in ifaces_to_test]
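    # Group the filtered devices by PCI id so that, for multi-port adapters, a single
    # representative interface is picked per physical card.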
device_groups_list = []
for key, items in itertools.groupby(devices_to_test, operator.itemgetter('PCI_id')):
device_groups_list.append(list(items))
ifaces = []
for grp in device_groups_list:
dev = grp[0] # we can use any of the devices in the group
ifaces.append(dev['Kernel_name'])
return ifaces
def storage_interfaces_to_test(session):
"""Return a list of all storage interface devices that connected to local
disks and must be tested by the auto cert kit."""
def comp_key(src, dst, key):
return key in src and key in dst and src[key] == dst[key]
# Get all interfaces that has a disk connected.
devices = utils.get_local_storage_info(session)
# Some devices, which can have multiple disks, only need to be tested once.
devices_to_test = []
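    # The for/else below appends a device only if the inner loop completes without finding a
    # matching (duplicate) entry; a break skips the else clause.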
for device in devices:
for existing in devices_to_test:
if comp_key(device, existing, 'vendor') and \
comp_key(device, existing, 'driver') and \
comp_key(device, existing, 'subclass') and \
comp_key(device, existing, 'class'):
break
if comp_key(device, existing, 'PCI_id'):
break
else:
devices_to_test.append(device)
return devices_to_test
def generate_test_config(session, config, test_run_file):
"""Enumerate hardware on machine and setup test config file"""
doc = minidom.Document()
kit_info_rec = {'version': '@KIT_VERSION@', 'build': '@BUILD_NUMBER@',
'product_version': '@PRODUCT_VERSION@'}
root_node = doc.createElement('automated_certification_kit')
utils.set_dict_attributes(root_node, kit_info_rec)
doc.appendChild(root_node)
global_config_node = doc.createElement('global_config')
utils.set_dict_attributes(global_config_node, config)
root_node.appendChild(global_config_node)
# Create the XML node under which, each device we are testing
# is located.
devices_node = doc.createElement('devices')
root_node.appendChild(devices_node)
# Based on the mode of operation, generate the particular tests
# that the user would like to run.
ifs = network_interfaces_to_test(session, config)
storage_devs = storage_interfaces_to_test(session)
# Take an interface to use for non-networking tests
if not len(ifs):
raise Exception(
"Error: in order to run these tests, you need at least one network defined.")
xml_generators = list(XML_GENERATORS)
# Support the loading of additional tests
try:
import ack_addons
xml_generators.extend(ack_addons.XML_GENERATORS)
except ImportError:
utils.log.debug("No ack_addons module found.")
for gen_cls in xml_generators:
xml_generator = gen_cls(session, config, config[
'mode'], ifs, storage_devs)
xml_generator.append_xml_config(doc, devices_node)
fh = open(test_run_file, 'w')
fh.write(doc.toxml())
fh.close()
@utils.log_exceptions
def pre_flight_checks(session, config):
"""Check for some of the common problems"""
# Check for a run in progress
if check_for_process():
raise Exception(
"Error: An ACK process already exists on this host. Kill all running ACK processes and start the test again.")
# Check for at least two hosts
hosts = session.xenapi.host.get_all()
if len(hosts) < 2:
raise Exception(
"Error: You need to have a pool of at least two hosts to run this kit. Only found %d." % (len(hosts)))
for host in hosts:
ver = utils.get_ack_version(session, host)
if not ver:
raise Exception(
"Error: Both hosts need the Auto Cert Kit installed on them! The kit was not found on %s" % host)
# Check that each host has some storage
for host in hosts:
avail_storage = utils.find_storage_for_host(session, host)
if not avail_storage:
raise Exception("Error: host '%s' has no available storage.")
# Check that we have at least two network adaptors, on the same network
recs = config['netconf']
ifaces = {}
for k, v in recs.iteritems():
if k.startswith('eth'):
ifaces[k] = v['network_id']
utils.log.debug(
"Network interfaces specified in config file: %s" % ifaces.keys())
if 'singlenic' in config.keys() and config['singlenic'] == "true":
utils.log.debug(
"Skipping check for multiple networks, as only a single NIC is being tested")
return
# If less than 2 interfaces, raise an exception
if len(ifaces.keys()) < 2:
raise Exception("Error: the test kit needs at least 2 network interfaces to be defined in your network config file. Only %d were found: %s" % (
len(ifaces.keys()), ifaces.keys()))
# If less than 2 interfaces share the same network, raise an exception
for k, v in ifaces.iteritems():
if ifaces.values().count(v) < 2:
raise Exception("Error: ethernet device %s on network %s is defined in the network config but does not have a matching partner. \
            Please review the network configuration and minimum requirements of this kit." % (k, v))
def main(config, test_run_file):
"""Main routine - assess which tests should be run, and create
output file"""
session = get_xapi_session(config)
# Start Logger
utils.init_ack_logging(session)
utils.log.info("Options: %s" % config)
# Pre checks before running tests
pre_flight_checks(session, config)
config['xs_version'] = utils.get_xenserver_version(session)
config['xcp_version'] = utils.get_xcp_version(session)
generate_test_config(session, config, test_run_file)
if 'generate' in config:
# Generate config file only
utils.log.info("Test file generated")
session.logout()
return "OK"
# cleanup in case previous run did not complete entirely
if utils.pool_wide_cleanup(session):
utils.reboot_normally(session)
# Logout of XAPI session anyway - the test runner will create a new session
# if needed. (We might only be generating).
session.logout()
# Kick off the testrunner
utils.log.info("Starting Test Runner from ACK CLI.")
test_file, output = test_runner.run_tests_from_file(test_run_file)
if __name__ == "__main__":
# Parse Args
config = parse_cmd_args()
# Default config file
test_run_file = 'test_run.conf'
main(config, test_run_file)
| 37.243137
| 151
| 0.613931
|
6865c8652de74d2e0a40d218cd1bd97e75504fbb
| 4,904
|
py
|
Python
|
Grid/grid_overlap_multi_xy_axis.py
|
zhang-xiao-feng/pyecharts-gallery
|
46c286f20372768e4375a34457c0638b0769aeee
|
[
"MIT"
] | 1
|
2021-08-06T07:50:50.000Z
|
2021-08-06T07:50:50.000Z
|
Grid/grid_overlap_multi_xy_axis.py
|
yangzuogang/pyecharts-gallery
|
46c286f20372768e4375a34457c0638b0769aeee
|
[
"MIT"
] | null | null | null |
Grid/grid_overlap_multi_xy_axis.py
|
yangzuogang/pyecharts-gallery
|
46c286f20372768e4375a34457c0638b0769aeee
|
[
"MIT"
] | 1
|
2022-03-10T09:05:44.000Z
|
2022-03-10T09:05:44.000Z
|
from pyecharts import options as opts
from pyecharts.charts import Bar, Grid, Line
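# Two bar charts, each overlapped with a line and wired to three y-axes, are placed side by
# side in a single Grid; the *_index arguments below bind each series to its own x/y axis.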
bar = (
Bar()
.add_xaxis(["{}月".format(i) for i in range(1, 13)])
.add_yaxis(
"蒸发量",
[2.0, 4.9, 7.0, 23.2, 25.6, 76.7, 135.6, 162.2, 32.6, 20.0, 6.4, 3.3],
yaxis_index=0,
color="#d14a61",
)
.add_yaxis(
"降水量",
[2.6, 5.9, 9.0, 26.4, 28.7, 70.7, 175.6, 182.2, 48.7, 18.8, 6.0, 2.3],
yaxis_index=1,
color="#5793f3",
)
.extend_axis(
yaxis=opts.AxisOpts(
name="蒸发量",
type_="value",
min_=0,
max_=250,
position="right",
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color="#d14a61")
),
axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
)
)
.extend_axis(
yaxis=opts.AxisOpts(
type_="value",
name="温度",
min_=0,
max_=25,
position="left",
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color="#675bba")
),
axislabel_opts=opts.LabelOpts(formatter="{value} °C"),
splitline_opts=opts.SplitLineOpts(
is_show=True, linestyle_opts=opts.LineStyleOpts(opacity=1)
),
)
)
.set_global_opts(
yaxis_opts=opts.AxisOpts(
name="降水量",
min_=0,
max_=250,
position="right",
offset=80,
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color="#5793f3")
),
axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
),
title_opts=opts.TitleOpts(title="Grid-Overlap-多 X/Y 轴示例"),
tooltip_opts=opts.TooltipOpts(trigger="axis", axis_pointer_type="cross"),
legend_opts=opts.LegendOpts(pos_left="25%"),
)
)
line = (
Line()
.add_xaxis(["{}月".format(i) for i in range(1, 13)])
.add_yaxis(
"平均温度",
[2.0, 2.2, 3.3, 4.5, 6.3, 10.2, 20.3, 23.4, 23.0, 16.5, 12.0, 6.2],
yaxis_index=2,
color="#675bba",
label_opts=opts.LabelOpts(is_show=False),
)
)
bar1 = (
Bar()
.add_xaxis(["{}月".format(i) for i in range(1, 13)])
.add_yaxis(
"蒸发量 1",
[2.0, 4.9, 7.0, 23.2, 25.6, 76.7, 135.6, 162.2, 32.6, 20.0, 6.4, 3.3],
color="#d14a61",
xaxis_index=1,
yaxis_index=3,
)
.add_yaxis(
"降水量 2",
[2.6, 5.9, 9.0, 26.4, 28.7, 70.7, 175.6, 182.2, 48.7, 18.8, 6.0, 2.3],
color="#5793f3",
xaxis_index=1,
yaxis_index=3,
)
.extend_axis(
yaxis=opts.AxisOpts(
name="蒸发量",
type_="value",
min_=0,
max_=250,
position="right",
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color="#d14a61")
),
axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
)
)
.extend_axis(
yaxis=opts.AxisOpts(
type_="value",
name="温度",
min_=0,
max_=25,
position="left",
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color="#675bba")
),
axislabel_opts=opts.LabelOpts(formatter="{value} °C"),
splitline_opts=opts.SplitLineOpts(
is_show=True, linestyle_opts=opts.LineStyleOpts(opacity=1)
),
)
)
.set_global_opts(
xaxis_opts=opts.AxisOpts(grid_index=1),
yaxis_opts=opts.AxisOpts(
name="降水量",
min_=0,
max_=250,
position="right",
offset=80,
grid_index=1,
axisline_opts=opts.AxisLineOpts(
linestyle_opts=opts.LineStyleOpts(color="#5793f3")
),
axislabel_opts=opts.LabelOpts(formatter="{value} ml"),
),
tooltip_opts=opts.TooltipOpts(trigger="axis", axis_pointer_type="cross"),
legend_opts=opts.LegendOpts(pos_left="65%"),
)
)
line1 = (
Line()
.add_xaxis(["{}月".format(i) for i in range(1, 13)])
.add_yaxis(
"平均温度 1",
[2.0, 2.2, 3.3, 4.5, 6.3, 10.2, 20.3, 23.4, 23.0, 16.5, 12.0, 6.2],
color="#675bba",
label_opts=opts.LabelOpts(is_show=False),
xaxis_index=1,
yaxis_index=5,
)
)
overlap_1 = bar.overlap(line)
overlap_2 = bar1.overlap(line1)
grid = (
Grid(init_opts=opts.InitOpts(width="1200px", height="800px"))
.add(
overlap_1, grid_opts=opts.GridOpts(pos_right="58%"), is_control_axis_index=True
)
.add(overlap_2, grid_opts=opts.GridOpts(pos_left="58%"), is_control_axis_index=True)
.render("grid_overlap_multi_xy_axis.html")
)
| 29.365269
| 88
| 0.521615
|
44e893e60b2ec2e17764b76f4d81f5d019d24f88
| 6,665
|
py
|
Python
|
userbot/plugins/afk.py
|
NoobRider/catuserbot
|
dea79d5d8b7174efefcc1c35ed3434516a490f58
|
[
"MIT"
] | 2
|
2020-04-12T11:51:06.000Z
|
2020-04-18T14:08:06.000Z
|
userbot/plugins/afk.py
|
NoobRider/catuserbot
|
dea79d5d8b7174efefcc1c35ed3434516a490f58
|
[
"MIT"
] | null | null | null |
userbot/plugins/afk.py
|
NoobRider/catuserbot
|
dea79d5d8b7174efefcc1c35ed3434516a490f58
|
[
"MIT"
] | 1
|
2020-05-18T10:46:56.000Z
|
2020-05-18T10:46:56.000Z
|
"""AFK Plugin for @UniBorg
Syntax: .afk REASON"""
import asyncio
from datetime import datetime, timedelta
from telethon import events
from telethon.tl import functions, types
from userbot import CMD_HELP
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
global afk_start
global afk_end
USER_AFK = {}
afk_time = None
last_afk_message = {}
afk_start = {}
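# Module-level state shared by the handlers below: the AFK flag/reason, the time the AFK
# period started, and the last auto-reply sent per chat (so it can be replaced later).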
@borg.on(events.NewMessage(pattern=r"\.afk ?(.*)", outgoing=True)) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
global afk_start
global afk_end
global reason
USER_AFK = {}
afk_time = None
last_afk_message = {}
afk_end = {}
start_1 = datetime.now()
afk_start = start_1.replace(microsecond=0)
reason = event.pattern_match.group(1)
if not USER_AFK: # pylint:disable=E0602
last_seen_status = await borg( # pylint:disable=E0602
functions.account.GetPrivacyRequest(
types.InputPrivacyKeyStatusTimestamp()
)
)
if isinstance(last_seen_status.rules, types.PrivacyValueAllowAll):
            afk_time = datetime.now()  # pylint:disable=E0602
USER_AFK = f"yes: {reason}" # pylint:disable=E0602
if reason:
await borg.send_message(event.chat_id, f"**I shall be Going afk!** __because ~ {reason}__")
else:
await borg.send_message(event.chat_id, f"**I am Going afk!**")
await asyncio.sleep(5)
await event.delete()
try:
await borg.send_message( # pylint:disable=E0602
Config.PRIVATE_GROUP_BOT_API_ID, # pylint:disable=E0602
f"Set AFK mode to True, and Reason is {reason}"
)
except Exception as e: # pylint:disable=C0103,W0703
logger.warn(str(e)) # pylint:disable=E0602
@borg.on(events.NewMessage(outgoing=True)) # pylint:disable=E0602
async def set_not_afk(event):
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
global afk_start
global afk_end
back_alive = datetime.now()
afk_end = back_alive.replace(microsecond=0)
if afk_start != {}:
total_afk_time = str((afk_end - afk_start))
current_message = event.message.message
if ".afk" not in current_message and "yes" in USER_AFK: # pylint:disable=E0602
        shite = await borg.send_message(event.chat_id, "__Back alive!__\n**No Longer afk.**\n`Was afk for:` `" + total_afk_time + "`")
try:
await borg.send_message( # pylint:disable=E0602
Config.PRIVATE_GROUP_BOT_API_ID, # pylint:disable=E0602
"Set AFK mode to False"
)
except Exception as e: # pylint:disable=C0103,W0703
await borg.send_message( # pylint:disable=E0602
event.chat_id,
"Please set `PRIVATE_GROUP_BOT_API_ID` " + \
"for the proper functioning of afk functionality " + \
"check pinned message in @xtragbot.\n\n `{}`".format(str(e)),
reply_to=event.message.id,
silent=True
)
await asyncio.sleep(5)
await shite.delete()
USER_AFK = {} # pylint:disable=E0602
afk_time = None # pylint:disable=E0602
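# Auto-reply to incoming mentions and private messages while AFK, replacing the previous
# auto-reply in that chat with a fresh one.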
@borg.on(events.NewMessage( # pylint:disable=E0602
incoming=True,
func=lambda e: bool(e.mentioned or e.is_private)
))
async def on_afk(event):
if event.fwd_from:
return
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
global afk_start
global afk_end
back_alivee = datetime.now()
afk_end = back_alivee.replace(microsecond=0)
if afk_start != {}:
total_afk_time = str((afk_end - afk_start))
afk_since = "**a while ago**"
current_message_text = event.message.message.lower()
if "afk" in current_message_text:
# userbot's should not reply to other userbot's
# https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots
return False
if USER_AFK and not (await event.get_sender()).bot: # pylint:disable=E0602
if afk_time: # pylint:disable=E0602
            now = datetime.now()
datime_since_afk = now - afk_time # pylint:disable=E0602
time = float(datime_since_afk.seconds)
days = time // (24 * 3600)
time = time % (24 * 3600)
hours = time // 3600
time %= 3600
minutes = time // 60
time %= 60
seconds = time
if days == 1:
afk_since = "**Yesterday**"
elif days > 1:
if days > 6:
date = now + \
                        timedelta(
days=-days, hours=-hours, minutes=-minutes)
afk_since = date.strftime("%A, %Y %B %m, %H:%I")
else:
                    wday = now + timedelta(days=-days)
afk_since = wday.strftime('%A')
elif hours > 1:
afk_since = f"`{int(hours)}h{int(minutes)}m` **ago**"
elif minutes > 0:
afk_since = f"`{int(minutes)}m{int(seconds)}s` **ago**"
else:
afk_since = f"`{int(seconds)}s` **ago**"
msg = None
message_to_reply = f"__My Master Has Been Gone For__ `{total_afk_time}`\nWhere He Is: ONLY GOD KNOWS " + \
f"\n\n__I promise I'll back in a few light years__\n**REASON**: {reason}" \
if reason \
else f"**Heya!**\n__I am currently unavailable. Since when, you ask? For {total_afk_time} I guess.__\n\nWhen will I be back? Soon __Whenever I feel like it__**( ಠ ʖ̯ ಠ)** "
msg = await event.reply(message_to_reply)
await asyncio.sleep(5)
if event.chat_id in last_afk_message: # pylint:disable=E0602
await last_afk_message[event.chat_id].delete() # pylint:disable=E0602
last_afk_message[event.chat_id] = msg # pylint:disable=E0602
CMD_HELP.update({
"afk":
".afk [Optional Reason]\
\nUsage: Sets you as afk.\nReplies to anyone who tags/PM's \
you telling them that you are AFK(reason).\n\nSwitches off AFK when you type back anything, anywhere.\
    \nAFK stands for away from keyboard/keypad.\
"
})
| 39.672619
| 185
| 0.611253
|
4a7bf5312a6fcf888e9c41ef32ffd1a0192c66f5
| 10,475
|
py
|
Python
|
api/clouds/gdrive.py
|
h2020-westlife-eu/VRE
|
a85d5370767939b1971415be48a551ae6b1edc5d
|
[
"MIT"
] | 1
|
2016-06-28T13:13:27.000Z
|
2016-06-28T13:13:27.000Z
|
api/clouds/gdrive.py
|
h2020-westlife-eu/VRE
|
a85d5370767939b1971415be48a551ae6b1edc5d
|
[
"MIT"
] | 12
|
2016-06-28T11:19:46.000Z
|
2017-05-05T14:24:14.000Z
|
api/clouds/gdrive.py
|
h2020-westlife-eu/VRE
|
a85d5370767939b1971415be48a551ae6b1edc5d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# Copyright Luna Technology 2015
# Matthieu Riviere <mriviere@luna-technology.com>
import os
import tempfile
import traceback
from apiclient.discovery import build
import httplib2
from oauth2client import client
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from django.conf import settings
from api.data import DATAFILE_READY
from api.models import Folder, Datafile
SCOPES = 'https://www.googleapis.com/auth/drive'
# CLIENT_SECRET_FILE = os.path.join(os.path.dirname(__file__), 'client_secret.json')
APPLICATION_NAME = 'Pype (dev)'
REDIRECT_URI = 'http://dev.pype.com:8000/api/gdriveproviders/confirm_link.json/'
AUTH_URI = "https://accounts.google.com/o/oauth2/auth"
TOKEN_URI = "https://accounts.google.com/o/oauth2/token"
FOLDER_MIMETYPE = "application/vnd.google-apps.folder"
def get_flow(state=None):
client_id = settings.GOOGLE_DRIVE_CLIENT_ID
client_secret = settings.GOOGLE_DRIVE_CLIENT_SECRET
redirect_uri = settings.GOOGLE_DRIVE_CLIENT_URI
flow = client.OAuth2WebServerFlow(
client_id,
client_secret=client_secret,
scope=SCOPES,
redirect_uri=redirect_uri,
auth_uri=AUTH_URI,
token_uri=TOKEN_URI
)
flow.params['access_type'] = 'offline'
flow.params['approval_prompt'] = 'force'
if state is not None:
flow.params['state'] = state
return flow
def step1_get_authorize_url(state=None):
flow = get_flow(state=state)
auth_uri = flow.step1_get_authorize_url()
return auth_uri
def step2_redeem_code_for_credentials(code):
flow = get_flow()
credentials = flow.step2_exchange(code)
return credentials.to_json()
def step3_get_drive_service(credentials_json):
credentials = client.OAuth2Credentials.from_json(credentials_json)
http_auth = credentials.authorize(httplib2.Http())
drive_service = build('drive', 'v2', http=http_auth)
return drive_service
def get_pydrive_object(provider):
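    # Build an authorized GoogleDrive client from the provider's stored OAuth2 credentials,
    # refreshing (and persisting) them first if the access token has expired.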
credentials = client.OAuth2Credentials.from_json(provider.credentials)
gauth = GoogleAuth()
gauth.credentials = credentials
if gauth.access_token_expired:
print('Google Drive token expired. Refreshing...')
gauth.Refresh()
provider.credentials = gauth.credentials.to_json()
provider.save()
gauth.Authorize()
drive = GoogleDrive(gauth)
return drive
def check_gdrive_credentials(provider):
drive = get_pydrive_object(provider)
drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
def upload_file_to_gdrive(datafile, provider, tempfile_path):
drive = get_pydrive_object(provider)
file1 = drive.CreateFile({
'title': datafile.filename,
'parents': [{'id': datafile.folder.storage_key}]
})
file1.SetContentFile(tempfile_path)
file1.Upload()
return file1['id']
def create_folder_on_gdrive(folder, provider):
drive = get_pydrive_object(provider)
file1 = drive.CreateFile({
'title': folder.name,
'mimeType': FOLDER_MIMETYPE,
'parents': [{'id': folder.parent.storage_key}]
})
file1.Upload()
return file1['id']
def retrieve_file_from_gdrive(datafile, provider):
drive = get_pydrive_object(provider)
try:
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
tmpfilename = tmpfile.name
print('Using storage_key: %s' % datafile.storage_key)
file1 = drive.CreateFile({'id': datafile.storage_key})
print('Detected title is: %s' % file1['title'])
file1.GetContentFile(tmpfilename)
return tmpfilename
except:
print('Download failed: %s' % traceback.format_exc())
return None
def delete_file_from_gdrive(provider, storage_key):
drive = get_pydrive_object(provider)
try:
drive.auth.service.files().trash(fileId=storage_key).execute()
return True
except:
print('Delete failed: %s' % traceback.format_exc())
return False
def get_children(drive, parent_id=None):
children = []
if parent_id is None or parent_id == 'root':
query = "('root' in parents or sharedWithMe) and trashed=false"
else:
query = "'%s' in parents and trashed=false" % parent_id
for f in drive.ListFile({'q': query}).GetList():
children.append(f)
return children
def get_children_from_filelist(files, parent_id=None):
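    # Filter an already-fetched file list by parent id; for None/'root' a file counts as
    # top-level when it has no parents or one of its parents is flagged isRoot.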
if parent_id is None or parent_id == 'root':
def filter(f):
if len(f['parents']) == 0:
return True
for parent in f['parents']:
if parent['isRoot']:
return True
return False
else:
def filter(f):
for parent in f['parents']:
if parent['id'] == parent_id:
return True
return False
return [f for f in files if filter(f)]
def get_all_files(drive):
files = []
query = 'trashed=false'
for f in drive.ListFile({'q': query}).GetList():
files.append(f)
return files
def update_quota(provider, drive):
try:
quota = drive.auth.service.about().get().execute()['quotaBytesTotal']
except:
print('Could not get quota from Google Drive:')
traceback.print_exc()
else:
provider.quota_bytes = quota
provider.save()
def resync_from_gdrive(provider):
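    # Mirror the remote Google Drive tree into the local Folder/Datafile records: fetch the
    # full listing once, create/update matching local objects per folder, and delete local
    # entries that no longer exist remotely.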
drive = get_pydrive_object(provider)
def update_or_create_file(file_data, parent_folder, child_files_map):
if file_data['id'] in child_files_map.keys():
file1 = child_files_map[file_data['id']]
if file1.filename != file_data['title']:
                file1.filename = file_data['title']
file1.save()
try:
size = int(file_data['fileSize'])
external_link = ''
except (KeyError, ValueError):
size = None
try:
external_link = file_data['alternateLink']
except:
print(file_data)
raise
if file1.size != size or file1.external_link != external_link:
file1.size = size
file1.external_link = external_link
file1.save()
del(child_files_map[file_data['id']])
else:
# The file isn't present locally. Create it
file1 = Datafile()
file1.owner = provider.owner
file1.folder = parent_folder
file1.filename = file_data['title']
file1.upload_state = DATAFILE_READY
file1.storage_key = file_data['id']
try:
file1.size = int(file_data['fileSize'])
except (KeyError, ValueError):
file1.size = None
file1.external_link = file_data['alternateLink']
file1.save()
return file1
def update_or_create_folder(folder_data, parent_folder, child_folders_map):
# Check that the folder is present on both sides
if folder_data['id'] in child_folders_map.keys():
folder = child_folders_map[folder_data['id']]
if folder.name != folder_data['title']:
folder.name = folder_data['title']
folder.save()
del(child_folders_map[folder_data['id']])
else:
# The folder isn't present locally. Create it
folder = Folder()
folder.owner = provider.owner
folder.parent = parent_folder
folder.name = folder_data['title']
folder.storage_account = provider
folder.storage_key = folder_data['id']
folder.save()
return folder
def sync_folder_with_drive_2(all_files, local_folder):
folder_id = local_folder.storage_key
child_folders = list(Folder.objects.filter(parent=local_folder))
child_folders_map = {}
for child_folder in child_folders:
child_folders_map[child_folder.storage_key] = child_folder
child_files = list(Datafile.objects.filter(folder=local_folder))
child_files_map = {}
for child_file in child_files:
child_files_map[child_file.storage_key] = child_file
for c in get_children_from_filelist(all_files, parent_id=folder_id):
if c['mimeType'] == FOLDER_MIMETYPE:
folder = update_or_create_folder(c, local_folder, child_folders_map)
sync_folder_with_drive_2(all_files, folder)
else:
update_or_create_file(c, local_folder, child_files_map)
# Do we have any files on the local side that aren't on the distant side
# anymore?
for rem_folder_key in child_folders_map:
rem_folder = child_folders_map[rem_folder_key]
rem_folder.delete()
for rem_file_key in child_files_map:
rem_file = child_files_map[rem_file_key]
rem_file.delete()
def sync_folder_with_drive(local_folder):
folder_id = local_folder.storage_key
child_folders = list(Folder.objects.filter(parent=local_folder))
child_folders_map = {}
for child_folder in child_folders:
child_folders_map[child_folder.storage_key] = child_folder
child_files = list(Datafile.objects.filter(folder=local_folder))
child_files_map = {}
for child_file in child_files:
child_files_map[child_file.storage_key] = child_file
for c in get_children(drive, folder_id):
if c['mimeType'] == FOLDER_MIMETYPE:
folder = update_or_create_folder(c, local_folder, child_folders_map)
sync_folder_with_drive(folder)
else:
update_or_create_file(c, local_folder, child_files_map)
# Do we have any files on the local side that aren't on the distant side
# anymore?
for rem_folder_key in child_folders_map:
rem_folder = child_folders_map[rem_folder_key]
rem_folder.delete()
for rem_file_key in child_files_map:
rem_file = child_files_map[rem_file_key]
rem_file.delete()
all_files = get_all_files(drive)
root_folder = Folder.objects.get(storage_account=provider, parent=None)
sync_folder_with_drive_2(all_files, root_folder)
#update_quota(provider, drive)
| 29.843305
| 84
| 0.641718
|
caee7f5f7d01177e3933bab5bc2181e592dee0b9
| 27,776
|
py
|
Python
|
milvus/grpc_gen/common_pb2.py
|
fishpenguin/pymilvus
|
4e73e87e3d8b6317628f2bebaf1e48eddaa05dba
|
[
"Apache-2.0"
] | null | null | null |
milvus/grpc_gen/common_pb2.py
|
fishpenguin/pymilvus
|
4e73e87e3d8b6317628f2bebaf1e48eddaa05dba
|
[
"Apache-2.0"
] | null | null | null |
milvus/grpc_gen/common_pb2.py
|
fishpenguin/pymilvus
|
4e73e87e3d8b6317628f2bebaf1e48eddaa05dba
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: common.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='common.proto',
package='milvus.proto.common',
syntax='proto3',
serialized_options=_b('Z3github.com/milvus-io/milvus/internal/proto/commonpb'),
serialized_pb=_b('\n\x0c\x63ommon.proto\x12\x13milvus.proto.common\"L\n\x06Status\x12\x32\n\nerror_code\x18\x01 \x01(\x0e\x32\x1e.milvus.proto.common.ErrorCode\x12\x0e\n\x06reason\x18\x02 \x01(\t\"*\n\x0cKeyValuePair\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x15\n\x04\x42lob\x12\r\n\x05value\x18\x01 \x01(\x0c\"#\n\x07\x41\x64\x64ress\x12\n\n\x02ip\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x03\"m\n\x07MsgBase\x12.\n\x08msg_type\x18\x01 \x01(\x0e\x32\x1c.milvus.proto.common.MsgType\x12\r\n\x05msgID\x18\x02 \x01(\x03\x12\x11\n\ttimestamp\x18\x03 \x01(\x04\x12\x10\n\x08sourceID\x18\x04 \x01(\x03\"7\n\tMsgHeader\x12*\n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x1c.milvus.proto.common.MsgBase*\xa5\x04\n\tErrorCode\x12\x0b\n\x07Success\x10\x00\x12\x13\n\x0fUnexpectedError\x10\x01\x12\x11\n\rConnectFailed\x10\x02\x12\x14\n\x10PermissionDenied\x10\x03\x12\x17\n\x13\x43ollectionNotExists\x10\x04\x12\x13\n\x0fIllegalArgument\x10\x05\x12\x14\n\x10IllegalDimension\x10\x07\x12\x14\n\x10IllegalIndexType\x10\x08\x12\x19\n\x15IllegalCollectionName\x10\t\x12\x0f\n\x0bIllegalTOPK\x10\n\x12\x14\n\x10IllegalRowRecord\x10\x0b\x12\x13\n\x0fIllegalVectorID\x10\x0c\x12\x17\n\x13IllegalSearchResult\x10\r\x12\x10\n\x0c\x46ileNotFound\x10\x0e\x12\x0e\n\nMetaFailed\x10\x0f\x12\x0f\n\x0b\x43\x61\x63heFailed\x10\x10\x12\x16\n\x12\x43\x61nnotCreateFolder\x10\x11\x12\x14\n\x10\x43\x61nnotCreateFile\x10\x12\x12\x16\n\x12\x43\x61nnotDeleteFolder\x10\x13\x12\x14\n\x10\x43\x61nnotDeleteFile\x10\x14\x12\x13\n\x0f\x42uildIndexError\x10\x15\x12\x10\n\x0cIllegalNLIST\x10\x16\x12\x15\n\x11IllegalMetricType\x10\x17\x12\x0f\n\x0bOutOfMemory\x10\x18\x12\x11\n\rIndexNotExist\x10\x19\x12\x12\n\rDDRequestRace\x10\xe8\x07*X\n\nIndexState\x12\x12\n\x0eIndexStateNone\x10\x00\x12\x0c\n\x08Unissued\x10\x01\x12\x0e\n\nInProgress\x10\x02\x12\x0c\n\x08\x46inished\x10\x03\x12\n\n\x06\x46\x61iled\x10\x04*X\n\x0cSegmentState\x12\x14\n\x10SegmentStateNone\x10\x00\x12\x0c\n\x08NotExist\x10\x01\x12\x0b\n\x07Growing\x10\x02\x12\n\n\x06Sealed\x10\x03\x12\x0b\n\x07\x46lushed\x10\x04*\xba\x06\n\x07MsgType\x12\r\n\tUndefined\x10\x00\x12\x14\n\x10\x43reateCollection\x10\x64\x12\x12\n\x0e\x44ropCollection\x10\x65\x12\x11\n\rHasCollection\x10\x66\x12\x16\n\x12\x44\x65scribeCollection\x10g\x12\x13\n\x0fShowCollections\x10h\x12\x14\n\x10GetSystemConfigs\x10i\x12\x12\n\x0eLoadCollection\x10j\x12\x15\n\x11ReleaseCollection\x10k\x12\x14\n\x0f\x43reatePartition\x10\xc8\x01\x12\x12\n\rDropPartition\x10\xc9\x01\x12\x11\n\x0cHasPartition\x10\xca\x01\x12\x16\n\x11\x44\x65scribePartition\x10\xcb\x01\x12\x13\n\x0eShowPartitions\x10\xcc\x01\x12\x13\n\x0eLoadPartitions\x10\xcd\x01\x12\x16\n\x11ReleasePartitions\x10\xce\x01\x12\x11\n\x0cShowSegments\x10\xfa\x01\x12\x14\n\x0f\x44\x65scribeSegment\x10\xfb\x01\x12\x10\n\x0b\x43reateIndex\x10\xac\x02\x12\x12\n\rDescribeIndex\x10\xad\x02\x12\x0e\n\tDropIndex\x10\xae\x02\x12\x0b\n\x06Insert\x10\x90\x03\x12\x0b\n\x06\x44\x65lete\x10\x91\x03\x12\n\n\x05\x46lush\x10\x92\x03\x12\x0b\n\x06Search\x10\xf4\x03\x12\x11\n\x0cSearchResult\x10\xf5\x03\x12\x12\n\rGetIndexState\x10\xf6\x03\x12\x1a\n\x15GetIndexBuildProgress\x10\xf7\x03\x12\x1c\n\x17GetCollectionStatistics\x10\xf8\x03\x12\x1b\n\x16GetPartitionStatistics\x10\xf9\x03\x12\r\n\x08Retrieve\x10\xfa\x03\x12\x13\n\x0eRetrieveResult\x10\xfb\x03\x12\x10\n\x0bSegmentInfo\x10\xd8\x04\x12\r\n\x08TimeTick\x10\xb0\t\x12\x13\n\x0eQueryNodeStats\x10\xb1\t\x12\x0e\n\tLoadIndex\x10\xb2\t\x12\x0e\n\tRequestID\x10\xb3\t\x12\x0f\n\nRequestTSO\x10\xb4\t\x12\x14\n\x0f\x41lloc
ateSegment\x10\xb5\t\x12\x16\n\x11SegmentStatistics\x10\xb6\t\x12\x15\n\x10SegmentFlushDone\x10\xb7\t*\"\n\x07\x44slType\x12\x07\n\x03\x44sl\x10\x00\x12\x0e\n\nBoolExprV1\x10\x01\x42\x35Z3github.com/milvus-io/milvus/internal/proto/commonpbb\x06proto3')
)
_ERRORCODE = _descriptor.EnumDescriptor(
name='ErrorCode',
full_name='milvus.proto.common.ErrorCode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Success', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UnexpectedError', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ConnectFailed', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PermissionDenied', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CollectionNotExists', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IllegalArgument', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IllegalDimension', index=6, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IllegalIndexType', index=7, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IllegalCollectionName', index=8, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IllegalTOPK', index=9, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IllegalRowRecord', index=10, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IllegalVectorID', index=11, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IllegalSearchResult', index=12, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FileNotFound', index=13, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MetaFailed', index=14, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CacheFailed', index=15, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CannotCreateFolder', index=16, number=17,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CannotCreateFile', index=17, number=18,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CannotDeleteFolder', index=18, number=19,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CannotDeleteFile', index=19, number=20,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BuildIndexError', index=20, number=21,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IllegalNLIST', index=21, number=22,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IllegalMetricType', index=22, number=23,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OutOfMemory', index=23, number=24,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IndexNotExist', index=24, number=25,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DDRequestRace', index=25, number=1000,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=388,
serialized_end=937,
)
_sym_db.RegisterEnumDescriptor(_ERRORCODE)
ErrorCode = enum_type_wrapper.EnumTypeWrapper(_ERRORCODE)
_INDEXSTATE = _descriptor.EnumDescriptor(
name='IndexState',
full_name='milvus.proto.common.IndexState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IndexStateNone', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Unissued', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='InProgress', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Finished', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Failed', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=939,
serialized_end=1027,
)
_sym_db.RegisterEnumDescriptor(_INDEXSTATE)
IndexState = enum_type_wrapper.EnumTypeWrapper(_INDEXSTATE)
_SEGMENTSTATE = _descriptor.EnumDescriptor(
name='SegmentState',
full_name='milvus.proto.common.SegmentState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SegmentStateNone', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NotExist', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Growing', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sealed', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Flushed', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1029,
serialized_end=1117,
)
_sym_db.RegisterEnumDescriptor(_SEGMENTSTATE)
SegmentState = enum_type_wrapper.EnumTypeWrapper(_SEGMENTSTATE)
_MSGTYPE = _descriptor.EnumDescriptor(
name='MsgType',
full_name='milvus.proto.common.MsgType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Undefined', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CreateCollection', index=1, number=100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DropCollection', index=2, number=101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HasCollection', index=3, number=102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DescribeCollection', index=4, number=103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ShowCollections', index=5, number=104,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GetSystemConfigs', index=6, number=105,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LoadCollection', index=7, number=106,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ReleaseCollection', index=8, number=107,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CreatePartition', index=9, number=200,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DropPartition', index=10, number=201,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HasPartition', index=11, number=202,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DescribePartition', index=12, number=203,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ShowPartitions', index=13, number=204,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LoadPartitions', index=14, number=205,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ReleasePartitions', index=15, number=206,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ShowSegments', index=16, number=250,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DescribeSegment', index=17, number=251,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CreateIndex', index=18, number=300,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DescribeIndex', index=19, number=301,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DropIndex', index=20, number=302,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Insert', index=21, number=400,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Delete', index=22, number=401,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Flush', index=23, number=402,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Search', index=24, number=500,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SearchResult', index=25, number=501,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GetIndexState', index=26, number=502,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GetIndexBuildProgress', index=27, number=503,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GetCollectionStatistics', index=28, number=504,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GetPartitionStatistics', index=29, number=505,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Retrieve', index=30, number=506,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RetrieveResult', index=31, number=507,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SegmentInfo', index=32, number=600,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TimeTick', index=33, number=1200,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QueryNodeStats', index=34, number=1201,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LoadIndex', index=35, number=1202,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RequestID', index=36, number=1203,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RequestTSO', index=37, number=1204,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AllocateSegment', index=38, number=1205,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SegmentStatistics', index=39, number=1206,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SegmentFlushDone', index=40, number=1207,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1120,
serialized_end=1946,
)
_sym_db.RegisterEnumDescriptor(_MSGTYPE)
MsgType = enum_type_wrapper.EnumTypeWrapper(_MSGTYPE)
_DSLTYPE = _descriptor.EnumDescriptor(
name='DslType',
full_name='milvus.proto.common.DslType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Dsl', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BoolExprV1', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1948,
serialized_end=1982,
)
_sym_db.RegisterEnumDescriptor(_DSLTYPE)
DslType = enum_type_wrapper.EnumTypeWrapper(_DSLTYPE)
Success = 0
UnexpectedError = 1
ConnectFailed = 2
PermissionDenied = 3
CollectionNotExists = 4
IllegalArgument = 5
IllegalDimension = 7
IllegalIndexType = 8
IllegalCollectionName = 9
IllegalTOPK = 10
IllegalRowRecord = 11
IllegalVectorID = 12
IllegalSearchResult = 13
FileNotFound = 14
MetaFailed = 15
CacheFailed = 16
CannotCreateFolder = 17
CannotCreateFile = 18
CannotDeleteFolder = 19
CannotDeleteFile = 20
BuildIndexError = 21
IllegalNLIST = 22
IllegalMetricType = 23
OutOfMemory = 24
IndexNotExist = 25
DDRequestRace = 1000
IndexStateNone = 0
Unissued = 1
InProgress = 2
Finished = 3
Failed = 4
SegmentStateNone = 0
NotExist = 1
Growing = 2
Sealed = 3
Flushed = 4
Undefined = 0
CreateCollection = 100
DropCollection = 101
HasCollection = 102
DescribeCollection = 103
ShowCollections = 104
GetSystemConfigs = 105
LoadCollection = 106
ReleaseCollection = 107
CreatePartition = 200
DropPartition = 201
HasPartition = 202
DescribePartition = 203
ShowPartitions = 204
LoadPartitions = 205
ReleasePartitions = 206
ShowSegments = 250
DescribeSegment = 251
CreateIndex = 300
DescribeIndex = 301
DropIndex = 302
Insert = 400
Delete = 401
Flush = 402
Search = 500
SearchResult = 501
GetIndexState = 502
GetIndexBuildProgress = 503
GetCollectionStatistics = 504
GetPartitionStatistics = 505
Retrieve = 506
RetrieveResult = 507
SegmentInfo = 600
TimeTick = 1200
QueryNodeStats = 1201
LoadIndex = 1202
RequestID = 1203
RequestTSO = 1204
AllocateSegment = 1205
SegmentStatistics = 1206
SegmentFlushDone = 1207
Dsl = 0
BoolExprV1 = 1
_STATUS = _descriptor.Descriptor(
name='Status',
full_name='milvus.proto.common.Status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='error_code', full_name='milvus.proto.common.Status.error_code', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='milvus.proto.common.Status.reason', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=37,
serialized_end=113,
)
_KEYVALUEPAIR = _descriptor.Descriptor(
name='KeyValuePair',
full_name='milvus.proto.common.KeyValuePair',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='milvus.proto.common.KeyValuePair.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='milvus.proto.common.KeyValuePair.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=115,
serialized_end=157,
)
_BLOB = _descriptor.Descriptor(
name='Blob',
full_name='milvus.proto.common.Blob',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='milvus.proto.common.Blob.value', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=159,
serialized_end=180,
)
_ADDRESS = _descriptor.Descriptor(
name='Address',
full_name='milvus.proto.common.Address',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ip', full_name='milvus.proto.common.Address.ip', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='port', full_name='milvus.proto.common.Address.port', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=182,
serialized_end=217,
)
_MSGBASE = _descriptor.Descriptor(
name='MsgBase',
full_name='milvus.proto.common.MsgBase',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='msg_type', full_name='milvus.proto.common.MsgBase.msg_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='msgID', full_name='milvus.proto.common.MsgBase.msgID', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp', full_name='milvus.proto.common.MsgBase.timestamp', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sourceID', full_name='milvus.proto.common.MsgBase.sourceID', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=219,
serialized_end=328,
)
_MSGHEADER = _descriptor.Descriptor(
name='MsgHeader',
full_name='milvus.proto.common.MsgHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base', full_name='milvus.proto.common.MsgHeader.base', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=330,
serialized_end=385,
)
_STATUS.fields_by_name['error_code'].enum_type = _ERRORCODE
_MSGBASE.fields_by_name['msg_type'].enum_type = _MSGTYPE
_MSGHEADER.fields_by_name['base'].message_type = _MSGBASE
DESCRIPTOR.message_types_by_name['Status'] = _STATUS
DESCRIPTOR.message_types_by_name['KeyValuePair'] = _KEYVALUEPAIR
DESCRIPTOR.message_types_by_name['Blob'] = _BLOB
DESCRIPTOR.message_types_by_name['Address'] = _ADDRESS
DESCRIPTOR.message_types_by_name['MsgBase'] = _MSGBASE
DESCRIPTOR.message_types_by_name['MsgHeader'] = _MSGHEADER
DESCRIPTOR.enum_types_by_name['ErrorCode'] = _ERRORCODE
DESCRIPTOR.enum_types_by_name['IndexState'] = _INDEXSTATE
DESCRIPTOR.enum_types_by_name['SegmentState'] = _SEGMENTSTATE
DESCRIPTOR.enum_types_by_name['MsgType'] = _MSGTYPE
DESCRIPTOR.enum_types_by_name['DslType'] = _DSLTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
'DESCRIPTOR' : _STATUS,
'__module__' : 'common_pb2'
# @@protoc_insertion_point(class_scope:milvus.proto.common.Status)
})
_sym_db.RegisterMessage(Status)
KeyValuePair = _reflection.GeneratedProtocolMessageType('KeyValuePair', (_message.Message,), {
'DESCRIPTOR' : _KEYVALUEPAIR,
'__module__' : 'common_pb2'
# @@protoc_insertion_point(class_scope:milvus.proto.common.KeyValuePair)
})
_sym_db.RegisterMessage(KeyValuePair)
Blob = _reflection.GeneratedProtocolMessageType('Blob', (_message.Message,), {
'DESCRIPTOR' : _BLOB,
'__module__' : 'common_pb2'
# @@protoc_insertion_point(class_scope:milvus.proto.common.Blob)
})
_sym_db.RegisterMessage(Blob)
Address = _reflection.GeneratedProtocolMessageType('Address', (_message.Message,), {
'DESCRIPTOR' : _ADDRESS,
'__module__' : 'common_pb2'
# @@protoc_insertion_point(class_scope:milvus.proto.common.Address)
})
_sym_db.RegisterMessage(Address)
MsgBase = _reflection.GeneratedProtocolMessageType('MsgBase', (_message.Message,), {
'DESCRIPTOR' : _MSGBASE,
'__module__' : 'common_pb2'
# @@protoc_insertion_point(class_scope:milvus.proto.common.MsgBase)
})
_sym_db.RegisterMessage(MsgBase)
MsgHeader = _reflection.GeneratedProtocolMessageType('MsgHeader', (_message.Message,), {
'DESCRIPTOR' : _MSGHEADER,
'__module__' : 'common_pb2'
# @@protoc_insertion_point(class_scope:milvus.proto.common.MsgHeader)
})
_sym_db.RegisterMessage(MsgHeader)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
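# Hedged usage sketch appended for illustration (not produced by protoc): build
# a couple of the messages registered above and round-trip one through
# serialization. Field values are arbitrary examples.
if __name__ == '__main__':
    status = Status(error_code=Success, reason='ok')
    base = MsgBase(msg_type=Insert, msgID=1, timestamp=0, sourceID=42)
    header = MsgHeader(base=base)
    assert MsgHeader.FromString(header.SerializeToString()) == header
    print(status)
    print(header)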
| 35.248731
| 3,809
| 0.730019
|
a614822dffae3b980d1275df12c2b261f70f1279
| 610
|
py
|
Python
|
CF_Functions/Arcade/boxblur.py
|
glickmac/Misc_Scripts
|
7e18be79b84a309a1e79935f4470ea915141938d
|
[
"MIT"
] | null | null | null |
CF_Functions/Arcade/boxblur.py
|
glickmac/Misc_Scripts
|
7e18be79b84a309a1e79935f4470ea915141938d
|
[
"MIT"
] | null | null | null |
CF_Functions/Arcade/boxblur.py
|
glickmac/Misc_Scripts
|
7e18be79b84a309a1e79935f4470ea915141938d
|
[
"MIT"
] | 1
|
2020-07-30T17:37:12.000Z
|
2020-07-30T17:37:12.000Z
|
def find_all_around(arr, row, col):
top_left = arr[row-1][col-1]
top = arr[row-1][col]
top_right = arr[row-1][col+1]
    right = arr[row][col+1]
bot_right = arr[row+1][col+1]
bot = arr[row+1][col]
bot_left = arr[row+1][col-1]
left = arr[row][col-1]
res = [top_left, top, top_right, right, bot_right, bot, bot_left, left]
return sum(res)
def boxBlur(arr):
col = len(arr[0])
row = len(arr)
res = []
for i in range(1,row-1):
mew = []
for j in range(1, col-1):
mew.append((find_all_around(arr, i, j) + arr[i][j])//9)
res.append(mew)
return res
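# Minimal usage sketch appended for illustration: blur a 4x4 grid down to 2x2;
# each output cell is the integer mean of the 3x3 neighbourhood around the
# corresponding interior cell.
if __name__ == "__main__":
    image = [[7, 4, 0, 1],
             [5, 6, 2, 2],
             [6, 10, 7, 8],
             [1, 4, 2, 0]]
    print(boxBlur(image))  # -> [[5, 4], [4, 4]]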
| 25.416667
| 74
| 0.57377
|
da3c2923f5b21bb9de8cea20e3cf8e4a4f1116f3
| 36,516
|
py
|
Python
|
venv/Lib/site-packages/mistune.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 1,318
|
2019-07-11T10:34:39.000Z
|
2022-03-29T15:05:19.000Z
|
venv/Lib/site-packages/mistune.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 387
|
2019-09-05T16:33:09.000Z
|
2022-03-31T10:43:39.000Z
|
venv/Lib/site-packages/mistune.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 66
|
2019-11-11T15:33:12.000Z
|
2022-03-01T07:55:55.000Z
|
# coding: utf-8
"""
mistune
~~~~~~~
The fastest markdown parser in pure Python with renderer feature.
:copyright: (c) 2014 - 2018 by Hsiaoming Yang.
"""
import re
import inspect
__version__ = '0.8.4'
__author__ = 'Hsiaoming Yang <me@lepture.com>'
__all__ = [
'BlockGrammar', 'BlockLexer',
'InlineGrammar', 'InlineLexer',
'Renderer', 'Markdown',
'markdown', 'escape',
]
_key_pattern = re.compile(r'\s+')
_nonalpha_pattern = re.compile(r'\W')
_escape_pattern = re.compile(r'&(?!#?\w+;)')
_newline_pattern = re.compile(r'\r\n|\r')
_block_quote_leading_pattern = re.compile(r'^ *> ?', flags=re.M)
_block_code_leading_pattern = re.compile(r'^ {4}', re.M)
_inline_tags = [
'a', 'em', 'strong', 'small', 's', 'cite', 'q', 'dfn', 'abbr', 'data',
'time', 'code', 'var', 'samp', 'kbd', 'sub', 'sup', 'i', 'b', 'u', 'mark',
'ruby', 'rt', 'rp', 'bdi', 'bdo', 'span', 'br', 'wbr', 'ins', 'del',
'img', 'font',
]
_pre_tags = ['pre', 'script', 'style']
_valid_end = r'(?!:/|[^\w\s@]*@)\b'
_valid_attr = r'''\s*[a-zA-Z\-](?:\s*\=\s*(?:"[^"]*"|'[^']*'|[^\s'">]+))?'''
_block_tag = r'(?!(?:%s)\b)\w+%s' % ('|'.join(_inline_tags), _valid_end)
_scheme_blacklist = ('javascript:', 'vbscript:')
def _pure_pattern(regex):
pattern = regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
return pattern
def _keyify(key):
key = escape(key.lower(), quote=True)
return _key_pattern.sub(' ', key)
def escape(text, quote=False, smart_amp=True):
"""Replace special characters "&", "<" and ">" to HTML-safe sequences.
The original cgi.escape will always escape "&", but you can control
this one for a smart escape amp.
:param quote: if set to True, " and ' will be escaped.
:param smart_amp: if set to False, & will always be escaped.
"""
if smart_amp:
        text = _escape_pattern.sub('&amp;', text)
    else:
        text = text.replace('&', '&amp;')
    text = text.replace('<', '&lt;')
    text = text.replace('>', '&gt;')
    if quote:
        text = text.replace('"', '&quot;')
        text = text.replace("'", '&#39;')
return text
def escape_link(url):
"""Remove dangerous URL schemes like javascript: and escape afterwards."""
lower_url = url.lower().strip('\x00\x1a \n\r\t')
for scheme in _scheme_blacklist:
if re.sub(r'[^A-Za-z0-9\/:]+', '', lower_url).startswith(scheme):
return ''
return escape(url, quote=True, smart_amp=False)
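# Illustrative expected values for the two helpers above (a sketch, not part of
# the upstream module; the results follow from the replacements and the
# _scheme_blacklist defined earlier):
#
#   >>> escape('<b> & </b>')                 # bare '&' is escaped, tags too
#   '&lt;b&gt; &amp; &lt;/b&gt;'
#   >>> escape('&copy;')                     # an existing entity is kept as-is
#   '&copy;'
#   >>> escape('&copy;', smart_amp=False)    # unless smart amp is disabled
#   '&amp;copy;'
#   >>> escape_link('javascript:alert(1)')   # blacklisted scheme is dropped
#   ''
#   >>> escape_link('https://example.com/?a=1&b=2')
#   'https://example.com/?a=1&amp;b=2'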
def preprocessing(text, tab=4):
text = _newline_pattern.sub('\n', text)
text = text.expandtabs(tab)
text = text.replace('\u2424', '\n')
pattern = re.compile(r'^ +$', re.M)
return pattern.sub('', text)
class BlockGrammar(object):
"""Grammars for block level tokens."""
def_links = re.compile(
r'^ *\[([^^\]]+)\]: *' # [key]:
r'<?([^\s>]+)>?' # <link> or link
r'(?: +["(]([^\n]+)[")])? *(?:\n+|$)'
)
def_footnotes = re.compile(
r'^\[\^([^\]]+)\]: *('
r'[^\n]*(?:\n+|$)' # [^key]:
r'(?: {1,}[^\n]*(?:\n+|$))*'
r')'
)
newline = re.compile(r'^\n+')
block_code = re.compile(r'^( {4}[^\n]+\n*)+')
fences = re.compile(
r'^ *(`{3,}|~{3,}) *([^`\s]+)? *\n' # ```lang
r'([\s\S]+?)\s*'
r'\1 *(?:\n+|$)' # ```
)
hrule = re.compile(r'^ {0,3}[-*_](?: *[-*_]){2,} *(?:\n+|$)')
heading = re.compile(r'^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)')
lheading = re.compile(r'^([^\n]+)\n *(=|-)+ *(?:\n+|$)')
block_quote = re.compile(r'^( *>[^\n]+(\n[^\n]+)*\n*)+')
list_block = re.compile(
r'^( *)(?=[*+-]|\d+\.)(([*+-])?(?:\d+\.)?) [\s\S]+?'
r'(?:'
r'\n+(?=\1?(?:[-*_] *){3,}(?:\n+|$))' # hrule
r'|\n+(?=%s)' # def links
r'|\n+(?=%s)' # def footnotes\
r'|\n+(?=\1(?(3)\d+\.|[*+-]) )' # heterogeneous bullet
r'|\n{2,}'
r'(?! )'
r'(?!\1(?:[*+-]|\d+\.) )\n*'
r'|'
r'\s*$)' % (
_pure_pattern(def_links),
_pure_pattern(def_footnotes),
)
)
list_item = re.compile(
r'^(( *)(?:[*+-]|\d+\.) [^\n]*'
r'(?:\n(?!\2(?:[*+-]|\d+\.) )[^\n]*)*)',
flags=re.M
)
list_bullet = re.compile(r'^ *(?:[*+-]|\d+\.) +')
paragraph = re.compile(
r'^((?:[^\n]+\n?(?!'
r'%s|%s|%s|%s|%s|%s|%s|%s|%s'
r'))+)\n*' % (
_pure_pattern(fences).replace(r'\1', r'\2'),
_pure_pattern(list_block).replace(r'\1', r'\3'),
_pure_pattern(hrule),
_pure_pattern(heading),
_pure_pattern(lheading),
_pure_pattern(block_quote),
_pure_pattern(def_links),
_pure_pattern(def_footnotes),
'<' + _block_tag,
)
)
block_html = re.compile(
r'^ *(?:%s|%s|%s) *(?:\n{2,}|\s*$)' % (
r'<!--[\s\S]*?-->',
r'<(%s)((?:%s)*?)>([\s\S]*?)<\/\1>' % (_block_tag, _valid_attr),
r'<%s(?:%s)*?\s*\/?>' % (_block_tag, _valid_attr),
)
)
table = re.compile(
r'^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*'
)
nptable = re.compile(
r'^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*'
)
text = re.compile(r'^[^\n]+')
class BlockLexer(object):
"""Block level lexer for block grammars."""
grammar_class = BlockGrammar
default_rules = [
'newline', 'hrule', 'block_code', 'fences', 'heading',
'nptable', 'lheading', 'block_quote',
'list_block', 'block_html', 'def_links',
'def_footnotes', 'table', 'paragraph', 'text'
]
list_rules = (
'newline', 'block_code', 'fences', 'lheading', 'hrule',
'block_quote', 'list_block', 'block_html', 'text',
)
footnote_rules = (
'newline', 'block_code', 'fences', 'heading',
'nptable', 'lheading', 'hrule', 'block_quote',
'list_block', 'block_html', 'table', 'paragraph', 'text'
)
def __init__(self, rules=None, **kwargs):
self.tokens = []
self.def_links = {}
self.def_footnotes = {}
if not rules:
rules = self.grammar_class()
self.rules = rules
self._max_recursive_depth = kwargs.get('max_recursive_depth', 6)
self._list_depth = 0
self._blockquote_depth = 0
def __call__(self, text, rules=None):
return self.parse(text, rules)
def parse(self, text, rules=None):
text = text.rstrip('\n')
if not rules:
rules = self.default_rules
def manipulate(text):
for key in rules:
rule = getattr(self.rules, key)
m = rule.match(text)
if not m:
continue
getattr(self, 'parse_%s' % key)(m)
return m
return False # pragma: no cover
while text:
m = manipulate(text)
if m is not False:
text = text[len(m.group(0)):]
continue
if text: # pragma: no cover
raise RuntimeError('Infinite loop at: %s' % text)
return self.tokens
def parse_newline(self, m):
length = len(m.group(0))
if length > 1:
self.tokens.append({'type': 'newline'})
def parse_block_code(self, m):
# clean leading whitespace
code = _block_code_leading_pattern.sub('', m.group(0))
self.tokens.append({
'type': 'code',
'lang': None,
'text': code,
})
def parse_fences(self, m):
self.tokens.append({
'type': 'code',
'lang': m.group(2),
'text': m.group(3),
})
def parse_heading(self, m):
self.tokens.append({
'type': 'heading',
'level': len(m.group(1)),
'text': m.group(2),
})
def parse_lheading(self, m):
"""Parse setext heading."""
self.tokens.append({
'type': 'heading',
'level': 1 if m.group(2) == '=' else 2,
'text': m.group(1),
})
def parse_hrule(self, m):
self.tokens.append({'type': 'hrule'})
def parse_list_block(self, m):
bull = m.group(2)
self.tokens.append({
'type': 'list_start',
'ordered': '.' in bull,
})
self._list_depth += 1
if self._list_depth > self._max_recursive_depth:
self.tokens.append({'type': 'list_item_start'})
self.parse_text(m)
self.tokens.append({'type': 'list_item_end'})
else:
cap = m.group(0)
self._process_list_item(cap, bull)
self.tokens.append({'type': 'list_end'})
self._list_depth -= 1
def _process_list_item(self, cap, bull):
cap = self.rules.list_item.findall(cap)
_next = False
length = len(cap)
for i in range(length):
item = cap[i][0]
# remove the bullet
space = len(item)
item = self.rules.list_bullet.sub('', item)
# outdent
if '\n ' in item:
space = space - len(item)
pattern = re.compile(r'^ {1,%d}' % space, flags=re.M)
item = pattern.sub('', item)
# determine whether item is loose or not
loose = _next
if not loose and re.search(r'\n\n(?!\s*$)', item):
loose = True
rest = len(item)
if i != length - 1 and rest:
_next = item[rest-1] == '\n'
if not loose:
loose = _next
if loose:
t = 'loose_item_start'
else:
t = 'list_item_start'
self.tokens.append({'type': t})
# recurse
self.parse(item, self.list_rules)
self.tokens.append({'type': 'list_item_end'})
def parse_block_quote(self, m):
self.tokens.append({'type': 'block_quote_start'})
self._blockquote_depth += 1
if self._blockquote_depth > self._max_recursive_depth:
self.parse_text(m)
else:
# clean leading >
cap = _block_quote_leading_pattern.sub('', m.group(0))
self.parse(cap)
self.tokens.append({'type': 'block_quote_end'})
self._blockquote_depth -= 1
def parse_def_links(self, m):
key = _keyify(m.group(1))
self.def_links[key] = {
'link': m.group(2),
'title': m.group(3),
}
def parse_def_footnotes(self, m):
key = _keyify(m.group(1))
if key in self.def_footnotes:
# footnote is already defined
return
self.def_footnotes[key] = 0
self.tokens.append({
'type': 'footnote_start',
'key': key,
})
text = m.group(2)
if '\n' in text:
lines = text.split('\n')
whitespace = None
for line in lines[1:]:
space = len(line) - len(line.lstrip())
if space and (not whitespace or space < whitespace):
whitespace = space
newlines = [lines[0]]
for line in lines[1:]:
newlines.append(line[whitespace:])
text = '\n'.join(newlines)
self.parse(text, self.footnote_rules)
self.tokens.append({
'type': 'footnote_end',
'key': key,
})
def parse_table(self, m):
item = self._process_table(m)
cells = re.sub(r'(?: *\| *)?\n$', '', m.group(3))
cells = cells.split('\n')
for i, v in enumerate(cells):
v = re.sub(r'^ *\| *| *\| *$', '', v)
cells[i] = re.split(r' *(?<!\\)\| *', v)
item['cells'] = self._process_cells(cells)
self.tokens.append(item)
def parse_nptable(self, m):
item = self._process_table(m)
cells = re.sub(r'\n$', '', m.group(3))
cells = cells.split('\n')
for i, v in enumerate(cells):
cells[i] = re.split(r' *(?<!\\)\| *', v)
item['cells'] = self._process_cells(cells)
self.tokens.append(item)
def _process_table(self, m):
header = re.sub(r'^ *| *\| *$', '', m.group(1))
header = re.split(r' *\| *', header)
align = re.sub(r' *|\| *$', '', m.group(2))
align = re.split(r' *\| *', align)
for i, v in enumerate(align):
if re.search(r'^ *-+: *$', v):
align[i] = 'right'
elif re.search(r'^ *:-+: *$', v):
align[i] = 'center'
elif re.search(r'^ *:-+ *$', v):
align[i] = 'left'
else:
align[i] = None
item = {
'type': 'table',
'header': header,
'align': align,
}
return item
def _process_cells(self, cells):
for i, line in enumerate(cells):
for c, cell in enumerate(line):
# de-escape any pipe inside the cell here
cells[i][c] = re.sub('\\\\\|', '|', cell)
return cells
def parse_block_html(self, m):
tag = m.group(1)
if not tag:
text = m.group(0)
self.tokens.append({
'type': 'close_html',
'text': text
})
else:
attr = m.group(2)
text = m.group(3)
self.tokens.append({
'type': 'open_html',
'tag': tag,
'extra': attr,
'text': text
})
def parse_paragraph(self, m):
text = m.group(1).rstrip('\n')
self.tokens.append({'type': 'paragraph', 'text': text})
def parse_text(self, m):
text = m.group(0)
self.tokens.append({'type': 'text', 'text': text})
class InlineGrammar(object):
"""Grammars for inline level tokens."""
escape = re.compile(r'^\\([\\`*{}\[\]()#+\-.!_>~|])') # \* \+ \! ....
inline_html = re.compile(
r'^(?:%s|%s|%s)' % (
r'<!--[\s\S]*?-->',
r'<(\w+%s)((?:%s)*?)\s*>([\s\S]*?)<\/\1>' % (
_valid_end, _valid_attr),
r'<\w+%s(?:%s)*?\s*\/?>' % (_valid_end, _valid_attr),
)
)
autolink = re.compile(r'^<([^ >]+(@|:)[^ >]+)>')
link = re.compile(
r'^!?\[('
r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*'
r')\]\('
r'''\s*(<)?([\s\S]*?)(?(2)>)(?:\s+['"]([\s\S]*?)['"])?\s*'''
r'\)'
)
reflink = re.compile(
r'^!?\[('
r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*'
r')\]\s*\[([^^\]]*)\]'
)
nolink = re.compile(r'^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]')
url = re.compile(r'''^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])''')
double_emphasis = re.compile(
r'^_{2}([\s\S]+?)_{2}(?!_)' # __word__
r'|'
r'^\*{2}([\s\S]+?)\*{2}(?!\*)' # **word**
)
emphasis = re.compile(
r'^\b_((?:__|[^_])+?)_\b' # _word_
r'|'
r'^\*((?:\*\*|[^\*])+?)\*(?!\*)' # *word*
)
code = re.compile(r'^(`+)\s*([\s\S]*?[^`])\s*\1(?!`)') # `code`
linebreak = re.compile(r'^ {2,}\n(?!\s*$)')
strikethrough = re.compile(r'^~~(?=\S)([\s\S]*?\S)~~') # ~~word~~
footnote = re.compile(r'^\[\^([^\]]+)\]')
text = re.compile(r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| {2,}\n|$)')
def hard_wrap(self):
"""Grammar for hard wrap linebreak. You don't need to add two
spaces at the end of a line.
"""
self.linebreak = re.compile(r'^ *\n(?!\s*$)')
self.text = re.compile(
r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| *\n|$)'
)
class InlineLexer(object):
"""Inline level lexer for inline grammars."""
grammar_class = InlineGrammar
default_rules = [
'escape', 'inline_html', 'autolink', 'url',
'footnote', 'link', 'reflink', 'nolink',
'double_emphasis', 'emphasis', 'code',
'linebreak', 'strikethrough', 'text',
]
inline_html_rules = [
'escape', 'inline_html', 'autolink', 'url', 'link', 'reflink',
'nolink', 'double_emphasis', 'emphasis', 'code',
'linebreak', 'strikethrough', 'text',
]
def __init__(self, renderer, rules=None, **kwargs):
self.renderer = renderer
self.links = {}
self.footnotes = {}
self.footnote_index = 0
if not rules:
rules = self.grammar_class()
kwargs.update(self.renderer.options)
if kwargs.get('hard_wrap'):
rules.hard_wrap()
self.rules = rules
self._in_link = False
self._in_footnote = False
self._parse_inline_html = kwargs.get('parse_inline_html')
def __call__(self, text, rules=None):
return self.output(text, rules)
def setup(self, links, footnotes):
self.footnote_index = 0
self.links = links or {}
self.footnotes = footnotes or {}
def output(self, text, rules=None):
text = text.rstrip('\n')
if not rules:
rules = list(self.default_rules)
if self._in_footnote and 'footnote' in rules:
rules.remove('footnote')
output = self.renderer.placeholder()
def manipulate(text):
for key in rules:
pattern = getattr(self.rules, key)
m = pattern.match(text)
if not m:
continue
self.line_match = m
out = getattr(self, 'output_%s' % key)(m)
if out is not None:
return m, out
return False # pragma: no cover
while text:
ret = manipulate(text)
if ret is not False:
m, out = ret
output += out
text = text[len(m.group(0)):]
continue
if text: # pragma: no cover
raise RuntimeError('Infinite loop at: %s' % text)
return output
def output_escape(self, m):
text = m.group(1)
return self.renderer.escape(text)
def output_autolink(self, m):
link = m.group(1)
if m.group(2) == '@':
is_email = True
else:
is_email = False
return self.renderer.autolink(link, is_email)
def output_url(self, m):
link = m.group(1)
if self._in_link:
return self.renderer.text(link)
return self.renderer.autolink(link, False)
def output_inline_html(self, m):
tag = m.group(1)
if self._parse_inline_html and tag in _inline_tags:
text = m.group(3)
if tag == 'a':
self._in_link = True
text = self.output(text, rules=self.inline_html_rules)
self._in_link = False
else:
text = self.output(text, rules=self.inline_html_rules)
extra = m.group(2) or ''
html = '<%s%s>%s</%s>' % (tag, extra, text, tag)
else:
html = m.group(0)
return self.renderer.inline_html(html)
def output_footnote(self, m):
key = _keyify(m.group(1))
if key not in self.footnotes:
return None
if self.footnotes[key]:
return None
self.footnote_index += 1
self.footnotes[key] = self.footnote_index
return self.renderer.footnote_ref(key, self.footnote_index)
def output_link(self, m):
return self._process_link(m, m.group(3), m.group(4))
def output_reflink(self, m):
key = _keyify(m.group(2) or m.group(1))
if key not in self.links:
return None
ret = self.links[key]
return self._process_link(m, ret['link'], ret['title'])
def output_nolink(self, m):
key = _keyify(m.group(1))
if key not in self.links:
return None
ret = self.links[key]
return self._process_link(m, ret['link'], ret['title'])
def _process_link(self, m, link, title=None):
line = m.group(0)
text = m.group(1)
if line[0] == '!':
return self.renderer.image(link, title, text)
self._in_link = True
text = self.output(text)
self._in_link = False
return self.renderer.link(link, title, text)
def output_double_emphasis(self, m):
text = m.group(2) or m.group(1)
text = self.output(text)
return self.renderer.double_emphasis(text)
def output_emphasis(self, m):
text = m.group(2) or m.group(1)
text = self.output(text)
return self.renderer.emphasis(text)
def output_code(self, m):
text = m.group(2)
return self.renderer.codespan(text)
def output_linebreak(self, m):
return self.renderer.linebreak()
def output_strikethrough(self, m):
text = self.output(m.group(1))
return self.renderer.strikethrough(text)
def output_text(self, m):
text = m.group(0)
return self.renderer.text(text)
class Renderer(object):
"""The default HTML renderer for rendering Markdown.
"""
def __init__(self, **kwargs):
self.options = kwargs
def placeholder(self):
"""Returns the default, empty output value for the renderer.
All renderer methods use the '+=' operator to append to this value.
Default is a string so rendering HTML can build up a result string with
the rendered Markdown.
Can be overridden by Renderer subclasses to be types like an empty
list, allowing the renderer to create a tree-like structure to
represent the document (which can then be reprocessed later into a
separate format like docx or pdf).
"""
return ''
def block_code(self, code, lang=None):
"""Rendering block level code. ``pre > code``.
:param code: text content of the code block.
:param lang: language of the given code.
"""
code = code.rstrip('\n')
if not lang:
code = escape(code, smart_amp=False)
return '<pre><code>%s\n</code></pre>\n' % code
code = escape(code, quote=True, smart_amp=False)
return '<pre><code class="lang-%s">%s\n</code></pre>\n' % (lang, code)
def block_quote(self, text):
"""Rendering <blockquote> with the given text.
:param text: text content of the blockquote.
"""
return '<blockquote>%s\n</blockquote>\n' % text.rstrip('\n')
def block_html(self, html):
"""Rendering block level pure html content.
:param html: text content of the html snippet.
"""
if self.options.get('skip_style') and \
html.lower().startswith('<style'):
return ''
if self.options.get('escape'):
return escape(html)
return html
def header(self, text, level, raw=None):
"""Rendering header/heading tags like ``<h1>`` ``<h2>``.
:param text: rendered text content for the header.
:param level: a number for the header level, for example: 1.
:param raw: raw text content of the header.
"""
return '<h%d>%s</h%d>\n' % (level, text, level)
def hrule(self):
"""Rendering method for ``<hr>`` tag."""
if self.options.get('use_xhtml'):
return '<hr />\n'
return '<hr>\n'
def list(self, body, ordered=True):
"""Rendering list tags like ``<ul>`` and ``<ol>``.
:param body: body contents of the list.
:param ordered: whether this list is ordered or not.
"""
tag = 'ul'
if ordered:
tag = 'ol'
return '<%s>\n%s</%s>\n' % (tag, body, tag)
def list_item(self, text):
"""Rendering list item snippet. Like ``<li>``."""
return '<li>%s</li>\n' % text
def paragraph(self, text):
"""Rendering paragraph tags. Like ``<p>``."""
return '<p>%s</p>\n' % text.strip(' ')
def table(self, header, body):
"""Rendering table element. Wrap header and body in it.
:param header: header part of the table.
:param body: body part of the table.
"""
return (
'<table>\n<thead>%s</thead>\n'
'<tbody>\n%s</tbody>\n</table>\n'
) % (header, body)
def table_row(self, content):
"""Rendering a table row. Like ``<tr>``.
:param content: content of current table row.
"""
return '<tr>\n%s</tr>\n' % content
def table_cell(self, content, **flags):
"""Rendering a table cell. Like ``<th>`` ``<td>``.
:param content: content of current table cell.
:param header: whether this is header or not.
:param align: align of current table cell.
"""
if flags['header']:
tag = 'th'
else:
tag = 'td'
align = flags['align']
if not align:
return '<%s>%s</%s>\n' % (tag, content, tag)
return '<%s style="text-align:%s">%s</%s>\n' % (
tag, align, content, tag
)
def double_emphasis(self, text):
"""Rendering **strong** text.
:param text: text content for emphasis.
"""
return '<strong>%s</strong>' % text
def emphasis(self, text):
"""Rendering *emphasis* text.
:param text: text content for emphasis.
"""
return '<em>%s</em>' % text
def codespan(self, text):
"""Rendering inline `code` text.
:param text: text content for inline code.
"""
text = escape(text.rstrip(), smart_amp=False)
return '<code>%s</code>' % text
def linebreak(self):
"""Rendering line break like ``<br>``."""
if self.options.get('use_xhtml'):
return '<br />\n'
return '<br>\n'
def strikethrough(self, text):
"""Rendering ~~strikethrough~~ text.
:param text: text content for strikethrough.
"""
return '<del>%s</del>' % text
def text(self, text):
"""Rendering unformatted text.
:param text: text content.
"""
if self.options.get('parse_block_html'):
return text
return escape(text)
def escape(self, text):
"""Rendering escape sequence.
:param text: text content.
"""
return escape(text)
def autolink(self, link, is_email=False):
"""Rendering a given link or email address.
:param link: link content or email address.
:param is_email: whether this is an email or not.
"""
text = link = escape_link(link)
if is_email:
link = 'mailto:%s' % link
return '<a href="%s">%s</a>' % (link, text)
def link(self, link, title, text):
"""Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description.
"""
link = escape_link(link)
if not title:
return '<a href="%s">%s</a>' % (link, text)
title = escape(title, quote=True)
return '<a href="%s" title="%s">%s</a>' % (link, title, text)
def image(self, src, title, text):
"""Rendering a image with title and text.
:param src: source link of the image.
:param title: title text of the image.
:param text: alt text of the image.
"""
src = escape_link(src)
text = escape(text, quote=True)
if title:
title = escape(title, quote=True)
html = '<img src="%s" alt="%s" title="%s"' % (src, text, title)
else:
html = '<img src="%s" alt="%s"' % (src, text)
if self.options.get('use_xhtml'):
return '%s />' % html
return '%s>' % html
def inline_html(self, html):
"""Rendering span level pure html content.
:param html: text content of the html snippet.
"""
if self.options.get('escape'):
return escape(html)
return html
def newline(self):
"""Rendering newline element."""
return ''
def footnote_ref(self, key, index):
"""Rendering the ref anchor of a footnote.
:param key: identity key for the footnote.
:param index: the index count of current footnote.
"""
html = (
'<sup class="footnote-ref" id="fnref-%s">'
'<a href="#fn-%s">%d</a></sup>'
) % (escape(key), escape(key), index)
return html
def footnote_item(self, key, text):
"""Rendering a footnote item.
:param key: identity key for the footnote.
:param text: text content of the footnote.
"""
back = (
'<a href="#fnref-%s" class="footnote">↩</a>'
) % escape(key)
text = text.rstrip()
if text.endswith('</p>'):
text = re.sub(r'<\/p>$', r'%s</p>' % back, text)
else:
text = '%s<p>%s</p>' % (text, back)
html = '<li id="fn-%s">%s</li>\n' % (escape(key), text)
return html
def footnotes(self, text):
"""Wrapper for all footnotes.
:param text: contents of all footnotes.
"""
html = '<div class="footnotes">\n%s<ol>%s</ol>\n</div>\n'
return html % (self.hrule(), text)
class Markdown(object):
"""The Markdown parser.
:param renderer: An instance of ``Renderer``.
:param inline: An inline lexer class or instance.
:param block: A block lexer class or instance.
"""
def __init__(self, renderer=None, inline=None, block=None, **kwargs):
if not renderer:
renderer = Renderer(**kwargs)
else:
kwargs.update(renderer.options)
self.renderer = renderer
if inline and inspect.isclass(inline):
inline = inline(renderer, **kwargs)
if block and inspect.isclass(block):
block = block(**kwargs)
if inline:
self.inline = inline
else:
self.inline = InlineLexer(renderer, **kwargs)
self.block = block or BlockLexer(BlockGrammar())
self.footnotes = []
self.tokens = []
# detect if it should parse text in block html
self._parse_block_html = kwargs.get('parse_block_html')
def __call__(self, text):
return self.parse(text)
def render(self, text):
"""Render the Markdown text.
:param text: markdown formatted text content.
"""
return self.parse(text)
def parse(self, text):
out = self.output(preprocessing(text))
keys = self.block.def_footnotes
# reset block
self.block.def_links = {}
self.block.def_footnotes = {}
# reset inline
self.inline.links = {}
self.inline.footnotes = {}
if not self.footnotes:
return out
footnotes = filter(lambda o: keys.get(o['key']), self.footnotes)
self.footnotes = sorted(
footnotes, key=lambda o: keys.get(o['key']), reverse=True
)
body = self.renderer.placeholder()
while self.footnotes:
note = self.footnotes.pop()
body += self.renderer.footnote_item(
note['key'], note['text']
)
out += self.renderer.footnotes(body)
return out
def pop(self):
if not self.tokens:
return None
self.token = self.tokens.pop()
return self.token
def peek(self):
if self.tokens:
return self.tokens[-1]
return None # pragma: no cover
def output(self, text, rules=None):
self.tokens = self.block(text, rules)
self.tokens.reverse()
self.inline.setup(self.block.def_links, self.block.def_footnotes)
out = self.renderer.placeholder()
while self.pop():
out += self.tok()
return out
def tok(self):
t = self.token['type']
        # special cases
if t.endswith('_start'):
t = t[:-6]
return getattr(self, 'output_%s' % t)()
def tok_text(self):
text = self.token['text']
while self.peek()['type'] == 'text':
text += '\n' + self.pop()['text']
return self.inline(text)
def output_newline(self):
return self.renderer.newline()
def output_hrule(self):
return self.renderer.hrule()
def output_heading(self):
return self.renderer.header(
self.inline(self.token['text']),
self.token['level'],
self.token['text'],
)
def output_code(self):
return self.renderer.block_code(
self.token['text'], self.token['lang']
)
def output_table(self):
aligns = self.token['align']
aligns_length = len(aligns)
cell = self.renderer.placeholder()
# header part
header = self.renderer.placeholder()
for i, value in enumerate(self.token['header']):
align = aligns[i] if i < aligns_length else None
flags = {'header': True, 'align': align}
cell += self.renderer.table_cell(self.inline(value), **flags)
header += self.renderer.table_row(cell)
# body part
body = self.renderer.placeholder()
for i, row in enumerate(self.token['cells']):
cell = self.renderer.placeholder()
for j, value in enumerate(row):
align = aligns[j] if j < aligns_length else None
flags = {'header': False, 'align': align}
cell += self.renderer.table_cell(self.inline(value), **flags)
body += self.renderer.table_row(cell)
return self.renderer.table(header, body)
def output_block_quote(self):
body = self.renderer.placeholder()
while self.pop()['type'] != 'block_quote_end':
body += self.tok()
return self.renderer.block_quote(body)
def output_list(self):
ordered = self.token['ordered']
body = self.renderer.placeholder()
while self.pop()['type'] != 'list_end':
body += self.tok()
return self.renderer.list(body, ordered)
def output_list_item(self):
body = self.renderer.placeholder()
while self.pop()['type'] != 'list_item_end':
if self.token['type'] == 'text':
body += self.tok_text()
else:
body += self.tok()
return self.renderer.list_item(body)
def output_loose_item(self):
body = self.renderer.placeholder()
while self.pop()['type'] != 'list_item_end':
body += self.tok()
return self.renderer.list_item(body)
def output_footnote(self):
self.inline._in_footnote = True
body = self.renderer.placeholder()
key = self.token['key']
while self.pop()['type'] != 'footnote_end':
body += self.tok()
self.footnotes.append({'key': key, 'text': body})
self.inline._in_footnote = False
return self.renderer.placeholder()
def output_close_html(self):
text = self.token['text']
return self.renderer.block_html(text)
def output_open_html(self):
text = self.token['text']
tag = self.token['tag']
if self._parse_block_html and tag not in _pre_tags:
text = self.inline(text, rules=self.inline.inline_html_rules)
extra = self.token.get('extra') or ''
html = '<%s%s>%s</%s>' % (tag, extra, text, tag)
return self.renderer.block_html(html)
def output_paragraph(self):
return self.renderer.paragraph(self.inline(self.token['text']))
def output_text(self):
return self.renderer.paragraph(self.tok_text())
def markdown(text, escape=True, **kwargs):
"""Render markdown formatted text to html.
:param text: markdown formatted text content.
:param escape: if set to False, all html tags will not be escaped.
:param use_xhtml: output with xhtml tags.
:param hard_wrap: if set to True, it will use the GFM line breaks feature.
:param parse_block_html: parse text only in block level html.
:param parse_inline_html: parse text only in inline level html.
"""
return Markdown(escape=escape, **kwargs)(text)
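# Hedged usage sketch appended for illustration (not part of mistune itself):
# render a small document with the default renderer, enable the GFM-style
# hard_wrap option, and override one Renderer method. The sample strings are
# arbitrary.
if __name__ == '__main__':
    print(markdown('# Title\n\nHello **world**, see <https://example.com>.'))
    print(markdown('line one\nline two', hard_wrap=True))

    class PlainCodeRenderer(Renderer):
        def block_code(self, code, lang=None):
            # Render indented/fenced code without the <pre> wrapper.
            return '<code>%s</code>\n' % escape(code.rstrip('\n'), smart_amp=False)

    print(Markdown(renderer=PlainCodeRenderer())('    x = 1\n'))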
| 30.81519
| 79
| 0.51257
|
8b9644fe844a47c0591d9190ef81461e054347dd
| 995
|
py
|
Python
|
rustfst-python-bench/rustfst_python_bench/algorithms/compose.py
|
llogiq/rustfst
|
3172547ad57c5ae7e1c0474fd49187a81555b516
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 97
|
2018-11-22T00:55:26.000Z
|
2022-03-28T13:45:41.000Z
|
rustfst-python-bench/rustfst_python_bench/algorithms/compose.py
|
llogiq/rustfst
|
3172547ad57c5ae7e1c0474fd49187a81555b516
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 41
|
2019-08-02T23:29:45.000Z
|
2022-03-09T15:05:31.000Z
|
rustfst-python-bench/rustfst_python_bench/algorithms/compose.py
|
llogiq/rustfst
|
3172547ad57c5ae7e1c0474fd49187a81555b516
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2018-10-22T22:06:19.000Z
|
2022-01-10T10:48:26.000Z
|
from rustfst_python_bench.utils import check_fst_equals
class ComposeAlgorithm:
def __init__(self, compose_type="default"):
self.compose_type = compose_type
@classmethod
def openfst_cli(cls):
return "fstcompose"
@classmethod
def rustfst_subcommand(cls):
return "compose"
def get_openfst_bench_cli(self):
if self.compose_type == "default":
return "bench_compose", []
elif self.compose_type == "lookahead":
return "bench_compose_lookahead", []
else:
raise RuntimeError(f"Unknown compose_type={self.compose_type}")
def get_cli_args(self):
return f"--compose_type={self.compose_type}"
@classmethod
def get_parameters(cls):
compose_types = ["default", "lookahead"]
return [cls(compose_type=m) for m in compose_types]
def check_correctness(self, path_res_openfst, path_res_rustfst):
check_fst_equals(path_res_openfst, path_res_rustfst)
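# Hedged usage sketch appended for illustration (assumes the surrounding
# rustfst_python_bench package is importable): list the benchmark variants
# exposed by get_parameters() together with the CLI pieces each one supplies.
if __name__ == "__main__":
    for algo in ComposeAlgorithm.get_parameters():
        print(ComposeAlgorithm.openfst_cli(),
              ComposeAlgorithm.rustfst_subcommand(),
              algo.get_cli_args(),
              algo.get_openfst_bench_cli())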
| 28.428571
| 75
| 0.675377
|
9792dc66325ef5c04ec85ed8922eb973e0a161f2
| 9,168
|
py
|
Python
|
vif_plug_linux_bridge/linux_net.py
|
mail2nsrajesh/os-vif
|
6a9017d1dcf2a0a4ab8bf35f39d4bfb7cb56027d
|
[
"Apache-2.0"
] | null | null | null |
vif_plug_linux_bridge/linux_net.py
|
mail2nsrajesh/os-vif
|
6a9017d1dcf2a0a4ab8bf35f39d4bfb7cb56027d
|
[
"Apache-2.0"
] | null | null | null |
vif_plug_linux_bridge/linux_net.py
|
mail2nsrajesh/os-vif
|
6a9017d1dcf2a0a4ab8bf35f39d4bfb7cb56027d
|
[
"Apache-2.0"
] | null | null | null |
# Derived from nova/network/linux_net.py
#
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import os
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
from vif_plug_linux_bridge import privsep
LOG = logging.getLogger(__name__)
_IPTABLES_MANAGER = None
def device_exists(device):
"""Check if ethernet device exists."""
return os.path.exists('/sys/class/net/%s' % device)
def _set_device_mtu(dev, mtu):
"""Set the device MTU."""
processutils.execute('ip', 'link', 'set', dev, 'mtu', mtu,
check_exit_code=[0, 2, 254])
def _ip_bridge_cmd(action, params, device):
"""Build commands to add/del ips to bridges/devices."""
cmd = ['ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
return cmd
@privsep.vif_plug.entrypoint
def ensure_vlan_bridge(vlan_num, bridge, bridge_interface,
net_attrs=None, mac_address=None,
mtu=None):
"""Create a vlan and bridge unless they already exist."""
interface = _ensure_vlan_privileged(vlan_num, bridge_interface,
mac_address, mtu=mtu)
_ensure_bridge_privileged(bridge, interface, net_attrs)
_ensure_bridge_filtering(bridge, None)
return interface
@lockutils.synchronized('nova-lock_vlan', external=True)
def _ensure_vlan_privileged(vlan_num, bridge_interface, mac_address, mtu):
"""Create a vlan unless it already exists.
This assumes the caller is already annotated to run
with elevated privileges.
"""
interface = 'vlan%s' % vlan_num
if not device_exists(interface):
LOG.debug('Starting VLAN interface %s', interface)
processutils.execute('ip', 'link', 'add', 'link',
bridge_interface, 'name', interface, 'type',
'vlan', 'id', vlan_num,
check_exit_code=[0, 2, 254])
# (danwent) the bridge will inherit this address, so we want to
# make sure it is the value set from the NetworkManager
if mac_address:
processutils.execute('ip', 'link', 'set', interface,
'address', mac_address,
check_exit_code=[0, 2, 254])
processutils.execute('ip', 'link', 'set', interface, 'up',
check_exit_code=[0, 2, 254])
if mtu:
# NOTE(vish): set mtu every time to ensure that changes to mtu get
            # propagated
_set_device_mtu(interface, mtu)
else:
LOG.debug("MTU not set on %(interface_name)s interface",
{'interface_name': interface})
return interface
@lockutils.synchronized('nova-lock_bridge', external=True)
def ensure_bridge(bridge, interface, net_attrs=None, gateway=True,
filtering=True):
_ensure_bridge_privileged(bridge, interface, net_attrs, gateway, filtering)
if filtering:
_ensure_bridge_filtering(bridge, gateway)
@privsep.vif_plug.entrypoint
def _ensure_bridge_privileged(bridge, interface, net_attrs, gateway,
filtering=True):
"""Create a bridge unless it already exists.
:param interface: the interface to create the bridge on.
:param net_attrs: dictionary with attributes used to create bridge.
:param gateway: whether or not the bridge is a gateway.
:param filtering: whether or not to create filters on the bridge.
If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
The code will attempt to move any ips that already exist on the
interface onto the bridge and reset the default gateway if necessary.
"""
if not device_exists(bridge):
LOG.debug('Starting Bridge %s', bridge)
try:
processutils.execute('brctl', 'addbr', bridge)
except Exception:
with excutils.save_and_reraise_exception() as ectx:
ectx.reraise = not device_exists(bridge)
processutils.execute('brctl', 'setfd', bridge, 0)
# processutils.execute('brctl setageing %s 10' % bridge)
processutils.execute('brctl', 'stp', bridge, 'off')
disv6 = ('/proc/sys/net/ipv6/conf/%s/disable_ipv6' % bridge)
if os.path.exists(disv6):
processutils.execute('tee',
disv6,
process_input='1',
check_exit_code=[0, 1])
# (danwent) bridge device MAC address can't be set directly.
# instead it inherits the MAC address of the first device on the
# bridge, which will either be the vlan interface, or a
# physical NIC.
processutils.execute('ip', 'link', 'set', bridge, 'up')
if interface:
LOG.debug('Adding interface %(interface)s to bridge %(bridge)s',
{'interface': interface, 'bridge': bridge})
out, err = processutils.execute('brctl', 'addif', bridge,
interface, check_exit_code=False)
if (err and err != "device %s is already a member of a bridge; "
"can't enslave it to bridge %s.\n" % (interface, bridge)):
            msg = 'Failed to add interface: %s' % err
raise Exception(msg)
out, err = processutils.execute('ip', 'link', 'set',
interface, 'up', check_exit_code=False)
# NOTE(vish): This will break if there is already an ip on the
# interface, so we move any ips to the bridge
# NOTE(danms): We also need to copy routes to the bridge so as
# not to break existing connectivity on the interface
old_routes = []
out, err = processutils.execute('ip', 'route', 'show', 'dev',
interface)
for line in out.split('\n'):
fields = line.split()
if fields and 'via' in fields:
old_routes.append(fields)
processutils.execute('ip', 'route', 'del', *fields)
out, err = processutils.execute('ip', 'addr', 'show', 'dev', interface,
'scope', 'global')
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
if fields[-2] in ('secondary', 'dynamic', ):
params = fields[1:-2]
else:
params = fields[1:-1]
processutils.execute(*_ip_bridge_cmd('del', params,
fields[-1]),
check_exit_code=[0, 2, 254])
processutils.execute(*_ip_bridge_cmd('add', params,
bridge),
check_exit_code=[0, 2, 254])
for fields in old_routes:
processutils.execute('ip', 'route', 'add', *fields)
def _ensure_bridge_filtering(bridge, gateway):
# This method leaves privsep usage to iptables manager
# Don't forward traffic unless we were told to be a gateway
LOG.debug("Ensuring filtering %s to %s", bridge, gateway)
global _IPTABLES_MANAGER
ipv4_filter = _IPTABLES_MANAGER.ipv4['filter']
if gateway:
for rule in _IPTABLES_MANAGER.get_gateway_rules(bridge):
ipv4_filter.add_rule(*rule)
else:
ipv4_filter.add_rule('FORWARD',
('--in-interface %s -j %s'
% (bridge,
_IPTABLES_MANAGER.iptables_drop_action)))
ipv4_filter.add_rule('FORWARD',
('--out-interface %s -j %s'
% (bridge,
_IPTABLES_MANAGER.iptables_drop_action)))
_IPTABLES_MANAGER.apply()
def configure(iptables_mgr):
"""Configure the iptables manager impl.
:param iptables_mgr: the iptables manager instance
"""
global _IPTABLES_MANAGER
_IPTABLES_MANAGER = iptables_mgr
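# Hedged sketch appended for illustration (not part of os-vif): configure()
# only requires the small interface used by _ensure_bridge_filtering() above,
# i.e. ipv4['filter'] with add_rule(), plus get_gateway_rules(),
# iptables_drop_action and apply(). The stand-in classes and the 'br-demo'
# bridge name below are illustrative only.
if __name__ == '__main__':
    class _FakeFilterTable(object):
        def __init__(self):
            self.rules = []

        def add_rule(self, chain, rule):
            self.rules.append((chain, rule))

    class _FakeIptablesManager(object):
        iptables_drop_action = 'DROP'

        def __init__(self):
            self.ipv4 = {'filter': _FakeFilterTable()}

        def get_gateway_rules(self, bridge):
            return [('FORWARD', '--in-interface %s -j ACCEPT' % bridge)]

        def apply(self):
            pass

    mgr = _FakeIptablesManager()
    configure(mgr)
    _ensure_bridge_filtering('br-demo', None)
    print(mgr.ipv4['filter'].rules)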
| 42.055046
| 79
| 0.599476
|
b2d831db91ab14ea096d0eff09632c7391805593
| 3,247
|
py
|
Python
|
src/msrnn_summary/msrnn_summary_run.py
|
ThuYShao/MatchZoo-py
|
dd8ff1328af58d3d14aacd1a7d56d79bbf847c15
|
[
"Apache-2.0"
] | null | null | null |
src/msrnn_summary/msrnn_summary_run.py
|
ThuYShao/MatchZoo-py
|
dd8ff1328af58d3d14aacd1a7d56d79bbf847c15
|
[
"Apache-2.0"
] | null | null | null |
src/msrnn_summary/msrnn_summary_run.py
|
ThuYShao/MatchZoo-py
|
dd8ff1328af58d3d14aacd1a7d56d79bbf847c15
|
[
"Apache-2.0"
] | 1
|
2020-07-28T03:07:31.000Z
|
2020-07-28T03:07:31.000Z
|
# -*- coding: utf-8 -*-
__author__ = 'yshao'
import torch
import numpy as np
import pandas as pd
import matchzoo as mz
import os
print('matchzoo version', mz.__version__)
DATA_DIR = '/data/disk2/private/guozhipeng/syq/coliee/Case_Law/format/matchzoo'
ranking_task = mz.tasks.Ranking(losses=mz.losses.RankCrossEntropyLoss(num_neg=1))
ranking_task.metrics = [
mz.metrics.Precision(k=5),
mz.metrics.Recall(k=5),
mz.metrics.F1(k=5)
]
print("`ranking_task` initialized with metrics", ranking_task.metrics)
train_pack_raw = mz.pack(pd.read_csv(os.path.join(DATA_DIR, 'train_512_bm25.csv'), index_col=False, encoding='utf8'), 'ranking')
dev_pack_raw = mz.pack(pd.read_csv(os.path.join(DATA_DIR, 'dev_512_bm25.csv'), index_col=False, encoding='utf8'), 'ranking')
test_pack_raw = mz.pack(pd.read_csv(os.path.join(DATA_DIR, 'test_512_bm25.csv'), index_col=False, encoding='utf8'), 'ranking')
print('data loaded as `train_pack_raw` `dev_pack_raw` `test_pack_raw`')
preprocessor = mz.models.MatchSRNN.get_default_preprocessor(
filter_mode='df',
filter_low_freq=2,
)
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
print(preprocessor.context)
glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=300)
term_index = preprocessor.context['vocab_unit'].state['term_index']
embedding_matrix = glove_embedding.build_matrix(term_index)
l2_norm = np.sqrt((embedding_matrix * embedding_matrix).sum(axis=1))
embedding_matrix = embedding_matrix / l2_norm[:, np.newaxis]
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed
)
validset = mz.dataloader.Dataset(
data_pack=dev_pack_processed
)
padding_callback = mz.models.MatchSRNN.get_default_padding_callback(
fixed_length_left=512,
fixed_length_right=512,
pad_word_value=0,
pad_word_mode='pre'
)
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=16,
stage='train',
sort=False,
shuffle=True,
callback=padding_callback,
num_workers=4
)
validloader = mz.dataloader.DataLoader(
dataset=validset,
batch_size=8,
stage='dev',
sort=False,
callback=padding_callback,
num_workers=2
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=8,
stage='dev',
sort=False,
callback=padding_callback,
num_workers=2
)
model = mz.models.MatchSRNN()
model.params['task'] = ranking_task
model.params['embedding'] = embedding_matrix
model.params['channels'] = 4
model.params['units'] = 10
model.params['dropout'] = 0.2
model.params['direction'] = 'lt'
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
model.build()
print(model, sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adadelta(model.parameters())
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=validloader,
testloader=testloader,
validate_interval=None,
epochs=10
)
trainer.run()
| 26.398374
| 128
| 0.749307
|
16e28d369818cd1865621753db585621a9783aa5
| 653
|
py
|
Python
|
tottle/polling/abc.py
|
muffleo/tottle
|
69a5bdda879ab56d43505d517d3369a687c135a2
|
[
"MIT"
] | 12
|
2020-09-06T15:31:34.000Z
|
2021-02-27T20:30:34.000Z
|
tottle/polling/abc.py
|
cyanlabs-org/tottle
|
6cf02022ed7b445c9b5af475c6e854b91780d792
|
[
"MIT"
] | 2
|
2021-04-13T06:43:42.000Z
|
2021-07-07T20:52:39.000Z
|
tottle/polling/abc.py
|
cyanlabs-org/tottle
|
6cf02022ed7b445c9b5af475c6e854b91780d792
|
[
"MIT"
] | 4
|
2020-09-12T03:09:25.000Z
|
2021-03-22T08:52:04.000Z
|
from abc import ABC, abstractmethod
from typing import AsyncIterator, Any, Optional
from tottle.exception_factory import ABCErrorHandler
from tottle.api import ABCAPI
class ABCPolling(ABC):
@abstractmethod
async def get_updates(self) -> Any:
pass
@abstractmethod
async def listen(self) -> AsyncIterator[dict]:
pass
@property
@abstractmethod
def api(self) -> "ABCAPI":
pass
@api.setter
def api(self, new_api: "ABCAPI"):
pass
@abstractmethod
def construct(
self, api: "ABCAPI", error_handler: Optional["ABCErrorHandler"] = None
) -> "ABCPolling":
pass
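# Hedged sketch appended for illustration (not part of tottle): a do-nothing
# polling implementation satisfying the abstract interface above, e.g. as a
# stub in tests. Returning an empty update list is an assumption; a real
# poller would long-poll the bot API through its ABCAPI instance.
class NullPolling(ABCPolling):
    def __init__(self) -> None:
        self._api: Optional["ABCAPI"] = None

    async def get_updates(self) -> Any:
        return []

    async def listen(self) -> AsyncIterator[dict]:
        # Yield nothing and stop immediately.
        return
        yield  # noqa -- the bare yield makes this an async generator

    @property
    def api(self) -> "ABCAPI":
        return self._api

    @api.setter
    def api(self, new_api: "ABCAPI"):
        self._api = new_api

    def construct(
        self, api: "ABCAPI", error_handler: Optional["ABCErrorHandler"] = None
    ) -> "ABCPolling":
        self.api = api
        return self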
| 21.064516
| 78
| 0.656968
|
dfc5e1cb286c79dc39b28a496122512ed784933a
| 1,404
|
py
|
Python
|
config_pb2_grpc.py
|
neolinsu/dlrm
|
3caad5079032b9603df93d9dae57f40e495eb540
|
[
"MIT"
] | null | null | null |
config_pb2_grpc.py
|
neolinsu/dlrm
|
3caad5079032b9603df93d9dae57f40e495eb540
|
[
"MIT"
] | null | null | null |
config_pb2_grpc.py
|
neolinsu/dlrm
|
3caad5079032b9603df93d9dae57f40e495eb540
|
[
"MIT"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import config_pb2 as config__pb2
class ConfigStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetConfig = channel.unary_unary(
'/hybridtraining.Config/GetConfig',
request_serializer=config__pb2.ConfigRequest.SerializeToString,
response_deserializer=config__pb2.ConfigReply.FromString,
)
class ConfigServicer(object):
# missing associated documentation comment in .proto file
pass
def GetConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ConfigServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetConfig': grpc.unary_unary_rpc_method_handler(
servicer.GetConfig,
request_deserializer=config__pb2.ConfigRequest.FromString,
response_serializer=config__pb2.ConfigReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'hybridtraining.Config', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
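# Hedged usage sketch appended for illustration (not produced by protoc): wire
# a servicer into a server and call it over a local insecure channel. The port
# choice is arbitrary and ConfigRequest is assumed to need no required fields.
if __name__ == '__main__':
    from concurrent import futures

    class EchoConfigServicer(ConfigServicer):
        def GetConfig(self, request, context):
            return config__pb2.ConfigReply()

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_ConfigServicer_to_server(EchoConfigServicer(), server)
    port = server.add_insecure_port('[::]:0')
    server.start()

    with grpc.insecure_channel('localhost:%d' % port) as channel:
        stub = ConfigStub(channel)
        print(stub.GetConfig(config__pb2.ConfigRequest()))

    server.stop(None)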
| 29.87234
| 72
| 0.746439
|
b128a6549876bdcdd362171efd4462cfac8075cd
| 18,529
|
py
|
Python
|
sdk/python/pulumi_aws/cfg/rule.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/cfg/rule.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/cfg/rule.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Rule(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The ARN of the config rule
"""
description: pulumi.Output[str]
"""
Description of the rule
"""
input_parameters: pulumi.Output[str]
"""
A string in JSON format that is passed to the AWS Config rule Lambda function.
"""
maximum_execution_frequency: pulumi.Output[str]
"""
The frequency that you want AWS Config to run evaluations for a rule that
is triggered periodically. If specified, requires `message_type` to be `ScheduledNotification`.
"""
name: pulumi.Output[str]
"""
The name of the rule
"""
rule_id: pulumi.Output[str]
"""
The ID of the config rule
"""
scope: pulumi.Output[dict]
"""
Scope defines which resources can trigger an evaluation for the rule as documented below.
* `complianceResourceId` (`str`) - The IDs of the only AWS resource that you want to trigger an evaluation for the rule.
If you specify a resource ID, you must specify one resource type for `compliance_resource_types`.
* `complianceResourceTypes` (`list`) - A list of resource types of only those AWS resources that you want to trigger an
evaluation for the rule. e.g. `AWS::EC2::Instance`. You can only specify one type if you also specify
a resource ID for `compliance_resource_id`. See [relevant part of AWS Docs](http://docs.aws.amazon.com/config/latest/APIReference/API_ResourceIdentifier.html#config-Type-ResourceIdentifier-resourceType) for available types.
* `tagKey` (`str`) - The tag key that is applied to only those AWS resources that you want you
want to trigger an evaluation for the rule.
* `tagValue` (`str`) - The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule.
"""
source: pulumi.Output[dict]
"""
Source specifies the rule owner, the rule identifier, and the notifications that cause
the function to evaluate your AWS resources as documented below.
* `owner` (`str`) - Indicates whether AWS or the customer owns and manages the AWS Config rule. Valid values are `AWS` or `CUSTOM_LAMBDA`. For more information about managed rules, see the [AWS Config Managed Rules documentation](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html). For more information about custom rules, see the [AWS Config Custom Rules documentation](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_develop-rules.html). Custom Lambda Functions require permissions to allow the AWS Config service to invoke them, e.g. via the `lambda.Permission` resource.
* `sourceDetails` (`list`) - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources. Only valid if `owner` is `CUSTOM_LAMBDA`.
* `eventSource` (`str`) - The source of the event, such as an AWS service, that triggers AWS Config
to evaluate your AWS resources. This defaults to `aws.config` and is the only valid value.
* `maximum_execution_frequency` (`str`) - The frequency that you want AWS Config to run evaluations for a rule that
is triggered periodically. If specified, requires `message_type` to be `ScheduledNotification`.
* `messageType` (`str`) - The type of notification that triggers AWS Config to run an evaluation for a rule. You can specify the following notification types:
* `sourceIdentifier` (`str`) - For AWS Config managed rules, a predefined identifier, e.g `IAM_PASSWORD_POLICY`. For custom Lambda rules, the identifier is the ARN of the Lambda Function, such as `arn:aws:lambda:us-east-1:123456789012:function:custom_rule_name` or the `arn` attribute of the `lambda.Function` resource.
"""
tags: pulumi.Output[dict]
"""
A map of tags to assign to the resource.
"""
def __init__(__self__, resource_name, opts=None, description=None, input_parameters=None, maximum_execution_frequency=None, name=None, scope=None, source=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an AWS Config Rule.
> **Note:** Config Rule requires an existing `Configuration Recorder` to be present. Use of `depends_on` is recommended (as shown below) to avoid race conditions.
## Example Usage
### AWS Managed Rules
AWS managed rules can be used by setting the source owner to `AWS` and the source identifier to the name of the managed rule. More information about AWS managed rules can be found in the [AWS Config Developer Guide](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html).
```python
import pulumi
import pulumi_aws as aws
rule = aws.cfg.Rule("rule", source={
"owner": "AWS",
"sourceIdentifier": "S3_BUCKET_VERSIONING_ENABLED",
},
            opts=pulumi.ResourceOptions(depends_on=["aws_config_configuration_recorder.foo"]))
role = aws.iam.Role("role", assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "config.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
\"\"\")
foo = aws.cfg.Recorder("foo", role_arn=role.arn)
role_policy = aws.iam.RolePolicy("rolePolicy",
policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": "config:Put*",
"Effect": "Allow",
"Resource": "*"
}
]
}
\"\"\",
role=role.id)
```
### Custom Rules
Custom rules can be used by setting the source owner to `CUSTOM_LAMBDA` and the source identifier to the Amazon Resource Name (ARN) of the Lambda Function. The AWS Config service must have permissions to invoke the Lambda Function, e.g. via the `lambda.Permission` resource. More information about custom rules can be found in the [AWS Config Developer Guide](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_develop-rules.html).
```python
import pulumi
import pulumi_aws as aws
example_recorder = aws.cfg.Recorder("exampleRecorder")
example_function = aws.lambda_.Function("exampleFunction")
example_permission = aws.lambda_.Permission("examplePermission",
action="lambda:InvokeFunction",
function=example_function.arn,
principal="config.amazonaws.com")
example_rule = aws.cfg.Rule("exampleRule", source={
"owner": "CUSTOM_LAMBDA",
"sourceIdentifier": example_function.arn,
},
            opts=pulumi.ResourceOptions(depends_on=[
"aws_config_configuration_recorder.example",
"aws_lambda_permission.example",
]))
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description of the rule
:param pulumi.Input[str] input_parameters: A string in JSON format that is passed to the AWS Config rule Lambda function.
:param pulumi.Input[str] maximum_execution_frequency: The frequency that you want AWS Config to run evaluations for a rule that
is triggered periodically. If specified, requires `message_type` to be `ScheduledNotification`.
:param pulumi.Input[str] name: The name of the rule
:param pulumi.Input[dict] scope: Scope defines which resources can trigger an evaluation for the rule as documented below.
:param pulumi.Input[dict] source: Source specifies the rule owner, the rule identifier, and the notifications that cause
the function to evaluate your AWS resources as documented below.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource.
The **scope** object supports the following:
          * `complianceResourceId` (`pulumi.Input[str]`) - The ID of the only AWS resource that you want to trigger an evaluation for the rule.
If you specify a resource ID, you must specify one resource type for `compliance_resource_types`.
* `complianceResourceTypes` (`pulumi.Input[list]`) - A list of resource types of only those AWS resources that you want to trigger an
evaluation for the rule. e.g. `AWS::EC2::Instance`. You can only specify one type if you also specify
a resource ID for `compliance_resource_id`. See [relevant part of AWS Docs](http://docs.aws.amazon.com/config/latest/APIReference/API_ResourceIdentifier.html#config-Type-ResourceIdentifier-resourceType) for available types.
          * `tagKey` (`pulumi.Input[str]`) - The tag key that is applied to only those AWS resources that you want
            to trigger an evaluation for the rule.
* `tagValue` (`pulumi.Input[str]`) - The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule.
The **source** object supports the following:
* `owner` (`pulumi.Input[str]`) - Indicates whether AWS or the customer owns and manages the AWS Config rule. Valid values are `AWS` or `CUSTOM_LAMBDA`. For more information about managed rules, see the [AWS Config Managed Rules documentation](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html). For more information about custom rules, see the [AWS Config Custom Rules documentation](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_develop-rules.html). Custom Lambda Functions require permissions to allow the AWS Config service to invoke them, e.g. via the `lambda.Permission` resource.
* `sourceDetails` (`pulumi.Input[list]`) - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources. Only valid if `owner` is `CUSTOM_LAMBDA`.
* `eventSource` (`pulumi.Input[str]`) - The source of the event, such as an AWS service, that triggers AWS Config
to evaluate your AWS resources. This defaults to `aws.config` and is the only valid value.
* `maximum_execution_frequency` (`pulumi.Input[str]`) - The frequency that you want AWS Config to run evaluations for a rule that
is triggered periodically. If specified, requires `message_type` to be `ScheduledNotification`.
* `messageType` (`pulumi.Input[str]`) - The type of notification that triggers AWS Config to run an evaluation for a rule. You can specify the following notification types:
* `sourceIdentifier` (`pulumi.Input[str]`) - For AWS Config managed rules, a predefined identifier, e.g `IAM_PASSWORD_POLICY`. For custom Lambda rules, the identifier is the ARN of the Lambda Function, such as `arn:aws:lambda:us-east-1:123456789012:function:custom_rule_name` or the `arn` attribute of the `lambda.Function` resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['input_parameters'] = input_parameters
__props__['maximum_execution_frequency'] = maximum_execution_frequency
__props__['name'] = name
__props__['scope'] = scope
if source is None:
raise TypeError("Missing required property 'source'")
__props__['source'] = source
__props__['tags'] = tags
__props__['arn'] = None
__props__['rule_id'] = None
super(Rule, __self__).__init__(
'aws:cfg/rule:Rule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, description=None, input_parameters=None, maximum_execution_frequency=None, name=None, rule_id=None, scope=None, source=None, tags=None):
"""
Get an existing Rule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN of the config rule
:param pulumi.Input[str] description: Description of the rule
:param pulumi.Input[str] input_parameters: A string in JSON format that is passed to the AWS Config rule Lambda function.
:param pulumi.Input[str] maximum_execution_frequency: The frequency that you want AWS Config to run evaluations for a rule that
is triggered periodically. If specified, requires `message_type` to be `ScheduledNotification`.
:param pulumi.Input[str] name: The name of the rule
:param pulumi.Input[str] rule_id: The ID of the config rule
:param pulumi.Input[dict] scope: Scope defines which resources can trigger an evaluation for the rule as documented below.
:param pulumi.Input[dict] source: Source specifies the rule owner, the rule identifier, and the notifications that cause
the function to evaluate your AWS resources as documented below.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource.
The **scope** object supports the following:
          * `complianceResourceId` (`pulumi.Input[str]`) - The ID of the only AWS resource that you want to trigger an evaluation for the rule.
If you specify a resource ID, you must specify one resource type for `compliance_resource_types`.
* `complianceResourceTypes` (`pulumi.Input[list]`) - A list of resource types of only those AWS resources that you want to trigger an
evaluation for the rule. e.g. `AWS::EC2::Instance`. You can only specify one type if you also specify
a resource ID for `compliance_resource_id`. See [relevant part of AWS Docs](http://docs.aws.amazon.com/config/latest/APIReference/API_ResourceIdentifier.html#config-Type-ResourceIdentifier-resourceType) for available types.
          * `tagKey` (`pulumi.Input[str]`) - The tag key that is applied to only those AWS resources that you want
            to trigger an evaluation for the rule.
* `tagValue` (`pulumi.Input[str]`) - The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule.
The **source** object supports the following:
* `owner` (`pulumi.Input[str]`) - Indicates whether AWS or the customer owns and manages the AWS Config rule. Valid values are `AWS` or `CUSTOM_LAMBDA`. For more information about managed rules, see the [AWS Config Managed Rules documentation](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html). For more information about custom rules, see the [AWS Config Custom Rules documentation](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_develop-rules.html). Custom Lambda Functions require permissions to allow the AWS Config service to invoke them, e.g. via the `lambda.Permission` resource.
* `sourceDetails` (`pulumi.Input[list]`) - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources. Only valid if `owner` is `CUSTOM_LAMBDA`.
* `eventSource` (`pulumi.Input[str]`) - The source of the event, such as an AWS service, that triggers AWS Config
to evaluate your AWS resources. This defaults to `aws.config` and is the only valid value.
* `maximum_execution_frequency` (`pulumi.Input[str]`) - The frequency that you want AWS Config to run evaluations for a rule that
is triggered periodically. If specified, requires `message_type` to be `ScheduledNotification`.
* `messageType` (`pulumi.Input[str]`) - The type of notification that triggers AWS Config to run an evaluation for a rule. You can specify the following notification types:
* `sourceIdentifier` (`pulumi.Input[str]`) - For AWS Config managed rules, a predefined identifier, e.g `IAM_PASSWORD_POLICY`. For custom Lambda rules, the identifier is the ARN of the Lambda Function, such as `arn:aws:lambda:us-east-1:123456789012:function:custom_rule_name` or the `arn` attribute of the `lambda.Function` resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["description"] = description
__props__["input_parameters"] = input_parameters
__props__["maximum_execution_frequency"] = maximum_execution_frequency
__props__["name"] = name
__props__["rule_id"] = rule_id
__props__["scope"] = scope
__props__["source"] = source
__props__["tags"] = tags
return Rule(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 66.412186
| 669
| 0.690647
|
5f34c62baad1916f21ac0441a9899ff2bec5a268
| 5,513
|
py
|
Python
|
main/driveupload.py
|
siddhantkhandelwal/pep-website
|
4d8b0fc7d2c7fe41c3497e64b8a3c34f02c13bf9
|
[
"MIT"
] | null | null | null |
main/driveupload.py
|
siddhantkhandelwal/pep-website
|
4d8b0fc7d2c7fe41c3497e64b8a3c34f02c13bf9
|
[
"MIT"
] | 3
|
2021-02-08T20:28:24.000Z
|
2021-06-10T21:04:27.000Z
|
main/driveupload.py
|
siddhantkhandelwal/pep-website
|
4d8b0fc7d2c7fe41c3497e64b8a3c34f02c13bf9
|
[
"MIT"
] | null | null | null |
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import sys
import os
import datetime
from django.utils import timezone
import pytz
from main.models import Abstract, Paper, ParticipantProfile, ProfessorProfile, StaffProfile, College, SupervisorProfile, Category
def upload_thread(pk):
global uploaded_files
global uploaded_files_path
uploaded_files = []
uploaded_files_path = os.path.join(os.path.dirname(
os.path.abspath(__file__)), "uploaded_files")
if pk == 0:
with open(uploaded_files_path, "r+") as f:
uploaded_files = f.read().splitlines()
elif pk == 1:
with open(uploaded_files_path, "r+") as f:
f.truncate()
gauth = GoogleAuth()
gauth.LoadCredentialsFile("creds.txt")
if gauth.credentials is None:
# Authenticate if they're not there
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
# Refresh them if expired
gauth.Refresh()
else:
# Initialize the saved creds
gauth.Authorize()
# Save the current credentials to a file
gauth.SaveCredentialsFile("creds.txt")
drive = GoogleDrive(gauth) # authentication.
try:
root_folder_name = 'PEP Portal'
root_folder_id = create_root_folder(drive, root_folder_name)
uploaded_files.append(root_folder_name)
# category_folders_dict = create_category_folders(
# drive, root_folder_id)
date_folder_name = 'Upto 15th Nov'
# if date_folder_name not in uploaded_files:
date_folder_id_upto_15th = create_folder(
drive, date_folder_name, root_folder_id)
uploaded_files.append(date_folder_name + " - " +
date_folder_id_upto_15th)
# else:
# date_folder_id_upto_15th = [entry.split(' - ')[2] for entry in uploaded_files if entry.contains('Upto 15th Nov - ')]
date_folder_name = 'After 15th Nov'
# if date_folder_name not in uploaded_files:
date_folder_id_after_15th = create_folder(
drive, date_folder_name, root_folder_id)
uploaded_files.append(date_folder_name + "-" +
date_folder_id_after_15th)
# else:
# date_folder_id_after_15th = [entry.split(' - ')[2] for entry in uploaded_files if entry.contains('After 15th Nov - ')]
category_folders_dict_upto_15th = create_category_folders(
drive, date_folder_id_upto_15th)
category_folders_dict_after_15th = create_category_folders(
drive, date_folder_id_after_15th)
for category in Category.objects.all():
# id = category_folders_dict[category.name]
for abstract in Abstract.objects.filter(category=category):
if abstract.document.name.split("/")[2] not in uploaded_files:
if abstract.submission_date <= timezone.datetime(2018, 11, 17).replace(tzinfo=pytz.timezone('Asia/Kolkata')):
id = category_folders_dict_upto_15th[category.name]
else:
id = category_folders_dict_after_15th[category.name]
file = drive.CreateFile(metadata={"title": abstract.document.name.split("/")[2],
"parents": [{"kind": "drive#fileLink",
"id": id}]})
file.SetContentFile(abstract.document.path)
file.Upload()
uploaded_files.append(file['title'])
return 'Task Completed'
    except Exception:
return 'Error'
finally:
with open(uploaded_files_path, "w+") as f:
for uploaded_file in uploaded_files:
f.write(uploaded_file + '\n')
def get_file_list(drive):
file_list = drive.ListFile(
{'q': "'root' in parents and trashed=false"}).GetList()
return file_list
def search_file_in_list(file_list, file_name_to_search):
for file in file_list:
if file['title'] == file_name_to_search:
id = file['id']
return id
return -1
def create_folder(drive, folder_name, parent_folder_id=''):
folder_metadata = {
'title': folder_name,
# The mimetype defines this new file as a folder, so don't change this.
'mimeType': 'application/vnd.google-apps.folder',
}
if parent_folder_id != '':
folder_metadata['parents'] = [{'id': parent_folder_id}]
folder = drive.CreateFile(folder_metadata)
folder.Upload()
return folder['id']
# return search_file_in_list(get_file_list(drive), folder_name)
def create_root_folder(drive, root_folder_name):
file_list = get_file_list(drive)
id = search_file_in_list(file_list, root_folder_name)
if id == -1:
create_folder(drive, root_folder_name)
id = search_file_in_list(get_file_list(drive), root_folder_name)
return id
def create_category_folders(drive, root_folder_id):
category_folders_details = {}
categories = Category.objects.all()
for category in categories:
if root_folder_id + category.name not in uploaded_files:
id = create_folder(drive, category.name, root_folder_id)
category_folders_details[category.name] = id
uploaded_files.append(root_folder_id + category.name)
return category_folders_details
def execute():
upload_thread(1)
print("Starting Execution")
| 36.753333
| 129
| 0.641937
|
59a5ac8062fdf6f6ef4fbdac2e93e8b986448a18
| 1,055
|
py
|
Python
|
apps/establishment_system/lookup.py
|
camilortte/RecomendadorUD
|
ebf9ee4482c4093d4751a27c90f56637a9c692a4
|
[
"MIT"
] | 4
|
2015-01-29T17:17:26.000Z
|
2021-03-03T08:17:03.000Z
|
apps/establishment_system/lookup.py
|
camilortte/RecomendadorUD
|
ebf9ee4482c4093d4751a27c90f56637a9c692a4
|
[
"MIT"
] | null | null | null |
apps/establishment_system/lookup.py
|
camilortte/RecomendadorUD
|
ebf9ee4482c4093d4751a27c90f56637a9c692a4
|
[
"MIT"
] | 1
|
2015-09-22T08:35:26.000Z
|
2015-09-22T08:35:26.000Z
|
"""
To be removed
"""
# from .models import Categoria,SubCategoria,Establecimiento
# from selectable.base import ModelLookup
# from selectable.registry import registry
# class EstablecimientoLookUp(ModelLookup):
# model = Establecimiento
# search_fields = ('nombre__icontains','email', )
# class SubCategoriaLookUp(ModelLookup):
# model = SubCategoria
# search_fields = ('tag__icontains', )
# def get_query(self, request, term):
# results = super(SubCategoriaLookUp, self).get_query(request, term)
# print request.GET
# categoria = request.GET.get('categorias', '')
# print "Categoria: ",categoria
# if categoria:
# results = results.filter(categorias=categoria)
# else:
# results = results.none()
# return results
# class CategoriaLookUp(ModelLookup):
# model = Categoria
# search_fields = ('tag__icontains', )
# registry.register(EstablecimientoLookUp)
# registry.register(SubCategoriaLookUp)
# registry.register(CategoriaLookUp)
| 29.305556
| 76
| 0.676777
|
5e904397bc9e403254ae7d7657494e0307c11b82
| 7,150
|
py
|
Python
|
rain_animation.py
|
claytongulick/blinky_lights
|
84064264637cf6bb6c62100b5b8bd60fa8ef1af2
|
[
"MIT"
] | null | null | null |
rain_animation.py
|
claytongulick/blinky_lights
|
84064264637cf6bb6c62100b5b8bd60fa8ef1af2
|
[
"MIT"
] | null | null | null |
rain_animation.py
|
claytongulick/blinky_lights
|
84064264637cf6bb6c62100b5b8bd60fa8ef1af2
|
[
"MIT"
] | 1
|
2021-08-22T09:28:33.000Z
|
2021-08-22T09:28:33.000Z
|
from random import random
from animation import Animation
from pprint import pprint
import time
import logging
import math
import sys
from config import *
from traceback import print_exception
class RainAnimation(Animation):
MAX_LINES = 25
class Line:
def __init__(self, fade_speed, color, x, y, velocity):
self.buffer = [POWER, 0.0, 0.0, 0.0] * PANEL_Y
self.is_dead = False
if not x:
x = random() * (PANEL_X - 1)
self.x = int(x)
if not y:
y = random() * (PANEL_Y - 1)
self.current_y = y
self.y = int(round(y))
self.buffer[self.y * 4:(self.y * 4) + 4] = [POWER, 255.0,255.0,255.0]
if not velocity:
velocity = .200
if velocity < .1:
velocity = .1
self.velocity = velocity
if not color:
color = [POWER, 0.0,255.0,0.0]
self.color = color
#normalize based on a % of 255
self.fade_rate = [color[1]/255.0, color[2]/255.0, color[3]/255.0]
#change the fade rate based on normalized color component
self.fade_rate = [self.fade_rate[0] * fade_speed,
self.fade_rate[1] * fade_speed,
self.fade_rate[2] * fade_speed]
self.render_count = 0
#logging.debug(self.fade_rate)
def fade(self):
for i in range(PANEL_Y):
offset = i * 4
if i == self.y:
#logging.debug("skipping: " + str(i))
#logging.debug( self.buffer[offset:offset+4] )
continue
self.buffer[offset:offset+4] = [
self.buffer[offset], #power
self.buffer[offset + 1] * (1-self.fade_rate[0]),
self.buffer[offset + 2] * (1-self.fade_rate[1]),
self.buffer[offset + 3] * (1-self.fade_rate[2])
]
#logging.debug("fade")
#logging.debug(self.buffer)
def tick(self, adjust_brightness):
        now = time.monotonic()
self.fade()
self.die()
if self.current_y > PANEL_Y + 1:
return
last_y = int(round(self.current_y))
#move
self.current_y = self.current_y + self.velocity #this is the floating point location
self.y = int(round(self.current_y)) #this is the integer, rounded location
#if we've moved down a pixel
if self.y > last_y:
#set the previous location to the current color
offset = last_y * 4
self.buffer[offset:offset+4] = self.color
#logging.debug("self.y:" + str(self.y) + " last_y: " + str(last_y))
#white leading pixel
if self.y <= PANEL_Y:
offset = self.y * 4
self.buffer[offset:offset+4] = [POWER, 255.0,255.0,255.0]
#logging.debug("current_y:" + str(self.current_y) + " y:" + str(self.y) + " last_y:" + str(last_y))
#logging.debug(self.buffer)
def render(self, buffer):
#self.render_count = self.render_count + 1
#if self.render_count > 5:
# sys.exit(0)
#logging.debug(self.buffer)
for i in range(PANEL_Y):
offset = ((PANEL_X * i) + self.x) * 4
self_offset = i * 4
for j in range(4):
#logging.debug(str(offset+j))
try:
buffer[offset+j] = int(round(self.buffer[self_offset+j]))
except:
print_exception(*sys.exc_info())
logging.debug("self.x:" + str(self.x) + "i:" + str(i) + "offset:" + str(offset) + " self offset:"+str(self_offset))
def die(self):
"""check to see if all of the pixels in the buffer have faded to 0,
if so, we're dead"""
self.is_dead = True
for i in range(PANEL_Y):
offset = i * 4
if self.buffer[offset+1] > 1 or \
self.buffer[offset+2] > 1 or \
self.buffer[offset+3] > 1:
self.is_dead = False
return
#logging.debug("line dead")
def __init__(self, spawn_rate, fade_speed, color):
Animation.__init__(self)
if not spawn_rate:
spawn_rate = 1/self.MAX_LINES
self.spawn_rate = spawn_rate
self.fade_speed = fade_speed
self.color = color
self.last_spawn_time = None
self.start_time = None
self.width = PANEL_X
self.height = PANEL_Y
self.lines = []
def load(self, folder):
pass
def clear(self):
self.buffer = [POWER,0,0,0] * PANEL_X * PANEL_Y
def get_next_frame(self):
self.clear()
        now = time.monotonic()
if not self.start_time:
self.start_time = now
if not self.last_frame_time:
self.last_frame_time = now
delta = now - self.last_frame_time
delay = int(self.frame_delay) / 1000.0
if not self.last_spawn_time:
self.last_spawn_time = now
if (now - self.last_spawn_time) > self.spawn_rate:
#logging.debug("current color: " + str(self.current_color))
if len(self.lines) < self.MAX_LINES:
line = self.Line(self.fade_speed,
self.color,
round(random() * (PANEL_X - 1)),
round(random() * PANEL_Y / 3), # don't want trails starting right at the bottom
random()/10) # velocity
self.last_spawn_time = now
#[POWER, int(random()*255),int(random()*255),int(random() * 255)], None, None)
self.lines.append(line)
#cull any dead lines
self.lines = [line for line in self.lines if not line.is_dead]
for line in self.lines:
line.tick(self.adjust_brightness)
line.render(self.buffer)
#logging.debug("now: " + str(now) + "last frame:" + str(self.last_frame_time) + " delta:" + str(delta) + " delay:" + str(delay) + " frame_delay:" + str(self.frame_delay))
if delta < delay:
return None
self.last_frame_time = now
return self.buffer
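# Illustration (not used by the animation itself) of the per-channel exponential
# fade that Line.fade applies on every tick, assuming a fade_speed of 0.2 on a
# fully lit channel:
#   value = 255.0
#   for _ in range(5):
#       value *= (1 - 0.2)   # 255 -> 204 -> 163.2 -> 130.6 -> 104.4 -> 83.6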
| 36.85567
| 178
| 0.464196
|
4537158f84466009a8fa4ce57c8866c1f3e48de2
| 856
|
py
|
Python
|
version_control/scripts/upgrade/versions/v_00005.py
|
yaoyansibase/mymagicbox
|
e6e720aa7c45c5b059a953101d698c6b76212ea6
|
[
"MIT"
] | 1
|
2018-08-02T03:59:04.000Z
|
2018-08-02T03:59:04.000Z
|
version_control/scripts/upgrade/versions/v_00005.py
|
yaoyansibase/mymagicbox
|
e6e720aa7c45c5b059a953101d698c6b76212ea6
|
[
"MIT"
] | null | null | null |
version_control/scripts/upgrade/versions/v_00005.py
|
yaoyansibase/mymagicbox
|
e6e720aa7c45c5b059a953101d698c6b76212ea6
|
[
"MIT"
] | null | null | null |
import maya.cmds as cmds
import mymagicbox.log as log
class Upgrade(object):
def __init__(self):
        self.thisVersion = 5;
def do(self, nodes):
log.info('------------------------------------');
log.info(' Trying To Upgrade to %s ... ', self.thisVersion);
for node in nodes:
if cmds.getAttr(node+'.mmbversion') >= self.thisVersion:
continue;# skip the newer node
nodeType = cmds.nodeType(node);
if nodeType == 'testNodeA':
self.on_testNodeA(node);
elif nodeType == 'testNodeB':
self.on_testNodeB(node);
# update $node.mmbversion to thisVersion
cmds.setAttr(node+'.mmbversion', self.thisVersion);
log.info('\n\n\n');
def on_testNodeA(self, node):
log.debug('upgrading %s to %s', node, self.thisVersion);
def on_testNodeB(self, node):
log.debug('upgrading %s to %s', node, self.thisVersion);
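# Usage sketch (an assumption, not part of this module): the surrounding upgrade
# framework is expected to collect the mymagicbox nodes and hand them to do():
#   nodes = cmds.ls(type=['testNodeA', 'testNodeB']) or []
#   Upgrade().do(nodes)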
| 24.457143
| 69
| 0.643692
|
b5ea39484116d62db440b1ec664d1b3fecd494cc
| 4,145
|
py
|
Python
|
bot.py
|
V-I-C-T-O-R/rasa-chatbot
|
9c99a35e9a0d7e75fdc461fa0039d434b8083c33
|
[
"Apache-2.0"
] | 9
|
2018-10-26T08:12:13.000Z
|
2020-11-26T01:05:17.000Z
|
bot.py
|
V-I-C-T-O-R/rasa-chatbot
|
9c99a35e9a0d7e75fdc461fa0039d434b8083c33
|
[
"Apache-2.0"
] | 2
|
2018-12-19T06:09:30.000Z
|
2019-09-05T05:44:41.000Z
|
bot.py
|
V-I-C-T-O-R/rasa-chatbot
|
9c99a35e9a0d7e75fdc461fa0039d434b8083c33
|
[
"Apache-2.0"
] | 4
|
2018-12-18T08:55:36.000Z
|
2019-08-23T15:09:39.000Z
|
import argparse
import warnings
from gevent.pywsgi import WSGIServer
from rasa_core import train
from rasa_core.agent import Agent
from rasa_core.interpreter import NaturalLanguageInterpreter, RasaNLUInterpreter
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.run import serve_application,load_agent
from rasa_core.training import online
from rasa_core.utils import AvailableEndpoints
from rasa_core_sdk.endpoint import endpoint_app
def train_nlu():
from rasa_nlu.training_data import load_data
from rasa_nlu.model import Trainer
from rasa_nlu import config
configs = config.load("config/nlu_model_config.yml")
project = configs.get("project")
model = configs.get("fixed_model_name")
path = configs.get("path")
num_threads = configs.get('num_threads')
nlu_data_path = str(configs.get("data"))
training_data = load_data(nlu_data_path)
trainer = Trainer(configs)
trainer.train(training_data, num_threads=num_threads)
model_directory = trainer.persist(path=path, project_name=project, fixed_model_name=model)
return model_directory
def train_core(domain_file="config/domain.yml",
model_path="models/dialogue",
training_data_file="config/stories.md"):
from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,
BinarySingleStateFeaturizer)
agent = Agent(domain_file,
policies=[MemoizationPolicy(max_history=6),
KerasPolicy(MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=6))])
training_data = agent.load_data(training_data_file)
    # train the agent's dialogue policies
agent.train(training_data, epochs=800)
agent.persist(model_path)
return agent
def run_online(domain_file="config/domain.yml", stories_file="config/stories.md", output_path="models/dialogue",
max_history=3, kwargs={"batch_size": 50, "epochs": 800, "max_training_samples": 300}):
interpreter = RasaNLUInterpreter("models/ticket/nlu_bot")
agent = train.train_dialogue_model(domain_file=domain_file,
interpreter=interpreter,
stories_file=stories_file,
output_path=output_path,
max_history=max_history,
endpoints=AvailableEndpoints.read_endpoints("config/endpoints.yml"),
kwargs=kwargs)
online.run_online_learning(agent)
def endpoints(action='actions.ticket'):
edp_app = endpoint_app(action_package_name=action)
http_server = WSGIServer(('0.0.0.0',5055), edp_app)
print("Starting action endpoint server...")
http_server.start()
print("Action endpoint is up and running. on {}"
"".format(http_server.address))
http_server.serve_forever()
def run():
endpoints = AvailableEndpoints.read_endpoints('config/endpoints.yml')
interpreter = NaturalLanguageInterpreter.create('models/ticket/nlu_bot',endpoints.nlu)
agent = load_agent("models/dialogue", interpreter=interpreter, endpoints=endpoints)
serve_application(agent,channel='rest')
# serve_application(agent)
return agent
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="starts the bot")
parser.add_argument(
"task",
choices=["train-nlu", "train-core",'endpoints', "run", "online-train"],
help="what the bot should do - e.g. run or train?")
task = parser.parse_args().task
# decide what to do based on first parameter of the script
if task == "train-nlu":
train_nlu()
elif task == "train-core":
train_core()
elif task == "endpoints":
endpoints()
elif task == "run":
run()
elif task == "online":
run_online()
else:
warnings.warn("Need to pass either 'train-nlu','endpoints', 'train-core','run', or 'online' to use the script.")
exit(1)
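# Typical invocations, matching the argparse choices above (assuming the script
# is run from the project root with the config/ and models/ directories in place):
#   python bot.py train-nlu      # train the NLU model
#   python bot.py train-core     # train the dialogue policies
#   python bot.py endpoints      # start the custom action server on port 5055
#   python bot.py run            # serve the trained agent on the rest channel
#   python bot.py online-train   # interactive/online learning session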
| 38.027523
| 120
| 0.672859
|
37ee14322f255d2bb4f46f05e32976ee6c2e1a4d
| 2,787
|
py
|
Python
|
src/main/python/ui/savechart.py
|
3ll3d00d/pypolarmap
|
d96e16820b4da29ec33271abd34dc33c587e7657
|
[
"MIT"
] | 1
|
2020-02-12T12:33:26.000Z
|
2020-02-12T12:33:26.000Z
|
src/main/python/ui/savechart.py
|
3ll3d00d/pypolarmap
|
d96e16820b4da29ec33271abd34dc33c587e7657
|
[
"MIT"
] | 10
|
2018-06-04T18:09:59.000Z
|
2020-02-01T11:24:54.000Z
|
src/main/python/ui/savechart.py
|
3ll3d00d/pypolarmap
|
d96e16820b4da29ec33271abd34dc33c587e7657
|
[
"MIT"
] | 1
|
2019-09-30T04:35:00.000Z
|
2019-09-30T04:35:00.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'savechart.ui'
#
# Created by: PyQt5 UI code generator 5.13.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_saveChartDialog(object):
def setupUi(self, saveChartDialog):
saveChartDialog.setObjectName("saveChartDialog")
saveChartDialog.setWindowModality(QtCore.Qt.ApplicationModal)
saveChartDialog.resize(259, 155)
saveChartDialog.setModal(True)
self.gridLayout = QtWidgets.QGridLayout(saveChartDialog)
self.gridLayout.setObjectName("gridLayout")
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.widthPixels = QtWidgets.QSpinBox(saveChartDialog)
self.widthPixels.setMinimum(1)
self.widthPixels.setMaximum(8192)
self.widthPixels.setObjectName("widthPixels")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.widthPixels)
self.heightPixels = QtWidgets.QSpinBox(saveChartDialog)
self.heightPixels.setEnabled(False)
self.heightPixels.setMinimum(1)
self.heightPixels.setMaximum(8192)
self.heightPixels.setObjectName("heightPixels")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.heightPixels)
self.label = QtWidgets.QLabel(saveChartDialog)
self.label.setObjectName("label")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label)
self.label_2 = QtWidgets.QLabel(saveChartDialog)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.gridLayout.addLayout(self.formLayout, 0, 0, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(saveChartDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Save)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
self.retranslateUi(saveChartDialog)
self.buttonBox.accepted.connect(saveChartDialog.accept)
self.buttonBox.rejected.connect(saveChartDialog.reject)
self.widthPixels.valueChanged['int'].connect(saveChartDialog.updateHeight)
QtCore.QMetaObject.connectSlotsByName(saveChartDialog)
def retranslateUi(self, saveChartDialog):
_translate = QtCore.QCoreApplication.translate
saveChartDialog.setWindowTitle(_translate("saveChartDialog", "Save Chart"))
self.label.setText(_translate("saveChartDialog", "Width"))
self.label_2.setText(_translate("saveChartDialog", "Height"))
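# Usage sketch (kept out of this file in practice, since pyuic5 regenerates it):
# a QDialog subclass owns the generated Ui class and provides the updateHeight
# slot that setupUi connects to widthPixels.valueChanged.
class SaveChartDialog(QtWidgets.QDialog):
    def __init__(self, parent=None):
        super(SaveChartDialog, self).__init__(parent)
        self.ui = Ui_saveChartDialog()
        self.ui.setupUi(self)
    def updateHeight(self, width):
        # Placeholder mapping (assumption): the real dialog derives the disabled
        # height spinbox from the chart's aspect ratio.
        self.ui.heightPixels.setValue(width)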
| 48.051724
| 108
| 0.733405
|
2fc321564ec22a3fc08d852dadd83738de2b7bb9
| 349
|
py
|
Python
|
languages/python/web_codepad_post.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | 1
|
2021-04-09T04:15:24.000Z
|
2021-04-09T04:15:24.000Z
|
languages/python/web_codepad_post.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | null | null | null |
languages/python/web_codepad_post.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | 1
|
2021-07-31T02:45:29.000Z
|
2021-07-31T02:45:29.000Z
|
import urllib2
import urllib
url = 'http://codepad.org'
with open('locate.py') as f:
code = f.read()
parameters = {'project':'uthcode',
'lang':'Python',
'code': code,
'private':'',
'run':'False',
'submit':'Submit'}
seq = urllib.urlencode(parameters)
r = urllib2.urlopen(url,seq)
print r.url + '/fork'
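# The script above targets Python 2 (urllib2 and the print statement). A rough
# Python 3 equivalent, assuming the same codepad.org form fields, would be:
#   from urllib.parse import urlencode
#   from urllib.request import urlopen
#   data = urlencode(parameters).encode('utf-8')
#   with urlopen(url, data) as r:
#       print(r.url + '/fork')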
| 18.368421
| 34
| 0.575931
|
8c477ee9e508168b7fdfad27e9932756e083549e
| 236
|
py
|
Python
|
Server/Python/src/dbs/dao/MySQL/ProcessedDataset/GetID.py
|
vkuznet/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 8
|
2015-08-14T04:01:32.000Z
|
2021-06-03T00:56:42.000Z
|
Server/Python/src/dbs/dao/MySQL/ProcessedDataset/GetID.py
|
yuyiguo/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 162
|
2015-01-07T21:34:47.000Z
|
2021-10-13T09:42:41.000Z
|
Server/Python/src/dbs/dao/MySQL/ProcessedDataset/GetID.py
|
yuyiguo/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 16
|
2015-01-22T15:27:29.000Z
|
2021-04-28T09:23:28.000Z
|
#!/usr/bin/env python
"""
This module provides ProcessedDataset.GetID data access object.
"""
from dbs.dao.Oracle.ProcessedDataset.GetID import GetID as OraProcessedDatasetGetID
class GetID(OraProcessedDatasetGetID):
pass
| 23.6
| 83
| 0.771186
|
5717a9780ae8288174a5ec87f1881e0e25d549ce
| 5,865
|
py
|
Python
|
docs/source/conf.py
|
aeroramesh/eMInd
|
a699681c53637bc5b2ef2608fd4e5cb8ebc7b19d
|
[
"MIT"
] | 98
|
2019-09-13T16:00:57.000Z
|
2022-03-25T05:15:36.000Z
|
docs/source/conf.py
|
aeroramesh/eMInd
|
a699681c53637bc5b2ef2608fd4e5cb8ebc7b19d
|
[
"MIT"
] | 269
|
2019-08-22T01:47:09.000Z
|
2021-12-01T14:47:47.000Z
|
docs/source/conf.py
|
aeroramesh/eMInd
|
a699681c53637bc5b2ef2608fd4e5cb8ebc7b19d
|
[
"MIT"
] | 5
|
2021-05-07T11:11:40.000Z
|
2022-03-29T08:38:33.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
import nodeeditor
# -- Project information -----------------------------------------------------
project = 'NodeEditor'
copyright = '2019, Pavel Křupala'
author = 'Pavel Křupala'
# The short X.Y version
version = nodeeditor.__version__
# The full version, including alpha/beta/rc tags
release = nodeeditor.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import sphinx_rtd_theme
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosectionlabel',
'sphinx_rtd_theme',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'recommonmark',
]
autosectionlabel_prefix_document = True
autodoc_member_order = 'bysource'
autoclass_content = "both"
from recommonmark.transform import AutoStructify
github_doc_root = 'https://github.com/rtfd/recommonmark/tree/master/doc/'
def setup(app):
app.add_config_value('recommonmark_config', {
# 'url_resolver': lambda url: github_doc_root + url,
'auto_toc_tree_section': 'Contents',
}, True)
app.add_transform(AutoStructify)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = ["_themes", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NodeEditordoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NodeEditor.tex', 'NodeEditor Documentation',
'Pavel Křupala', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nodeeditor', 'NodeEditor Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NodeEditor', 'NodeEditor Documentation',
author, 'NodeEditor', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| 29.621212
| 79
| 0.66445
|
eb282e96df605c49958261d1bcdd1be576d4b1bf
| 3,574
|
py
|
Python
|
story_chain/flaskrunner.py
|
muchu1983/story_chain
|
3af4bb158be128a52c753f88eaffaed872d85880
|
[
"BSD-3-Clause"
] | null | null | null |
story_chain/flaskrunner.py
|
muchu1983/story_chain
|
3af4bb158be128a52c753f88eaffaed872d85880
|
[
"BSD-3-Clause"
] | null | null | null |
story_chain/flaskrunner.py
|
muchu1983/story_chain
|
3af4bb158be128a52c753f88eaffaed872d85880
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import json
from flask import Flask
from flask import request
from flask import render_template
from flask import jsonify
from story_chain.localdb import LocalDbForStoryChain
app = Flask(__name__.split(".")[0])
# start the server
def start_flask_server():
app.run(host="0.0.0.0", port=5000, debug=True)
# build a JSONP response
def make_jsonp_response(dicJsonObj=None):
strCallback = request.args.get("strJsonpCallback", 0, type=str)
return strCallback + "(" + json.dumps(dicJsonObj) + ")"
# insert a new story segment after the given segment (returns the new segment id)
@app.route("/story_chain/api_post/story", methods=["GET"])
def apiPostNewStory():
db = LocalDbForStoryChain()
strStoryContent = request.args.get("str_story_content", type=str)
intPrevStoryId = request.args.get("int_prev_story_id", type=int)
intNewStoryId = db.insertNewStory(strContent=strStoryContent, intPrevId=intPrevStoryId)
return make_jsonp_response(dicJsonObj={"new_story_id":intNewStoryId})
# get the content of the given segment
@app.route("/story_chain/api_get/story/<int:intStoryId>", methods=["GET"])
def apiGetStoryById(intStoryId=0):
db = LocalDbForStoryChain()
(strContent, intLike, intDislike) = db.fetchStoryById(intStoryId=intStoryId)
dicJsonObj = {"str_content":strContent,
"int_like":intLike,
"int_dislike":intDislike}
return make_jsonp_response(dicJsonObj=dicJsonObj)
# update the given segment (like/dislike)
@app.route("/story_chain/api_put/story/<int:intStoryId>", methods=["GET"])
def apiPutStoryById(intStoryId=0):
pass
# get the list of previous or next story segments (returns a list of segment ids)
@app.route("/story_chain/api_get/story", methods=["GET"])
def apiGetStoryList():
db = LocalDbForStoryChain()
strType = request.args.get("str_type", type=str) #"next" or "prev"
intStoryId = request.args.get("int_story_id", type=int)
lstIntStoryId = db.fetchNextOrPrevStoryId(intStoryId=intStoryId, strFetchType=strType)
dicJsonObj = None
if strType == "prev":
        # the previous segment is always unique
dicJsonObj = {"int_prev_story_id":(lstIntStoryId[0] if lstIntStoryId else 0)}
elif strType == "next":
        # there may be more than one choice for the next segment
dicJsonObj = {"lst_int_next_story_id":lstIntStoryId}
else:
dicJsonObj = {}
return make_jsonp_response(dicJsonObj)
# read a bookmark
@app.route("/story_chain/api_get/tag/<strTagName>", methods=["GET"])
def apiGetTagByName(strTagName=None):
pass
# create a bookmark (bookmarks expire after a time limit)
@app.route("/story_chain/api_post/tag", methods=["GET"])
def apiPostTag(strTagName=None):
request.args.get("strTagName")
request.args.get("intStoryId")
pass
#= Flask examples =
# GET/POST parameter example
@app.route("/hello/<username>/<int:num>", methods=["GET", "POST"])
def hello(username, num):
#http://192.168.1.101:5000/hello/muchu/7?love=lunna
request.form #get form data when POST
return "Hello World! %s %d method: %s args: %s"%(username, num,
request.method, request.args.get("love"))
# template example
@app.route("/template/")
@app.route("/template/<name>")
def template(name=None):
return render_template("temp.html", name=name)
# JSON post example
@app.route("/jsonpapi", methods=["GET"])
def jsonpapi():
x = request.args.get("x", 0, type=int)
y = request.args.get("y", 0, type=int)
dicResultJson = {"result":x+y}
return make_jsonp_response(dicJsonObj=dicResultJson)
if __name__ == "__main__":
start_flask_server()
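# Client-side sketch (not part of the app): every route above returns JSONP, so
# callers pass a strJsonpCallback query argument that make_jsonp_response wraps
# around the JSON payload. Assuming the requests library is available:
#   import requests
#   r = requests.get("http://localhost:5000/story_chain/api_get/story/1",
#                    params={"strJsonpCallback": "handleStory"})
#   print(r.text)  # handleStory({"str_content": ..., "int_like": ..., "int_dislike": ...})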
| 34.038095
| 94
| 0.689144
|
1354c689298d20a92ecea8e5b49d805dfb56c133
| 4,002
|
py
|
Python
|
ML6_fruits.py
|
maedora/apphy186MLexercises
|
8086b9a98041ecfe332421e3637b97b37a0864e7
|
[
"MIT"
] | null | null | null |
ML6_fruits.py
|
maedora/apphy186MLexercises
|
8086b9a98041ecfe332421e3637b97b37a0864e7
|
[
"MIT"
] | null | null | null |
ML6_fruits.py
|
maedora/apphy186MLexercises
|
8086b9a98041ecfe332421e3637b97b37a0864e7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 19 13:51:29 2021
@author: Alena Edora
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, \
confusion_matrix, ConfusionMatrixDisplay
df = pd.read_excel('classification_features.xlsx')
# Get names of indexes to drop
indexNames = df[df['Class'] == 'Banana'].index
# Delete these row indexes from dataFrame
df.drop(indexNames, inplace=True)
# Replacing Mango and Orange with 1 and 0
df = df.replace(['Mango','Orange'], [1,0])
# splitting the test and training data set
mango_df_train = df[df['Class']==1].head(18)
mango_df_test = df[df['Class']==1].tail(17)
orange_df_train = df[df['Class']==0].head(18)
orange_df_test = df[df['Class']==0].tail(17)
df_train = pd.concat([mango_df_train, orange_df_train], axis=0)
df_test = pd.concat([mango_df_test, orange_df_test], axis=0)
train_samples = df_train.shape[0] # no. of samples of train set
test_samples = df_test.shape[0] # no. of samples of test set
# input features + bias
x1_train = df_train[['Normalized Hue','NormRound']].values
x0_train = np.ones((train_samples,1))
x_train = np.concatenate((x0_train,x1_train), axis=1)
x1_test = df_test[['Normalized Hue','NormRound']].values
x0_test = np.ones((test_samples,1))
x_test = np.concatenate((x0_test,x1_test), axis=1)
# true values
t_train = df_train[['Class']].values
t_test = df_test[['Class']].values
# defining the functions to use
def sigmoid(z):
g = (1/(1+np.exp(-z)))
return g
def dsigmoid(g):
dg = g * (1 - g)
return dg
def linear(z):
g = z
return g
def dlinear(z):
dg = np.ones(z.shape)
return dg
def relu(z):
g = np.maximum(0,z)
return g
def drelu(z):
    # derivative of ReLU: 1 where z > 0, 0 elsewhere
    dg = (z > 0).astype(float)
    return dg
def step(z):
"""
only for accuracy purposes since the output values of the neural network are
not exactly at 1s and 0s
"""
y = []
for i in z.T:
if i > 0.5:
y.append(1)
else:
y.append(0)
return y
# initialization
eta = 0.01 # learning rate
epoch = 1000 # no. of times when all patterns have passed thru the network
hidden_node = 5 # no. of hidden nodes
output_node= 1 # no. of output nodes
feature_count = 3 # including bias
# plotting the cost function
SSE_cost = []
# weights init
w1 = np.random.rand(hidden_node, feature_count) - 0.5 # (5,3)
w2 = np.random.rand(output_node, hidden_node) - 0.5 # (1,5)
for i in range(epoch):
# first layer
a1 = np.dot(w1,x_train.T)
z1 = np.array(relu(a1))
# second layer
a2 = np.dot(w2,z1)
z2 = np.array(relu(a2)) # also yk
# computing error of output unit
delta_2 = drelu(a2) * (z2-t_train.T)
delta_1 = drelu(z1) * np.dot(delta_2.T,w2).T
# computing the error derivatives of the samples
dE_2 = np.dot(delta_2,z1.T)
dE_1 = np.dot(delta_1,x_train)
# computing for weight change
w2 += -eta * dE_2
w1 += -eta * dE_1
err = 0.5 * np.sum((z2-t_train.T)**2)
SSE_cost.append(err)
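# In equation form, the loop above is plain two-layer backpropagation
# (eta = learning rate, t = targets, g = ReLU):
#   delta_2 = g'(a2) * (z2 - t)
#   delta_1 = g'(z1) * (w2.T @ delta_2)
#   w2 <- w2 - eta * (delta_2 @ z1.T)
#   w1 <- w1 - eta * (delta_1 @ x)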
# testing the accuracy of the model
# y_pred = step(z2)
aj = np.dot(w1,x_test.T)
zj = np.array(relu(aj))
# second layer
ak = np.dot(w2,zj)
zk = np.array(relu(ak)) # also yk
y_pred = step(zk)
# prints accuracy of the model
print('Accuracy: ', accuracy_score(y_pred,t_test))
cm = confusion_matrix(y_pred, t_test)
disp = ConfusionMatrixDisplay(confusion_matrix=cm)
disp.plot()
plt.savefig('confusion_matrix-acc-0.8.png')
plt.show()
plt.plot(np.arange(0,epoch,1), SSE_cost)
plt.xlabel('No. of Epochs')
plt.ylabel('Cost Function')
plt.title('2-layer Neural Network Cost Function for Fruit Classification')
plt.savefig('cost_func_fruits.png')
plt.show()
| 25.0125
| 81
| 0.617691
|
adc0003dca3e60f178b34c38f66966626eb9a784
| 1,077
|
py
|
Python
|
nginpro/ngin.py
|
thesabbir/nginpro
|
b3ca2fbadc19fa28798434bb640f4d0a1bf742b9
|
[
"MIT"
] | 1
|
2020-12-28T16:33:54.000Z
|
2020-12-28T16:33:54.000Z
|
nginpro/ngin.py
|
thesabbir/nginpro
|
b3ca2fbadc19fa28798434bb640f4d0a1bf742b9
|
[
"MIT"
] | null | null | null |
nginpro/ngin.py
|
thesabbir/nginpro
|
b3ca2fbadc19fa28798434bb640f4d0a1bf742b9
|
[
"MIT"
] | null | null | null |
import argparse
from nginx_conf import server, reverse_proxy
from utils import to_nginx_template, make_indent, make_block
"""
Initiate argparse
"""
parser = argparse.ArgumentParser()
"""
Add arguments
"""
parser.add_argument("-r", "--revproxy", help="reverse proxy", action="store_true")
parser.add_argument("-n", "--name", help="server name or domain name", action="store")
parser.add_argument("-p", "--proxypass", help="proxy pass server", action="store")
"""
Parsing arguments
"""
args = parser.parse_args()
"""
Reverse proxy config generator
"""
if args.revproxy:
if args.name is None or args.proxypass is None:
raise SystemExit('Name and Pass is required!')
server['server_name'] = args.name
reverse_proxy['proxy_pass'] = args.proxypass
to_nginx_template(reverse_proxy)
location = make_block(name="location", content=to_nginx_template(reverse_proxy), pattern='/')
server = to_nginx_template(server)
block = '{} {}'.format(server, location)
conf = make_block(name="server", content=block, pattern="")
    print(make_indent(conf))
| 29.916667
| 97
| 0.715877
|
bfa274731702b7e93b1fde530bd4215f7eaa6a3d
| 854
|
py
|
Python
|
topics/DynamicProgramming/Paint_Fence_276/Paint_Fence_276.py
|
DmitryNaimark/leetcode-solutions-python
|
16af5f3a9cb8469d82b14c8953847f0e93a92324
|
[
"MIT"
] | 1
|
2019-10-31T11:06:23.000Z
|
2019-10-31T11:06:23.000Z
|
topics/DynamicProgramming/Paint_Fence_276/Paint_Fence_276.py
|
DmitryNaimark/leetcode-solutions-python
|
16af5f3a9cb8469d82b14c8953847f0e93a92324
|
[
"MIT"
] | null | null | null |
topics/DynamicProgramming/Paint_Fence_276/Paint_Fence_276.py
|
DmitryNaimark/leetcode-solutions-python
|
16af5f3a9cb8469d82b14c8953847f0e93a92324
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/paint-fence/
# ---------------------------------------------------
# Runtime Complexity: O(N)
# Space Complexity: O(1)
class Solution:
def numWays(self, n: int, k: int) -> int:
if n == 0:
return 0
if n == 1:
return k
same_color = k
diff_color = k * (k - 1)
for i in range(2, n):
# Amount of ways to paint next fence the same color is the same as diff_color for the previous fence.
tmp = diff_color
diff_color = (diff_color + same_color) * (k - 1)
same_color = tmp
return same_color + diff_color
# ---------------------------------------------------
# Test Cases
# ---------------------------------------------------
solution = Solution()
# 6
print(solution.numWays(3, 2))
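# Worked trace of the recurrence for the test case above (n=3, k=2):
#   base case: same_color = 2, diff_color = 2 * (2 - 1) = 2
#   i = 2: same_color = 2 (previous diff_color), diff_color = (2 + 2) * (2 - 1) = 4
#   result: same_color + diff_color = 2 + 4 = 6, matching the expected output.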
| 26.6875
| 113
| 0.443794
|
78606c340479b12753242b347ebfcd6ea59a504c
| 543
|
py
|
Python
|
Python/Linked List/traversal.py
|
msboffl/Data-Structures-and-Algorithms
|
ad823886f5c25b80487b42d92c9b71c5bdc5cf26
|
[
"MIT"
] | null | null | null |
Python/Linked List/traversal.py
|
msboffl/Data-Structures-and-Algorithms
|
ad823886f5c25b80487b42d92c9b71c5bdc5cf26
|
[
"MIT"
] | null | null | null |
Python/Linked List/traversal.py
|
msboffl/Data-Structures-and-Algorithms
|
ad823886f5c25b80487b42d92c9b71c5bdc5cf26
|
[
"MIT"
] | null | null | null |
class Node:
def __init__(self,data):
self.data = data
self.ref = None
class LinkedList:
def __init__(self):
self.head = None
    # method to print the linked list
    def printLL(self):
        if self.head is None:
            print('Linked List is empty')
        else:
            n = self.head
            while n is not None:
                print(n.data)
                n = n.ref
LL1 = LinkedList()
LL1.head = Node(10)
second = Node(20)
third = Node(30)
LL1.head.ref = second
second.ref = third
LL1.printLL()
| 19.392857
| 41
| 0.526703
|
3f852d385494598301a1f5e931efebc98c5cec89
| 8,833
|
py
|
Python
|
networks/network.py
|
StanfordVL/cavin
|
581f70fefb3a869db739d8539f3b74759ab71777
|
[
"MIT"
] | 17
|
2020-04-11T22:31:40.000Z
|
2021-08-16T09:29:16.000Z
|
networks/network.py
|
StanfordVL/cavin
|
581f70fefb3a869db739d8539f3b74759ab71777
|
[
"MIT"
] | null | null | null |
networks/network.py
|
StanfordVL/cavin
|
581f70fefb3a869db739d8539f3b74759ab71777
|
[
"MIT"
] | 4
|
2020-09-28T02:39:31.000Z
|
2021-05-16T13:44:29.000Z
|
"""Base extension to network to simplify copy operations.
Note: This file overrides Network and DistributionNetwork classes in TF-Agents
to get around Keras.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
# import sys
import six
import tensorflow as tf
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step
from tensorflow.python.training.tracking import base # TF internal
from tensorflow.python.util import tf_decorator # TF internal
from tensorflow.python.util import tf_inspect # TF internal
class _NetworkMeta(abc.ABCMeta):
"""Meta class for Network object.
We mainly use this class to capture all args to `__init__` of all `Network`
instances, and store them in `instance._saved_kwargs`. This in turn is
used by the `instance.copy` method.
"""
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Network class.
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
RuntimeError: if the class __init__ has *args in its signature.
"""
if baseclasses[0] == object:
# This is just Network below. Return early.
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
init = attrs.get("__init__", None)
if not init:
# This wrapper class does not define an __init__. When someone
# creates the object, the __init__ of its parent class will be
# called. We will call that __init__ instead separately since the
# parent class is also a subclass of Network. Here just create
# the class and return.
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
arg_spec = tf_inspect.getargspec(init)
if arg_spec.varargs is not None:
            raise RuntimeError(
                '%s.__init__ function accepts *args. '
                'This is not allowed.' %
                classname)
def _capture_init(self, *args, **kwargs):
"""Captures init args and kwargs into `_saved_kwargs`."""
if len(args) > len(arg_spec.args) + 1:
# Error case: more inputs than args. Call init so that the
# appropriate error can be raised to the user.
init(self, *args, **kwargs)
for i, arg in enumerate(args):
# Add +1 to skip `self` in arg_spec.args.
kwargs[arg_spec.args[1 + i]] = arg
init(self, **kwargs)
# Avoid auto tracking which prevents keras from tracking layers
# that are passed as kwargs to the Network.
with base.no_automatic_dependency_tracking_scope(self):
setattr(self, "_saved_kwargs", kwargs)
attrs["__init__"] = tf_decorator.make_decorator(init, _capture_init)
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@six.add_metaclass(_NetworkMeta)
class Network(object):
"""Base extension to network to simplify copy operations."""
def __init__(self,
input_tensor_spec,
state_spec,
name,
mask_split_fn=None):
"""Creates an instance of `Network`.
Args:
input_tensor_spec: A nest of `tensor_spec.TensorSpec` representing
the input observations.
state_spec: A nest of `tensor_spec.TensorSpec` representing the
state needed by the network. Use () if none.
name: A string representing the name of the network.
mask_split_fn: A function used for masking valid/invalid actions
with each state of the environment. The function takes in a
full observation and returns a tuple consisting of 1) the part
of the observation intended as input to the network and 2) the
mask.
"""
self._name = name
self._input_tensor_spec = input_tensor_spec
self._output_tensor_spec = None
self._state_spec = state_spec
self._mask_split_fn = mask_split_fn
self._built = False
@property
def name(self):
return self._name
@property
def state_spec(self):
return self._state_spec
@property
def built(self):
return self._built
@property
def weights(self):
return self._weights
@property
def trainable_weights(self):
return self._trainable_weights
@property
def non_trainable_weights(self):
return self._non_trainable_weights
def create_variables(self):
if not self.built:
random_input = tensor_spec.sample_spec_nest(
self.input_tensor_spec, outer_dims=(1,))
step_type = tf.expand_dims(time_step.StepType.FIRST, 0)
output_tensors = self.__call__(random_input, step_type, None)
with tf.compat.v1.variable_scope(self._name):
self._weights = tf.compat.v1.get_collection(
key=tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
scope=self._name)
self._trainable_weights = tf.compat.v1.trainable_variables(
scope=self._name)
self._non_trainable_weights = [
var for var in self._weights
if var not in self._trainable_weights]
if self._output_tensor_spec is None:
self._output_tensor_spec = tf.nest.map_structure(
lambda t: tensor_spec.TensorSpec.from_tensor(
tf.squeeze(t, axis=0)),
output_tensors)
@property
def input_tensor_spec(self):
"""Returns the spec of the input to the network of type InputSpec."""
return self._input_tensor_spec
@property
def output_tensor_spec(self):
"""Returns the spec of the input to the network of type OutputSpec."""
assert self._output_tensor_spec is not None
return self._output_tensor_spec
@property
def mask_split_fn(self):
"""Returns the mask_split_fn for handling masked actions."""
return self._mask_split_fn
@property
def variables(self):
"""Return the variables for all the network layers.
If the network hasn't been built, builds it on random input (generated
using self._input_tensor_spec) to build all the layers and their
variables.
Raises:
ValueError: If the network fails to build.
"""
assert self.built
return self.weights
@property
def trainable_variables(self):
"""Return the trainable variables for all the network layers.
If the network hasn't been built, builds it on random input (generated
using self._input_tensor_spec) to build all the layers and their
variables.
Raises:
ValueError: If the network fails to build.
"""
assert self.built
return self.trainable_weights
@property
def info_spec(self):
return ()
def copy(self, **kwargs):
"""Create a shallow copy of this network.
**NOTE** Network layer weights are *never* copied. This method
recreates the `Network` instance with the same arguments it was
initialized with (excepting any new kwargs).
Args:
**kwargs: Args to override when recreating this network. Commonly
overridden args include 'name'.
Returns:
A shallow copy of this network.
"""
return type(self)(**dict(self._saved_kwargs, **kwargs))
def __call__(self, inputs, *args, **kwargs):
tf.nest.assert_same_structure(inputs, self.input_tensor_spec)
# TODO: Debug.
with tf.compat.v1.variable_scope(self._name,
reuse=tf.compat.v1.AUTO_REUSE):
outputs = self.call(inputs, *args, **kwargs)
self._built = True
return outputs
class DistributionNetwork(Network):
"""Base class for networks which generate Distributions as their output."""
def __init__(self, input_tensor_spec, state_spec, output_spec, name):
super(DistributionNetwork, self).__init__(
input_tensor_spec=input_tensor_spec, state_spec=state_spec,
name=name)
self._output_spec = output_spec
@property
def output_spec(self):
return self._output_spec
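# A minimal sketch (not part of the original module), assuming the hypothetical
# subclass name ConstantNet: it shows how _NetworkMeta captures the __init__
# kwargs in _saved_kwargs so that copy() can recreate the network with the same
# arguments (weights are never copied).
class ConstantNet(Network):
    def __init__(self, input_tensor_spec, name='ConstantNet'):
        super(ConstantNet, self).__init__(
            input_tensor_spec=input_tensor_spec, state_spec=(), name=name)
    def call(self, inputs, step_type=None, network_state=None):
        # A real network would apply layers here; the inputs pass through unchanged.
        return inputs
if __name__ == '__main__':
    spec = tensor_spec.TensorSpec(shape=(3,), dtype=tf.float32)
    net = ConstantNet(spec)
    clone = net.copy(name='clone')  # rebuilt from the captured kwargs, new name
    print(clone.name)               # -> 'clone'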
| 35.332
| 79
| 0.625042
|
b877129fed8da189ac42018f3acc0805d83ffa13
| 6,330
|
py
|
Python
|
bin/ADFRsuite/CCSBpckgs/UTpackages/UTmesh/lbiemesher.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | null | null | null |
bin/ADFRsuite/CCSBpckgs/UTpackages/UTmesh/lbiemesher.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | null | null | null |
bin/ADFRsuite/CCSBpckgs/UTpackages/UTmesh/lbiemesher.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | 1
|
2021-11-04T21:48:14.000Z
|
2021-11-04T21:48:14.000Z
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.5
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_lbiemesher', [dirname(__file__)])
except ImportError:
import _lbiemesher
return _lbiemesher
if fp is not None:
try:
_mod = imp.load_module('_lbiemesher', fp, pathname, description)
finally:
fp.close()
return _mod
_lbiemesher = swig_import_helper()
del swig_import_helper
else:
import _lbiemesher
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
_lbiemesher.DEFAULT_ERR_swigconstant(_lbiemesher)
DEFAULT_ERR = _lbiemesher.DEFAULT_ERR
_lbiemesher.DEFAULT_ERR_IN_swigconstant(_lbiemesher)
DEFAULT_ERR_IN = _lbiemesher.DEFAULT_ERR_IN
_lbiemesher.DEFAULT_IVAL_swigconstant(_lbiemesher)
DEFAULT_IVAL = _lbiemesher.DEFAULT_IVAL
_lbiemesher.DEFAULT_IVAL_IN_swigconstant(_lbiemesher)
DEFAULT_IVAL_IN = _lbiemesher.DEFAULT_IVAL_IN
_lbiemesher.SINGLE_swigconstant(_lbiemesher)
SINGLE = _lbiemesher.SINGLE
_lbiemesher.HEXA_swigconstant(_lbiemesher)
HEXA = _lbiemesher.HEXA
_lbiemesher.DOUBLE_swigconstant(_lbiemesher)
DOUBLE = _lbiemesher.DOUBLE
_lbiemesher.TETRA_swigconstant(_lbiemesher)
TETRA = _lbiemesher.TETRA
_lbiemesher.T_4_H_swigconstant(_lbiemesher)
T_4_H = _lbiemesher.T_4_H
_lbiemesher.TETRA2_swigconstant(_lbiemesher)
TETRA2 = _lbiemesher.TETRA2
class LBIE_Mesher(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, LBIE_Mesher, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, LBIE_Mesher, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _lbiemesher.new_LBIE_Mesher(*args)
try:
self.this.append(this)
except:
self.this = this
__swig_destroy__ = _lbiemesher.delete_LBIE_Mesher
__del__ = lambda self: None
__swig_setmethods__["oc"] = _lbiemesher.LBIE_Mesher_oc_set
__swig_getmethods__["oc"] = _lbiemesher.LBIE_Mesher_oc_get
if _newclass:
oc = _swig_property(_lbiemesher.LBIE_Mesher_oc_get, _lbiemesher.LBIE_Mesher_oc_set)
def inputData(self, data, dims, numVerts, numCells, origin=0, spans=0):
return _lbiemesher.LBIE_Mesher_inputData(self, data, dims, numVerts, numCells, origin, spans)
def fileOpen(self, arg2):
return _lbiemesher.LBIE_Mesher_fileOpen(self, arg2)
def fileSave(self, arg2):
return _lbiemesher.LBIE_Mesher_fileSave(self, arg2)
def setMesh(self, arg2):
return _lbiemesher.LBIE_Mesher_setMesh(self, arg2)
def errorChange(self, arg2):
return _lbiemesher.LBIE_Mesher_errorChange(self, arg2)
def errorChange_in(self, arg2):
return _lbiemesher.LBIE_Mesher_errorChange_in(self, arg2)
def isovalueChange(self, arg2):
return _lbiemesher.LBIE_Mesher_isovalueChange(self, arg2)
def isovalueChange_in(self, arg2):
return _lbiemesher.LBIE_Mesher_isovalueChange_in(self, arg2)
def outTriangle(self, outverts, outfaces):
return _lbiemesher.LBIE_Mesher_outTriangle(self, outverts, outfaces)
def outTetra(self, outverts, outfaces):
return _lbiemesher.LBIE_Mesher_outTetra(self, outverts, outfaces)
def outHexa(self, outverts, outfaces):
return _lbiemesher.LBIE_Mesher_outHexa(self, outverts, outfaces)
def outQuad(self, outverts, outfaces):
return _lbiemesher.LBIE_Mesher_outQuad(self, outverts, outfaces)
def getNumFaces(self):
return _lbiemesher.LBIE_Mesher_getNumFaces(self)
def getNumVerts(self):
return _lbiemesher.LBIE_Mesher_getNumVerts(self)
def getVolMin(self):
return _lbiemesher.LBIE_Mesher_getVolMin(self)
def getVolMax(self):
return _lbiemesher.LBIE_Mesher_getVolMax(self)
def getOuterSurface(self):
return _lbiemesher.LBIE_Mesher_getOuterSurface(self)
def setXCutPlane(self, plane_x):
return _lbiemesher.LBIE_Mesher_setXCutPlane(self, plane_x)
def setZCutPlane(self, plane_z):
return _lbiemesher.LBIE_Mesher_setZCutPlane(self, plane_z)
def getSurface(self, crossection=0):
return _lbiemesher.LBIE_Mesher_getSurface(self, crossection)
LBIE_Mesher_swigregister = _lbiemesher.LBIE_Mesher_swigregister
LBIE_Mesher_swigregister(LBIE_Mesher)
# This file is compatible with both classic and new-style classes.
| 30.432692
| 101
| 0.7109
|
f5f309234f3cae27b898390a996e7eca3a32c4cc
| 12,587
|
py
|
Python
|
iridauploader/core/cli_entry.py
|
dfornika/irida-uploader
|
0d855433bf5b567ff1e63501950fdc145b488742
|
[
"Apache-2.0"
] | null | null | null |
iridauploader/core/cli_entry.py
|
dfornika/irida-uploader
|
0d855433bf5b567ff1e63501950fdc145b488742
|
[
"Apache-2.0"
] | null | null | null |
iridauploader/core/cli_entry.py
|
dfornika/irida-uploader
|
0d855433bf5b567ff1e63501950fdc145b488742
|
[
"Apache-2.0"
] | null | null | null |
import logging
from pprint import pformat
import iridauploader.api as api
import iridauploader.parsers as parsers
import iridauploader.progress as progress
from iridauploader.model import DirectoryStatus
from . import api_handler, parsing_handler, logger, exit_return
VERSION_NUMBER = "0.4.1"
def upload_run_single_entry(directory, force_upload=False):
"""
This function acts as a single point of entry for uploading a directory
    Handles getting a directory's run status, and running if conditions are met (valid run, new run or forced upload).
    :param directory: Directory of the sequencing run to upload
    :param force_upload: When set to true, the upload status file will be ignored and an upload will be attempted
:return: ExitReturn
"""
directory_status = parsing_handler.get_run_status(directory)
# Check if a run is invalid, an invalid run cannot be uploaded.
if directory_status.status_equals(DirectoryStatus.INVALID):
error_msg = "ERROR! Run in directory {} is invalid. Returned with message: '{}'".format(
directory_status.directory, directory_status.message)
logging.error(error_msg)
return exit_error(error_msg)
# Only upload if run is new, or force_upload is True
if not force_upload:
if not directory_status.status_equals(DirectoryStatus.NEW):
error_msg = "ERROR! Run in directory {} is not new. It has either been uploaded, " \
"or an upload was attempted with error. " \
"Please check the status file 'irida_uploader_status.info' " \
"in the run directory for more details. " \
"You can bypass this error by uploading with the --force argument.".format(directory)
logging.error(error_msg)
return exit_error(error_msg)
return _validate_and_upload(directory_status)
def batch_upload_single_entry(batch_directory, force_upload=False):
"""
This function acts as a single point of entry for batch uploading run directories
    It uses _validate_and_upload as its function for uploading the individual runs
    A list of runs to be uploaded is generated at start up, and an upload is attempted for every run found.
:param batch_directory: Directory containing sequencing run directories to upload
    :param force_upload: When set to true, the upload status file will be ignored and an upload will be attempted
:return: ExitReturn
"""
logging.debug("batch_upload_single_entry:Starting {} with force={}".format(batch_directory, force_upload))
# get all potential directories to upload
directory_status_list = parsing_handler.get_run_status_list(batch_directory)
# list info about directories found
logging.info("Found {} potential run directories".format(len(directory_status_list)))
for directory_status in directory_status_list:
logging.info("DIRECTORY: %s\n"
"%30sSTATUS: %s\n"
"%30sDETAILS: %s"
% (directory_status.directory, "", directory_status.status, "", directory_status.message))
# if `force` is on, only don't upload invalid runs
if force_upload:
upload_list = [x for x in directory_status_list if not x.status_equals(DirectoryStatus.INVALID)]
logging.info("Starting upload for all non invalid runs. {} run(s) found. "
"(Running with --force)".format(len(upload_list)))
# without `force` only upload new runs
else:
upload_list = [x for x in directory_status_list if x.status_equals(DirectoryStatus.NEW)]
logging.info("Starting upload for all new runs. {} run(s) found.".format(len(upload_list)))
# run upload, keep track of which directories did not upload
error_list = []
for directory_status in upload_list:
logging.info("Starting upload for {}".format(directory_status.directory))
result = _validate_and_upload(directory_status)
if result.exit_code == exit_return.EXIT_CODE_ERROR:
error_list.append(directory_status.directory)
logging.info("Uploads completed with {} error(s)".format(len(error_list)))
for directory in error_list:
logging.warning("Directory '{}' upload exited with ERROR, check log and status file for details"
"".format(directory))
logging.info("Batch upload complete, Exiting!")
return exit_success()
def _validate_and_upload(directory_status):
"""
This function attempts to upload a single run directory
Handles parsing and validating the directory for samples
Sets up the api layer based on config file
    Verifies the samples are able to be uploaded (verifies projects exist)
Initializes objects/routes on IRIDA to accept Samples (creates samples if they don't exist)
Starts the upload
:param directory_status: DirectoryStatus object that has directory to try upload
:return: ExitReturn
"""
logging_start_block(directory_status.directory)
logging.debug("upload_run_single_entry:Starting {}".format(directory_status.directory))
# Add progress file to directory
try:
_set_and_write_directory_status(directory_status, DirectoryStatus.PARTIAL)
except progress.exceptions.DirectoryError as e:
logging.error("ERROR! Error while trying to write status file to directory {} with error message: {}"
"".format(e.directory, e.message))
logging.info("Samples not uploaded!")
return exit_error()
# Do parsing (Also offline validation)
try:
sequencing_run = parsing_handler.parse_and_validate(directory_status.directory)
except parsers.exceptions.DirectoryError as e:
# Directory was not valid for some reason
full_error = "ERROR! An error occurred with directory '{}', with message: {}".format(e.directory, e.message)
logging.error(full_error)
logging.info("Samples not uploaded!")
_set_and_write_directory_status(directory_status, DirectoryStatus.ERROR, full_error)
return exit_error(e)
except parsers.exceptions.ValidationError as e:
# Sequencing Run / SampleSheet was not valid for some reason
error_msg = "ERROR! Errors occurred during validation with message: {}".format(e.message)
logging.error(error_msg)
error_list_msg = "Error list: " + pformat(e.validation_result.error_list)
logging.error(error_list_msg)
logging.info("Samples not uploaded!")
full_error = error_msg + ", " + error_list_msg
_set_and_write_directory_status(directory_status, DirectoryStatus.ERROR, full_error)
return exit_error(e)
# Initialize the api for first use
logging.info("*** Connecting to IRIDA ***")
try:
api_handler.initialize_api_from_config()
except api.exceptions.IridaConnectionError as e:
logging.error("ERROR! Could not initialize irida api.")
logging.error("Errors: " + pformat(e.args))
logging.info("Samples not uploaded!")
full_error = "ERROR! Could not initialize irida api. Errors: " + pformat(e.args)
_set_and_write_directory_status(directory_status, DirectoryStatus.ERROR, full_error)
return exit_error(e)
logging.info("*** Connected ***")
logging.info("*** Verifying run (online validation) ***")
try:
validation_result = api_handler.prepare_and_validate_for_upload(sequencing_run)
except api.exceptions.IridaConnectionError as e:
logging.error("Lost connection to Irida")
logging.error("Errors: " + pformat(e.args))
full_error = "Lost connection to Irida. Errors: " + pformat(e.args)
_set_and_write_directory_status(directory_status, DirectoryStatus.ERROR, full_error)
return exit_error(e)
if not validation_result.is_valid():
logging.error("Sequencing run can not be uploaded")
logging.error("Sequencing run can not be uploaded. Encountered {} errors"
"".format(validation_result.error_count()))
logging.error("Errors: " + pformat(validation_result.error_list))
full_error = "Sequencing run can not be uploaded, Errors: " + pformat(validation_result.error_list)
_set_and_write_directory_status(directory_status, DirectoryStatus.ERROR, full_error)
return exit_error(full_error)
logging.info("*** Run Verified ***")
# Start upload
logging.info("*** Starting Upload ***")
try:
run_id = api_handler.upload_sequencing_run(sequencing_run)
except api.exceptions.IridaConnectionError as e:
logging.error("Lost connection to Irida")
logging.error("Errors: " + pformat(e.args))
full_error = "Lost connection to Irida. Errors: " + pformat(e.args)
_set_and_write_directory_status(directory_status, DirectoryStatus.ERROR, full_error)
return exit_error(e)
except api.exceptions.IridaResourceError as e:
logging.error("Could not access IRIDA resource")
logging.error("Errors: " + pformat(e.args))
full_error = "Could not access IRIDA resource Errors: " + pformat(e.args)
_set_and_write_directory_status(directory_status, DirectoryStatus.ERROR, full_error)
return exit_error(e)
except api.exceptions.FileError as e:
logging.error("Could not upload file to IRIDA")
logging.error("Errors: " + pformat(e.args))
full_error = "Could not upload file to IRIDA. Errors: " + pformat(e.args)
_set_and_write_directory_status(directory_status, DirectoryStatus.ERROR, full_error)
return exit_error(e)
logging.info("*** Upload Complete ***")
# Set progress file to complete
try:
_set_and_write_directory_status(directory_status, DirectoryStatus.COMPLETE, run_id=run_id)
except progress.exceptions.DirectoryError as e:
# this is an exceptionally rare case (successful upload, but fails to write progress)
        logging.error("ERROR! Error while trying to write status file to directory {} with error message: {}"
"".format(e.directory, e.message))
logging.info("Samples were uploaded, but progress file may be incorrect!")
logging.info("Samples in directory '{}' have finished uploading!".format(directory_status.directory))
logging_end_block()
return exit_success()
def _set_and_write_directory_status(directory_status, status, message=None, run_id=None):
"""
Given a DirectoryStatus object, sets the status and message, and then writes to the directory status directory
:param directory_status: DirectoryStatus object
:param status: a valid DirectoryStatus status
:param message: string
:param run_id: optional, if provided, the run id and irida instance will be included when written
:return:
"""
directory_status.status = status
directory_status.message = message
if run_id:
progress.write_directory_status(directory_status, run_id)
else:
progress.write_directory_status(directory_status)
def exit_error(error):
"""
    Returns a failed run exit code which ends the process when returned
:return: ExitReturn with EXIT_CODE_ERROR
"""
logging_end_block()
return exit_return.ExitReturn(exit_return.EXIT_CODE_ERROR, error)
def exit_success():
"""
    Returns a successful run exit code which ends the process when returned
:return: ExitReturn with EXIT_CODE_SUCCESS
"""
return exit_return.ExitReturn(exit_return.EXIT_CODE_SUCCESS)
def logging_start_block(directory):
"""
Logs an information block to the console and file which indicates the start of an upload run.
Includes the uploader version number set in this module
:return:
"""
logger.add_log_to_directory(directory)
logging.info("==================================================")
logging.info("---------------STARTING UPLOAD RUN----------------")
logging.info("Uploader Version {}".format(VERSION_NUMBER))
logging.info("Logging to file in: " + logger.get_user_log_dir())
logging.info("==================================================")
def logging_end_block():
"""
    Logs a block to the console and file that indicates the end of an upload run.
:return:
"""
logging.info("==================================================")
logging.info("----------------ENDING UPLOAD RUN-----------------")
logging.info("==================================================")
logger.remove_directory_logger()
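# A minimal sketch (not part of the original module) of how a command-line
# wrapper might call the single-run entry point defined above and turn the
# returned ExitReturn into a process exit code. The run directory path is a
# hypothetical example.
import sys
def _example_main():
    result = upload_run_single_entry("/path/to/sequencing/run", force_upload=False)
    if result.exit_code == exit_return.EXIT_CODE_ERROR:
        logging.error("Upload exited with an error; check the log and status file for details")
    sys.exit(result.exit_code)
if __name__ == "__main__":
    _example_main()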
| 45.937956
| 118
| 0.69111
|
6f3bf9e1410976950d6b3c1625db1efba20ae4eb
| 159
|
py
|
Python
|
pc_toolbox/utils_data/__init__.py
|
TracyYXChen/prediction-constrained-topic-models
|
91f6f71d10583b49161cd058ededffe93aed0e02
|
[
"MIT"
] | null | null | null |
pc_toolbox/utils_data/__init__.py
|
TracyYXChen/prediction-constrained-topic-models
|
91f6f71d10583b49161cd058ededffe93aed0e02
|
[
"MIT"
] | null | null | null |
pc_toolbox/utils_data/__init__.py
|
TracyYXChen/prediction-constrained-topic-models
|
91f6f71d10583b49161cd058ededffe93aed0e02
|
[
"MIT"
] | null | null | null |
from pc_toolbox.utils_data.util_data_slicer import make_slice_for_step
from pc_toolbox.utils_data.util_stratified_subsample import get_stratified_subsample_ids
| 79.5
| 88
| 0.930818
|
fac348b836b439122445dbce6457fb7301a0b8fb
| 7,406
|
py
|
Python
|
test/functional/qtum_duplicate_stake.py
|
machado-rev/HTMLCOIN
|
f8224d522932b929d4c9ff53ece6dc5e83d91749
|
[
"MIT"
] | 106
|
2017-11-21T14:29:30.000Z
|
2022-01-28T04:40:07.000Z
|
test/functional/qtum_duplicate_stake.py
|
machado-rev/HTMLCOIN
|
f8224d522932b929d4c9ff53ece6dc5e83d91749
|
[
"MIT"
] | 75
|
2018-01-04T16:23:02.000Z
|
2021-12-21T05:59:44.000Z
|
test/functional/qtum_duplicate_stake.py
|
machado-rev/HTMLCOIN
|
f8224d522932b929d4c9ff53ece6dc5e83d91749
|
[
"MIT"
] | 106
|
2017-11-21T16:25:16.000Z
|
2022-01-20T13:58:15.000Z
|
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.messages import *
from test_framework.qtum import *
import time
class QtumDuplicateStakeTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def start_p2p_connection(self):
self.p2p_node = self.node.add_p2p_connection(P2PInterface())
self.p2p_alt_node = self.nodes[1].add_p2p_connection(P2PInterface())
def _remove_from_staking_prevouts(self, staking_prevouts, remove_prevout):
for j in range(len(staking_prevouts)):
prevout = staking_prevouts[j]
if prevout[0].serialize() == remove_prevout.serialize():
staking_prevouts.pop(j)
break
def verify_duplicate_stakes_are_accepted_test(self):
tip = self.node.getblock(self.node.getbestblockhash())
t = (tip['time']+0x10) & 0xfffffff0
# Create one "normal" block
block, block_sig_key = create_unsigned_pos_block(self.node, self.staking_prevouts, nTime=t)
block.sign_block(block_sig_key)
block.rehash()
# Create a slightly different block using the same staking utxo (only difference is the nonce)
alt_block = CBlock(block)
alt_block.vtx = block.vtx[:]
alt_block.nNonce = 1
alt_block.rehash()
alt_block.sign_block(block_sig_key)
alt_block.rehash()
# Send <block> to node
self.p2p_node.send_message(msg_block(block))
# Send <alt_block> to alt_node
self.p2p_alt_node.send_message(msg_block(alt_block))
time.sleep(2)
assert_equal(self.node.getbestblockhash(), block.hash)
assert_equal(self.alt_node.getbestblockhash(), alt_block.hash)
# Build a longer chain on alt_node
self.alt_node.generate(1)
self.sync_all()
self._remove_from_staking_prevouts(self.staking_prevouts, block.prevoutStake)
def verify_spent_stake_is_accepted_in_fork_test(self):
tip = self.node.getblock(self.node.getbestblockhash())
t = (tip['time']+0x10) & 0xfffffff0
# Create one "normal" block
block, block_sig_key = create_unsigned_pos_block(self.node, self.staking_prevouts, nTime=t)
block.sign_block(block_sig_key)
block.rehash()
# Create a different block that spends the prevoutStake from <block>
alt_block, alt_block_sig_key = create_unsigned_pos_block(self.alt_node, self.alt_staking_prevouts, nTime=t)
tx = CTransaction()
tx.vin = [CTxIn(block.prevoutStake)]
tx.vout = [CTxOut(int(COIN), scriptPubKey=CScript([OP_TRUE]))]
tx = rpc_sign_transaction(self.node, tx)
alt_block.vtx.append(tx)
alt_block.hashMerkleRoot = alt_block.calc_merkle_root()
alt_block.rehash()
alt_block.sign_block(alt_block_sig_key)
alt_block.rehash()
# Send <alt_block> to alt_node
self.p2p_alt_node.send_message(msg_block(alt_block))
# Send <block> to node
self.p2p_node.send_message(msg_block(block))
time.sleep(2)
assert_equal(self.node.getbestblockhash(), block.hash)
assert_equal(self.alt_node.getbestblockhash(), alt_block.hash)
# Build a longer chain on alt_node
self.alt_node.generate(1)
self.sync_all()
self._remove_from_staking_prevouts(self.staking_prevouts, block.prevoutStake)
self._remove_from_staking_prevouts(self.alt_staking_prevouts, alt_block.prevoutStake)
def verify_spent_stake_in_old_block_is_rejected_test(self):
tip = self.node.getblock(self.node.getbestblockhash())
t = (tip['time']+0x10) & 0xfffffff0
# Create one "normal" block
block, block_sig_key = create_unsigned_pos_block(self.node, self.staking_prevouts, nTime=t)
block.sign_block(block_sig_key)
block.rehash()
# Create a different block that spends the prevoutStake from <block>
alt_block, alt_block_sig_key = create_unsigned_pos_block(self.alt_node, self.alt_staking_prevouts, nTime=t)
tx = CTransaction()
tx.vin = [CTxIn(block.prevoutStake)]
tx.vout = [CTxOut(int(COIN), scriptPubKey=CScript([OP_TRUE]))]
tx = rpc_sign_transaction(self.node, tx)
alt_block.vtx.append(tx)
alt_block.hashMerkleRoot = alt_block.calc_merkle_root()
alt_block.rehash()
alt_block.sign_block(alt_block_sig_key)
alt_block.rehash()
# Send <alt_block> to alt_node
self.p2p_alt_node.send_message(msg_block(alt_block))
time.sleep(2)
self.alt_node.generate(500)
time.sleep(2)
# Send <block> to node
self.p2p_node.send_message(msg_block(block))
time.sleep(2)
assert_raises_rpc_error(-5, "Block not found", self.node.getblockheader, block.hash)
time.sleep(2)
self.sync_all()
self._remove_from_staking_prevouts(self.staking_prevouts, alt_block.prevoutStake)
self._remove_from_staking_prevouts(self.alt_staking_prevouts, alt_block.prevoutStake)
def run_test(self):
privkey = byte_to_base58(hash256(struct.pack('<I', 0)), 239)
for n in self.nodes:
n.importprivkey(privkey)
self.node = self.nodes[0]
self.alt_node = self.nodes[1]
self.node.setmocktime(int(time.time() - 100*24*60*60))
self.alt_node.setmocktime(int(time.time() - 100*24*60*60))
self.alt_node.generatetoaddress(50, "qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq")
self.sync_all()
self.node.generatetoaddress(500, "qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq")
self.sync_all()
self.alt_staking_prevouts = collect_prevouts(self.alt_node)
self.node.generatetoaddress(50, "qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq")
self.sync_all()
self.staking_prevouts = collect_prevouts(self.node)
print(len(self.staking_prevouts), len(self.alt_staking_prevouts))
for prevout in self.alt_staking_prevouts:
self._remove_from_staking_prevouts(self.staking_prevouts, prevout[0])
print(len(self.staking_prevouts), len(self.alt_staking_prevouts))
self.node.setmocktime(0)
self.alt_node.setmocktime(0)
self.start_p2p_connection()
time.sleep(0x10)
print(len(self.staking_prevouts), len(self.alt_staking_prevouts))
self.verify_duplicate_stakes_are_accepted_test()
assert_equal(self.node.getblockcount(), self.alt_node.getblockcount())
assert_equal(self.node.getbestblockhash(), self.alt_node.getbestblockhash())
time.sleep(0x10)
print(len(self.staking_prevouts), len(self.alt_staking_prevouts))
self.verify_spent_stake_is_accepted_in_fork_test()
assert_equal(self.node.getblockcount(), self.alt_node.getblockcount())
assert_equal(self.node.getbestblockhash(), self.alt_node.getbestblockhash())
time.sleep(0x10)
print(len(self.staking_prevouts), len(self.alt_staking_prevouts))
self.verify_spent_stake_in_old_block_is_rejected_test()
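# A minimal, self-contained sketch (not part of the original test) illustrating
# how _remove_from_staking_prevouts matches entries: prevouts are compared via
# their serialize() output and only the first match is removed. _FakePrevout is
# a hypothetical stand-in for the real outpoint objects; self is unused, so the
# method can be exercised directly off the class.
class _FakePrevout:
    def __init__(self, ident):
        self.ident = ident
    def serialize(self):
        return self.ident.to_bytes(4, 'little')
_demo_prevouts = [(_FakePrevout(1), 100), (_FakePrevout(2), 200)]
QtumDuplicateStakeTest._remove_from_staking_prevouts(None, _demo_prevouts, _FakePrevout(2))
assert len(_demo_prevouts) == 1 and _demo_prevouts[0][0].ident == 1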
if __name__ == '__main__':
QtumDuplicateStakeTest().main()
| 40.469945
| 115
| 0.692547
|
06631ab4743c076d1399306e9ae7bc314c0597d7
| 1,384
|
py
|
Python
|
project/project/urls.py
|
justo340/blogapp
|
7baaee3eed5385dd80b94d77ad72a1b95b7035f3
|
[
"MIT"
] | null | null | null |
project/project/urls.py
|
justo340/blogapp
|
7baaee3eed5385dd80b94d77ad72a1b95b7035f3
|
[
"MIT"
] | null | null | null |
project/project/urls.py
|
justo340/blogapp
|
7baaee3eed5385dd80b94d77ad72a1b95b7035f3
|
[
"MIT"
] | null | null | null |
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from users import views as user_views
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
path('register/', user_views.register, name='register'),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
path('profile/', user_views.profile, name='profile'),
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
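# A minimal sketch (not part of this file) of what the included 'blog.urls'
# module referenced above might contain; it is shown commented out because it
# belongs in a separate blog/urls.py file, and the view names HomeView and
# PostDetailView are hypothetical.
#
# from django.urls import path
# from .views import HomeView, PostDetailView
# urlpatterns = [
#     path('', HomeView.as_view(), name='blog-home'),
#     path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
# ]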
| 37.405405
| 101
| 0.725434
|
e03383704bbfced129cccf64b3855dde896b693d
| 3,002
|
py
|
Python
|
setup.py
|
IntelPython/smp
|
aa319b916d990c5a21a30f64817ee57ee75f761c
|
[
"BSD-3-Clause"
] | 14
|
2017-07-13T15:51:30.000Z
|
2022-02-01T08:20:28.000Z
|
setup.py
|
IntelPython/smp
|
aa319b916d990c5a21a30f64817ee57ee75f761c
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
IntelPython/smp
|
aa319b916d990c5a21a30f64817ee57ee75f761c
|
[
"BSD-3-Clause"
] | 2
|
2019-02-13T20:58:41.000Z
|
2019-03-01T10:56:41.000Z
|
#!/usr/bin/env python
# Copyright (c) 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# System imports
from __future__ import print_function
import platform
import os
from smp import __version__ as ver, __doc__ as doc
from distutils.core import *
setup( name ="SMP",
description ="Static Multi-Processing",
long_description= doc,
url ="https://software.intel.com/intel-distribution-for-python",
author ="Intel Corporation",
author_email="scripting@intel.com",
license ="BSD",
version = ver,
classifiers =[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Plugins',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Other Audience',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
#'Operating System :: MacOS :: MacOS X',
#'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: System :: Hardware :: Symmetric Multi-processing',
],
keywords='multiprocessing multithreading composable parallelism affinity',
packages=['smp'],
py_modules=['SMP'],
)
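# A minimal sketch (not part of the original script): once the package is
# installed (for example with `pip install .`), the same metadata wired into
# setup() above can be sanity-checked from Python.
#
# import smp
# print(smp.__version__)   # the value passed as version=ver
# print(smp.__doc__)       # the value passed as long_description=doc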
| 46.184615
| 82
| 0.678548
|
e0935a9df4ee46fc3355e5db4720d55e79803a3e
| 19,206
|
py
|
Python
|
venv/lib/python3.7/site-packages/debian/debtags.py
|
margretmwangi/Rblog
|
2d606a858c3313e1d48cdd6a8ce205c8776be754
|
[
"Unlicense"
] | null | null | null |
venv/lib/python3.7/site-packages/debian/debtags.py
|
margretmwangi/Rblog
|
2d606a858c3313e1d48cdd6a8ce205c8776be754
|
[
"Unlicense"
] | 1
|
2021-02-08T20:34:54.000Z
|
2021-02-08T20:34:54.000Z
|
venv/lib/python3.7/site-packages/debian/debtags.py
|
margretmwangi/Rblog
|
2d606a858c3313e1d48cdd6a8ce205c8776be754
|
[
"Unlicense"
] | 1
|
2020-11-04T06:48:34.000Z
|
2020-11-04T06:48:34.000Z
|
""" Facilities to work with debtags - tags for Debian packages """
# Copyright (C) 2006-2007 Enrico Zini <enrico@enricozini.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function
import re
import six
try:
import cPickle as pickle # type: ignore
except ImportError:
import pickle # type: ignore
try:
# pylint: disable=unused-import
from typing import (
Callable,
Dict,
IO,
Iterable,
Iterator,
List,
Optional,
Set,
Text,
Tuple,
)
PkgTagDbType = Dict[str, Set[str]]
TagPkgDbType = Dict[str, Set[str]]
PkgFilterType = Callable[[Text], bool]
TagFilterType = Callable[[Text], bool]
PkgTagFilterType = Callable[[Tuple[Text, Set[Text]]], bool]
except ImportError:
# Lack of typing is not important at runtime
pass
from debian.deprecation import function_deprecated_by
def parse_tags(input_data):
# type: (Iterator[Text]) -> Iterator[Tuple[Set[str], Set[str]]]
lre = re.compile(r"^(.+?)(?::?\s*|:\s+(.+?)\s*)$")
for line in input_data:
# Is there a way to remove the last character of a line that does not
# make a copy of the entire line?
m = lre.match(line)
if not m:
continue
pkgs = set(m.group(1).split(', '))
if m.group(2):
tags = set(m.group(2).split(', '))
else:
tags = set()
yield pkgs, tags
parseTags = function_deprecated_by(parse_tags)
def read_tag_database(input_data):
# type: (Iterator[Text]) -> PkgTagDbType
"""Read the tag database, returning a pkg->tags dictionary"""
db = {} # type: PkgTagDbType
for pkgs, tags in parse_tags(input_data):
# Create the tag set using the native set
for p in pkgs:
db[p] = tags.copy()
return db
readTagDatabase = function_deprecated_by(read_tag_database)
def read_tag_database_reversed(input_data):
# type: (Iterator[Text]) -> TagPkgDbType
"""Read the tag database, returning a tag->pkgs dictionary"""
db = {} # type: TagPkgDbType
for pkgs, tags in parse_tags(input_data):
# Create the tag set using the native set
for tag in tags:
if tag in db:
db[tag] |= pkgs
else:
db[tag] = pkgs.copy()
return db
readTagDatabaseReversed = function_deprecated_by(read_tag_database_reversed)
def read_tag_database_both_ways(
input_data, # type: Iterator[Text]
tag_filter=None, # type: TagFilterType
):
# type: (...) -> Tuple[PkgTagDbType, TagPkgDbType]
"Read the tag database, returning a pkg->tags and a tag->pkgs dictionary"
db = {} # type: PkgTagDbType
dbr = {} # type: TagPkgDbType
for pkgs, tags in parse_tags(input_data):
# Create the tag set using the native set
if tag_filter is None:
tags = set(tags)
else:
tags = set(filter(tag_filter, tags))
for pkg in pkgs:
db[pkg] = tags.copy()
for tag in tags:
if tag in dbr:
dbr[tag] |= pkgs
else:
dbr[tag] = pkgs.copy()
return db, dbr
readTagDatabaseBothWays = function_deprecated_by(read_tag_database_both_ways)
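# A minimal, self-contained sketch (not part of the original module) showing the
# line format the parsers above expect: comma-separated packages, a colon, then
# comma-separated tags. The package and tag names are made up.
if __name__ == '__main__':
    _example_lines = iter([
        "vim, vim-tiny: use::editing, role::program\n",
        "tagless-package:\n",
    ])
    _pkg2tags, _tag2pkgs = read_tag_database_both_ways(_example_lines)
    # _pkg2tags['vim'] == {'use::editing', 'role::program'}
    # _tag2pkgs['use::editing'] == {'vim', 'vim-tiny'}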
def reverse(db):
# type: (PkgTagDbType) -> TagPkgDbType
"""Reverse a tag database, from package -> tags to tag->packages"""
res = {} # type: Dict[str, Set[str]]
for pkg, tags in db.items():
for tag in tags:
if tag not in res:
res[tag] = set()
res[tag].add(pkg)
return res
def output(db):
# type: (PkgTagDbType) -> None
"Write the tag database"
for pkg, tags in db.items():
# Using % here seems awkward to me, but if I use calls to
# sys.stdout.write it becomes a bit slower
print("%s:" % (pkg), ", ".join(tags))
def relevance_index_function(full, sub):
#return (float(sub.card(tag)) / float(sub.tag_count())) / \
# (float(full.card(tag)) / float(full.tag_count()))
#return sub.card(tag) * full.card(tag) / sub.tag_count()
# New cardinality divided by the old cardinality
#return float(sub.card(tag)) / float(full.card(tag))
## Same as before, but weighted by the relevance the tag had in the
## full collection, to downplay the importance of rare tags
#return float(sub.card(tag) * full.card(tag)) / float(full.card(tag) * full.tag_count())
# Simplified version:
# return float(sub.card(tag)) / float(full.tag_count())
# Weighted by the square root of the relevance, to downplay the very
# common tags a bit
# return lambda tag: float(sub.card(tag)) / float(full.card(tag)) *
# math.sqrt(full.card(tag) / float(full.tag_count()))
# return lambda tag: float(sub.card(tag)) / float(full.card(tag)) *
# math.sqrt(full.card(tag) / float(full.package_count()))
# One useless factor removed, and simplified further, thanks to Benjamin Mesing
return lambda tag: float(sub.card(tag)**2) / float(full.card(tag))
# The difference between how many packages are in and how many packages are out
# (problems: tags that mean many different things can be very much out
# as well. In the case of 'image editor', for example, there will be
# lots of editors not for images in the outside group.
# It is very, very good for nonambiguous keywords like 'image'.
# return lambda tag: 2 * sub.card(tag) - full.card(tag)
# Same but it tries to downplay the 'how many are out' value in the
# case of popular tags, to mitigate the 'there will always be popular
# tags left out' cases. Does not seem to be much of an improvement.
# return lambda tag: sub.card(tag) - float(full.card(tag) - sub.card(tag))/
# (math.sin(float(full.card(tag))*3.1415/full.package_count())/4 + 0.75)
relevanceIndexFunction = function_deprecated_by(relevance_index_function)
class DB:
"""
In-memory database mapping packages to tags and tags to packages.
"""
def __init__(self):
# type: () -> None
self.db = {} # type: PkgTagDbType
self.rdb = {} # type: TagPkgDbType
def read(self,
input_data, # type: Iterator[Text]
tag_filter=None, # type: TagFilterType
):
# type: (...) -> None
"""
Read the database from a file.
Example::
# Read the system Debtags database
db.read(open("/var/lib/debtags/package-tags", "r"))
"""
self.db, self.rdb = read_tag_database_both_ways(input_data, tag_filter)
def qwrite(self, file):
# type: (IO[bytes]) -> None
"""Quickly write the data to a pickled file"""
pickle.dump(self.db, file)
pickle.dump(self.rdb, file)
def qread(self, file):
# type: (IO[bytes]) -> None
"""Quickly read the data from a pickled file"""
self.db = pickle.load(file)
self.rdb = pickle.load(file)
def insert(self, pkg, tags):
# type: (str, Set[str]) -> None
self.db[pkg] = tags.copy()
for tag in tags:
if tag in self.rdb:
self.rdb[tag].add(pkg)
else:
                self.rdb[tag] = set((pkg,))  # one-element set; set((pkg)) would iterate the string's characters
def dump(self):
# type: () -> None
output(self.db)
def dump_reverse(self):
# type: () -> None
output(self.rdb)
dumpReverse = function_deprecated_by(dump_reverse)
def reverse(self):
# type: () -> DB
"Return the reverse collection, sharing tagsets with this one"
res = DB()
res.db = self.rdb
res.rdb = self.db
return res
def facet_collection(self):
# type: () -> DB
"""
Return a copy of this collection, but replaces the tag names
with only their facets.
"""
fcoll = DB()
tofacet = re.compile(r"^([^:]+).+")
for pkg, tags in self.iter_packages_tags():
ftags = {tofacet.sub(r"\1", t) for t in tags}
fcoll.insert(pkg, ftags)
return fcoll
facetCollection = function_deprecated_by(facet_collection)
def copy(self):
# type: () -> DB
"""
Return a copy of this collection, with the tagsets copied as
well.
"""
res = DB()
res.db = self.db.copy()
res.rdb = self.rdb.copy()
return res
def reverse_copy(self):
# type: () -> DB
"""
Return the reverse collection, with a copy of the tagsets of
this one.
"""
res = DB()
res.db = self.rdb.copy()
res.rdb = self.db.copy()
return res
reverseCopy = function_deprecated_by(reverse_copy)
def choose_packages(self, package_iter):
# type: (Iterable[str]) -> DB
"""
Return a collection with only the packages in package_iter,
sharing tagsets with this one
"""
res = DB()
db = {}
for pkg in package_iter:
if pkg in self.db:
db[pkg] = self.db[pkg]
res.db = db
res.rdb = reverse(db)
return res
choosePackages = function_deprecated_by(choose_packages)
def choose_packages_copy(self, package_iter):
# type: (Iterable[str]) -> DB
"""
Return a collection with only the packages in package_iter,
with a copy of the tagsets of this one
"""
res = DB()
db = {}
for pkg in package_iter:
db[pkg] = self.db[pkg]
res.db = db
res.rdb = reverse(db)
return res
choosePackagesCopy = function_deprecated_by(choose_packages_copy)
def filter_packages(self, package_filter):
# type: (PkgFilterType) -> DB
"""
Return a collection with only those packages that match a
filter, sharing tagsets with this one. The filter will match
on the package.
"""
res = DB()
db = {}
for pkg in filter(package_filter, six.iterkeys(self.db)):
db[pkg] = self.db[pkg]
res.db = db
res.rdb = reverse(db)
return res
filterPackages = function_deprecated_by(filter_packages)
def filter_packages_copy(self, filter_data):
# type: (PkgFilterType) -> DB
"""
Return a collection with only those packages that match a
filter, with a copy of the tagsets of this one. The filter
will match on the package.
"""
res = DB()
db = {}
for pkg in filter(filter_data, six.iterkeys(self.db)):
db[pkg] = self.db[pkg].copy()
res.db = db
res.rdb = reverse(db)
return res
filterPackagesCopy = function_deprecated_by(filter_packages_copy)
def filter_packages_tags(self, package_tag_filter):
# type: (PkgTagFilterType) -> DB
"""
Return a collection with only those packages that match a
filter, sharing tagsets with this one. The filter will match
on (package, tags).
"""
res = DB()
db = {}
for pkg, _ in filter(package_tag_filter, six.iteritems(self.db)):
db[pkg] = self.db[pkg]
res.db = db
res.rdb = reverse(db)
return res
filterPackagesTags = function_deprecated_by(filter_packages_tags)
def filter_packages_tags_copy(self, package_tag_filter):
# type: (PkgTagFilterType) -> DB
"""
Return a collection with only those packages that match a
filter, with a copy of the tagsets of this one. The filter
will match on (package, tags).
"""
res = DB()
db = {}
for pkg, _ in filter(package_tag_filter, six.iteritems(self.db)):
db[pkg] = self.db[pkg].copy()
res.db = db
res.rdb = reverse(db)
return res
filterPackagesTagsCopy = function_deprecated_by(filter_packages_tags_copy)
def filter_tags(self, tag_filter):
# type: (TagFilterType) -> DB
"""
Return a collection with only those tags that match a
filter, sharing package sets with this one. The filter will match
on the tag.
"""
res = DB()
rdb = {}
for tag in filter(tag_filter, six.iterkeys(self.rdb)):
rdb[tag] = self.rdb[tag]
res.rdb = rdb
res.db = reverse(rdb)
return res
filterTags = function_deprecated_by(filter_tags)
def filter_tags_copy(self, tag_filter):
# type: (TagFilterType) -> DB
"""
Return a collection with only those tags that match a
filter, with a copy of the package sets of this one. The
filter will match on the tag.
"""
res = DB()
rdb = {}
for tag in filter(tag_filter, six.iterkeys(self.rdb)):
rdb[tag] = self.rdb[tag].copy()
res.rdb = rdb
res.db = reverse(rdb)
return res
filterTagsCopy = function_deprecated_by(filter_tags_copy)
def has_package(self, pkg):
# type: (str) -> bool
"""Check if the collection contains the given package"""
return pkg in self.db
hasPackage = function_deprecated_by(has_package)
def has_tag(self, tag):
# type: (str) -> bool
"""Check if the collection contains packages tagged with tag"""
return tag in self.rdb
hasTag = function_deprecated_by(has_tag)
def tags_of_package(self, pkg):
# type: (str) -> Set[str]
"""Return the tag set of a package"""
return self.db[pkg] if pkg in self.db else set()
tagsOfPackage = function_deprecated_by(tags_of_package)
def packages_of_tag(self, tag):
# type: (str) -> Set[str]
"""Return the package set of a tag"""
return self.rdb[tag] if tag in self.rdb else set()
packagesOfTag = function_deprecated_by(packages_of_tag)
def tags_of_packages(self, pkgs):
# type: (Iterable[str]) -> Set[str]
"""Return the set of tags that have all the packages in ``pkgs``"""
return set.union(*(self.tags_of_package(p) for p in pkgs))
tagsOfPackages = function_deprecated_by(tags_of_packages)
def packages_of_tags(self, tags):
# type: (Iterable[str]) -> Set[str]
"""Return the set of packages that have all the tags in ``tags``"""
return set.union(*(self.packages_of_tag(t) for t in tags))
packagesOfTags = function_deprecated_by(packages_of_tags)
def card(self, tag):
# type: (str) -> int
"""
Return the cardinality of a tag
"""
return len(self.rdb[tag]) if tag in self.rdb else 0
def discriminance(self, tag):
# type: (str) -> int
"""
        Return the discriminance index of the tag.
        The discriminance index of the tag is defined as the minimum
number of packages that would be eliminated by selecting only
those tagged with this tag or only those not tagged with this
tag.
"""
n = self.card(tag)
tot = self.package_count()
return min(n, tot - n)
def iter_packages(self):
# type: () -> Iterator[str]
"""Iterate over the packages"""
return six.iterkeys(self.db)
iterPackages = function_deprecated_by(iter_packages)
def iter_tags(self):
# type: () -> Iterator[str]
"""Iterate over the tags"""
return six.iterkeys(self.rdb)
iterTags = function_deprecated_by(iter_tags)
def iter_packages_tags(self):
# type: () -> Iterator[Tuple[str, Set[str]]]
"""Iterate over 2-tuples of (pkg, tags)"""
return six.iteritems(self.db)
iterPackagesTags = function_deprecated_by(iter_packages_tags)
def iter_tags_packages(self):
# type: () -> Iterator[Tuple[str, Set[str]]]
"""Iterate over 2-tuples of (tag, pkgs)"""
return six.iteritems(self.rdb)
iterTagsPackages = function_deprecated_by(iter_tags_packages)
def package_count(self):
# type: () -> int
"""Return the number of packages"""
return len(self.db)
packageCount = function_deprecated_by(package_count)
def tag_count(self):
# type: () -> int
"""Return the number of tags"""
return len(self.rdb)
tagCount = function_deprecated_by(tag_count)
def ideal_tagset(self, tags):
# type: (List[str]) -> Set[str]
"""
Return an ideal selection of the top tags in a list of tags.
Return the tagset made of the highest number of tags taken in
consecutive sequence from the beginning of the given vector,
that would intersect with the tagset of a comfortable amount
of packages.
Comfortable is defined in terms of how far it is from 7.
"""
# TODO: the scoring function is quite ok, but may need more
# tuning. I also center it on 15 instead of 7 since we're
# setting a starting point for the search, not a target point
def score_fun(x):
return float((x-15)*(x-15))/x
tagset = set() # type: Set[str]
min_score = 3
for i in range(len(tags)):
pkgs = self.packages_of_tags(tags[:i+1])
card = len(pkgs)
if card == 0:
break
score = score_fun(card)
if score < min_score:
min_score = score
tagset = set(tags[:i+1])
# Return always at least the first tag
if not tagset:
return set(tags[:1])
return tagset
idealTagset = function_deprecated_by(ideal_tagset)
def correlations(self):
# type: () -> Iterator[Tuple[str, str, float]]
"""
Generate the list of correlation as a tuple (hastag, hasalsotag, score).
Every tuple will indicate that the tag 'hastag' tends to also
have 'hasalsotag' with a score of 'score'.
"""
for pivot in self.iter_tags():
# pylint: disable=cell-var-from-loop
with_ = self.filter_packages_tags(lambda pt: pivot in pt[1])
without = self.filter_packages_tags(lambda pt: pivot not in pt[1])
for tag in with_.iter_tags():
if tag == pivot:
continue
has = float(with_.card(tag)) / float(with_.package_count())
hasnt = float(without.card(tag)) / float(without.package_count())
yield pivot, tag, has - hasnt
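# A minimal, self-contained sketch (not part of the original module) of the DB
# class defined above, using made-up package and tag names; it exercises
# insert(), the basic queries, and relevance_index_function.
if __name__ == '__main__':
    _db = DB()
    _db.insert('gimp', {'use::editing', 'works-with::image'})
    _db.insert('inkscape', {'use::editing', 'works-with::vectorimage'})
    _db.insert('mutt', {'use::email'})
    assert _db.tags_of_package('gimp') == {'use::editing', 'works-with::image'}
    assert _db.packages_of_tag('use::editing') == {'gimp', 'inkscape'}
    assert _db.card('use::editing') == 2
    assert _db.discriminance('use::editing') == 1       # min(2, 3 - 2)
    # Score a tag in a sub-collection relative to the full collection:
    _sub = _db.choose_packages(['gimp', 'inkscape'])
    _score = relevance_index_function(_db, _sub)
    assert _score('use::editing') == 2.0                 # 2**2 / 2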
| 32.442568
| 92
| 0.596741
|
fa9d5e70e66f588cfb08d8a932fb5d8088429b6b
| 32,425
|
py
|
Python
|
scale/util/test/test_rest.py
|
kaydoh/scale
|
1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee
|
[
"Apache-2.0"
] | 121
|
2015-11-18T18:15:33.000Z
|
2022-03-10T01:55:00.000Z
|
scale/util/test/test_rest.py
|
kaydoh/scale
|
1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee
|
[
"Apache-2.0"
] | 1,415
|
2015-12-23T23:36:04.000Z
|
2022-01-07T14:10:09.000Z
|
scale/util/test/test_rest.py
|
kaydoh/scale
|
1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee
|
[
"Apache-2.0"
] | 66
|
2015-12-03T20:38:56.000Z
|
2020-07-27T15:28:11.000Z
|
from __future__ import unicode_literals
import datetime
import django
import mock
from django.http import QueryDict
from django.test import TestCase
from django.utils.timezone import utc
from mock import MagicMock
from rest_framework.request import Request
import util.rest as rest_util
from util.rest import BadParameter, ReadOnly
class TestRest(TestCase):
def setUp(self):
django.setup()
def test_check_update(self):
"""Tests checking a white-list of parameters allowed to be updated during a POST."""
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test': 'value1',
})
self.assertTrue(rest_util.check_update(request, ['test']))
def test_check_bad_param_type(self):
"""Tests checking a white-list of invalid parameters allowed to be updated during a POST."""
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test1': 'value1',
'test2': 'value2',
})
self.assertRaises(AssertionError, rest_util.check_update, request, 'test1')
def test_check_update_invalid(self):
"""Tests checking a white-list of invalid parameters allowed to be updated during a POST."""
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test1': 'value1',
'test2': 'value2',
})
self.assertRaises(ReadOnly, rest_util.check_update, request, ['test1'])
def test_check_time_range(self):
"""Tests checking a time range is valid."""
self.assertTrue(rest_util.check_time_range(datetime.datetime(2015, 1, 1, tzinfo=utc),
datetime.datetime(2015, 1, 30, tzinfo=utc)))
def test_check_time_range_partial(self):
"""Tests checking a partial time range is valid."""
self.assertTrue(rest_util.check_time_range(datetime.datetime(2015, 1, 1, tzinfo=utc), None))
self.assertTrue(rest_util.check_time_range(None, datetime.datetime(2015, 1, 30, tzinfo=utc)))
def test_check_time_range_equal(self):
"""Tests checking a time range that is invalid due to being equal."""
self.assertRaises(BadParameter, rest_util.check_time_range, datetime.datetime(2015, 1, 1, tzinfo=utc),
datetime.datetime(2015, 1, 1, tzinfo=utc))
def test_check_time_range_flipped(self):
"""Tests checking a time range that is invalid due to start being after end."""
self.assertRaises(BadParameter, rest_util.check_time_range, datetime.datetime(2015, 1, 30, tzinfo=utc),
datetime.datetime(2015, 1, 1, tzinfo=utc))
def test_check_time_range_duration(self):
"""Tests checking a time range that is invalid due to max duration exceeded."""
self.assertRaises(BadParameter, rest_util.check_time_range, datetime.datetime(2015, 1, 1, tzinfo=utc),
datetime.datetime(2015, 3, 1, tzinfo=utc), datetime.timedelta(days=31))
def test_check_together_empty(self):
"""Tests checking multiple parameters together when none are given."""
self.assertFalse(rest_util.check_together([], []))
def test_check_together_none(self):
"""Tests checking multiple parameters together when none are given."""
self.assertFalse(rest_util.check_together(['test1', 'test2'], [None, None]))
def test_check_together_single(self):
"""Tests checking multiple parameters together when one is given."""
self.assertFalse(rest_util.check_together(['test1'], [None]))
def test_check_together_partial(self):
"""Tests checking multiple parameters together when some are given."""
self.assertRaises(BadParameter, rest_util.check_together, ['test1', 'test2'], ['value1', None])
def test_check_together_all(self):
"""Tests checking multiple parameters together."""
self.assertTrue(rest_util.check_together(['test1', 'test2'], ['value1', 'value2']))
def test_has_params_empty(self):
"""Tests checking parameter presence when none are given."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
self.assertFalse(rest_util.has_params(request))
def test_has_params_none(self):
"""Tests checking parameter presence when none are given."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
self.assertFalse(rest_util.has_params(request, None, None))
def test_has_params_single(self):
"""Tests checking parameter presence when one is given."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test1': 'value1',
})
self.assertTrue(rest_util.has_params(request, 'test1'))
def test_has_params_partial(self):
"""Tests checking parameter presence when some are given."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test1': 'value1',
})
self.assertFalse(rest_util.has_params(request, 'test1', 'test2'))
def test_has_params_all(self):
"""Tests checking parameter presence when all are given."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test1': 'value1',
'test2': None,
})
self.assertTrue(rest_util.has_params(request, 'test1', 'test2'))
def test_parse_string(self):
"""Tests parsing a required string parameter that is provided via GET."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'value1',
})
self.assertEqual(rest_util.parse_string(request, 'test'), 'value1')
def test_parse_string_missing(self):
"""Tests parsing a required string parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'value1',
})
self.assertRaises(BadParameter, rest_util.parse_string, request, 'test2')
def test_parse_string_default(self):
"""Tests parsing an optional string parameter that is provided via default value."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'value1',
})
self.assertEqual(rest_util.parse_string(request, 'test2', 'value2'), 'value2')
def test_parse_string_optional(self):
"""Tests parsing an optional string parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'value1',
})
self.assertIsNone(rest_util.parse_string(request, 'test2', required=False))
def test_parse_string_accepted_none(self):
"""Tests parsing a string parameter where the value is not acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'value1',
})
self.assertRaises(BadParameter, rest_util.parse_string, request, 'test', accepted_values=['value'])
def test_parse_string_accepted_all(self):
"""Tests parsing a string parameter where the value is acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'value1',
})
self.assertEqual(rest_util.parse_string(request, 'test', accepted_values=['value1']), 'value1')
def test_parse_string_post(self):
"""Tests parsing a required string parameter that is provided via POST."""
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test': 'value1',
})
self.assertEqual(rest_util.parse_string(request, 'test'), 'value1')
def test_parse_string_list(self):
"""Tests parsing a required list of string parameters that is provided via GET."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['value1', 'value2'])
self.assertListEqual(rest_util.parse_string_list(request, 'test'), ['value1', 'value2'])
def test_parse_string_list_missing(self):
"""Tests parsing a required list of string parameters that are missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['value1', 'value2'])
self.assertRaises(BadParameter, rest_util.parse_string_list, request, 'test2')
def test_parse_string_list_default(self):
"""Tests parsing a required list of string parameters that are provided via default value."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['value1'])
self.assertEqual(rest_util.parse_string_list(request, 'test2', ['value2', 'value3']), ['value2', 'value3'])
def test_parse_string_list_optional(self):
"""Tests parsing an optional list of string parameters that are missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['value1'])
self.assertListEqual(rest_util.parse_string_list(request, 'test2', required=False), [])
def test_parse_string_list_accepted_none(self):
"""Tests parsing a list of string parameters where none of the values are acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['value1', 'value2'])
self.assertRaises(BadParameter, rest_util.parse_string_list, request, 'test', accepted_values=['value'])
def test_parse_string_list_accepted_partial(self):
"""Tests parsing a list of string parameters where only some values are acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['value1', 'value2'])
self.assertRaises(BadParameter, rest_util.parse_string_list, request, 'test', accepted_values=['value1'])
def test_parse_string_list_accepted_all(self):
"""Tests parsing a list of string parameters where all values are acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['value1', 'value2'])
self.assertListEqual(rest_util.parse_string_list(request, 'test', accepted_values=['value1', 'value2']),
['value1', 'value2'])
def test_parse_string_list_post(self):
"""Tests parsing a required list of string parameters that are provided via POST."""
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test': ['value1', 'value2']
})
self.assertEqual(rest_util.parse_string_list(request, 'test'), ['value1', 'value2'])
def test_parse_bool_true(self):
"""Tests parsing a required bool parameter that is provided via GET."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test1': 'TRUE',
'test2': 'True',
'test3': 'true',
'test4': 't',
'test5': '1',
})
self.assertTrue(rest_util.parse_bool(request, 'test1'))
self.assertTrue(rest_util.parse_bool(request, 'test2'))
self.assertTrue(rest_util.parse_bool(request, 'test3'))
self.assertTrue(rest_util.parse_bool(request, 'test4'))
self.assertTrue(rest_util.parse_bool(request, 'test5'))
def test_parse_bool_false(self):
"""Tests parsing a required bool parameter that is provided via GET."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test1': 'FALSE',
'test2': 'False',
'test3': 'false',
'test4': 'f',
'test5': '0',
})
self.assertFalse(rest_util.parse_bool(request, 'test1'))
self.assertFalse(rest_util.parse_bool(request, 'test2'))
self.assertFalse(rest_util.parse_bool(request, 'test3'))
self.assertFalse(rest_util.parse_bool(request, 'test4'))
self.assertFalse(rest_util.parse_bool(request, 'test5'))
def test_parse_bool_missing(self):
"""Tests parsing a required bool parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'true',
})
self.assertRaises(BadParameter, rest_util.parse_bool, request, 'test2')
def test_parse_bool_default(self):
"""Tests parsing an optional bool parameter that is provided via default value."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'true',
})
self.assertFalse(rest_util.parse_bool(request, 'test2', False))
def test_parse_bool_optional(self):
"""Tests parsing an optional bool parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'true',
})
self.assertIsNone(rest_util.parse_bool(request, 'test2', required=False))
def test_parse_bool_post(self):
"""Tests parsing a required bool parameter that is provided via POST."""
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test': 'true',
})
self.assertTrue(rest_util.parse_bool(request, 'test'))
def test_parse_int(self):
"""Tests parsing a required int parameter that is provided via GET."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '10',
})
self.assertEqual(rest_util.parse_int(request, 'test'), 10)
def test_parse_int_missing(self):
"""Tests parsing a required int parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '10',
})
self.assertRaises(BadParameter, rest_util.parse_int, request, 'test2')
def test_parse_int_default(self):
"""Tests parsing a required int parameter that is provided via default value."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '10',
})
self.assertEqual(rest_util.parse_int(request, 'test2', 20), 20)
def test_parse_int_optional(self):
"""Tests parsing an optional int parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'value1',
})
self.assertIsNone(rest_util.parse_int(request, 'test2', required=False))
def test_parse_int_accepted_none(self):
"""Tests parsing an int parameter where the value is not acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '1',
})
self.assertRaises(BadParameter, rest_util.parse_int, request, 'test', accepted_values=[5, 10])
def test_parse_int_accepted_all(self):
"""Tests parsing an int parameter where the value is acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '1',
})
self.assertEqual(rest_util.parse_int(request, 'test', accepted_values=[1, 2, 3]), 1)
def test_parse_int_zero(self):
"""Tests parsing an optional int parameter zero instead of using the default value."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '0',
})
self.assertEqual(rest_util.parse_int(request, 'test', 10), 0)
def test_parse_int_invalid(self):
"""Tests parsing a required int parameter that is not a valid number."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'abc',
})
self.assertRaises(BadParameter, rest_util.parse_int, request, 'test')
def test_parse_int_post(self):
"""Tests parsing a required int parameter that is provided via POST."""
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test': '10',
})
self.assertEqual(rest_util.parse_int(request, 'test'), 10)
def test_parse_int_list(self):
"""Tests parsing a required list of int parameters that is provided via GET."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['1', '2'])
self.assertListEqual(rest_util.parse_int_list(request, 'test'), [1, 2])
def test_parse_int_list_missing(self):
"""Tests parsing a required list of int parameters that are missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['1', '2'])
self.assertRaises(BadParameter, rest_util.parse_int_list, request, 'test2')
def test_parse_int_list_default(self):
"""Tests parsing a required list of int parameters that are provided via default value."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['1'])
self.assertEqual(rest_util.parse_int_list(request, 'test2', ['2', '3']), [2, 3])
def test_parse_int_list_optional(self):
"""Tests parsing an optional list of int parameters that are missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['1'])
self.assertListEqual(rest_util.parse_int_list(request, 'test2', required=False), [])
def test_parse_int_list_accepted_none(self):
"""Tests parsing a list of int parameters where none of the values are acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['1', '2'])
self.assertRaises(BadParameter, rest_util.parse_int_list, request, 'test', accepted_values=[3])
def test_parse_int_list_accepted_partial(self):
"""Tests parsing a list of int parameters where only some values are acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['1', '2'])
self.assertRaises(BadParameter, rest_util.parse_int_list, request, 'test', accepted_values=[1])
def test_parse_int_list_accepted_all(self):
"""Tests parsing a list of int parameters where all values are acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.setlist('test', ['1', '2'])
self.assertListEqual(rest_util.parse_int_list(request, 'test', accepted_values=[1, 2]), [1, 2])
def test_parse_int_list_post(self):
"""Tests parsing a required list of int parameters that are provided via POST."""
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test': ['1', '2']
})
self.assertEqual(rest_util.parse_int_list(request, 'test'), [1, 2])
def test_parse_float(self):
"""Tests parsing a required float parameter that is provided via GET."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '10.1',
})
self.assertEqual(rest_util.parse_float(request, 'test'), 10.1)
def test_parse_float_missing(self):
"""Tests parsing a required float parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '10.1',
})
self.assertRaises(BadParameter, rest_util.parse_float, request, 'test2')
def test_parse_float_default(self):
"""Tests parsing a required float parameter that is provided via default value."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '10.1',
})
self.assertEqual(rest_util.parse_float(request, 'test2', 20.1), 20.1)
def test_parse_float_optional(self):
"""Tests parsing an optional float parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'value1',
})
self.assertIsNone(rest_util.parse_float(request, 'test2', required=False))
def test_parse_float_accepted_none(self):
"""Tests parsing a float parameter where the value is not acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '1.0',
})
self.assertRaises(BadParameter, rest_util.parse_float, request, 'test', accepted_values=[5.0, 10.0])
def test_parse_float_valid(self):
"""Tests parsing a float parameter where the value is acceptable."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '1.1',
})
self.assertEqual(rest_util.parse_float(request, 'test', accepted_values=[1.1, 2.2, 3.3]), 1.1)
def test_parse_float_zero(self):
"""Tests parsing an optional float parameter zero instead of using the default value."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '0.0',
})
self.assertEqual(rest_util.parse_float(request, 'test', 10.1), 0.0)
def test_parse_float_invalid(self):
"""Tests parsing a required float parameter that is not a valid number."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'abc',
})
self.assertRaises(BadParameter, rest_util.parse_float, request, 'test')
def test_parse_float_post(self):
"""Tests parsing a required float parameter that is provided via POST."""
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test': '10.1',
})
self.assertEqual(rest_util.parse_float(request, 'test'), 10.1)
def test_parse_duration(self):
"""Tests parsing a required ISO duration parameter that is provided via GET."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'PT3H0M0S',
})
self.assertEqual(rest_util.parse_duration(request, 'test'), datetime.timedelta(0, 10800))
def test_parse_duration_missing(self):
"""Tests parsing a required ISO duration parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '10',
})
self.assertRaises(BadParameter, rest_util.parse_duration, request, 'test2')
def test_parse_duration_default(self):
"""Tests parsing a required ISO duration parameter that is provided via default value."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'PT3H0M0S',
})
default_value = datetime.timedelta(0, 20800)
self.assertEqual(rest_util.parse_duration(request, 'test2', default_value), default_value)
def test_parse_duration_optional(self):
"""Tests parsing an optional ISO duration parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'value1',
})
self.assertIsNone(rest_util.parse_duration(request, 'test2', required=False))
def test_parse_duration_invalid(self):
"""Tests parsing a required ISO duration parameter that is formatted incorrectly."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'BAD',
})
self.assertRaises(BadParameter, rest_util.parse_duration, request, 'test')
def test_parse_datetime(self):
"""Tests parsing a valid ISO datetime."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '2015-01-01T00:00:00Z',
})
self.assertEqual(rest_util.parse_datetime(request, 'test'), datetime.datetime(2015, 1, 1, tzinfo=utc))
def test_parse_datetime_missing(self):
"""Tests parsing a required ISO datetime parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '10',
})
self.assertRaises(BadParameter, rest_util.parse_datetime, request, 'test2')
def test_parse_datetime_default(self):
"""Tests parsing a required ISO datetime parameter that is provided via default value."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '2015-01-01T00:00:00Z',
})
default_value = datetime.datetime(2015, 2, 10, tzinfo=utc)
self.assertEqual(rest_util.parse_datetime(request, 'test2', default_value), default_value)
def test_parse_datetime_optional(self):
"""Tests parsing an optional ISO datetime parameter that is missing."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'value1',
})
self.assertIsNone(rest_util.parse_datetime(request, 'test2', required=False))
def test_parse_datetime_invalid(self):
"""Tests parsing a required ISO datetime parameter that is formatted incorrectly."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '20150101T00:00:00Z',
})
self.assertRaises(BadParameter, rest_util.parse_datetime, request, 'test')
def test_parse_datetime_missing_timezone(self):
"""Tests parsing an ISO datetime missing a timezone."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '2015-01-01T00:00:00',
})
self.assertRaises(BadParameter, rest_util.parse_datetime, request, 'test')
@mock.patch('django.utils.timezone.now')
def test_parse_timestamp_duration(self, mock_now):
"""Tests parsing a valid ISO duration."""
mock_now.return_value = datetime.datetime(2015, 1, 1, 10, tzinfo=utc)
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': 'PT3H0M0S',
})
self.assertEqual(rest_util.parse_timestamp(request, 'test'),
datetime.datetime(2015, 1, 1, 7, tzinfo=utc))
def test_parse_timestamp_datetime(self):
"""Tests parsing a valid ISO datetime."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': '2015-01-01T00:00:00Z',
})
self.assertEqual(rest_util.parse_timestamp(request, 'test'), datetime.datetime(2015, 1, 1, tzinfo=utc))
def test_parse_dict(self):
"""Tests parsing a dictionary."""
result = {
'name': 'value',
}
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
request.query_params.update({
'test': result,
})
self.assertDictEqual(rest_util.parse_dict(request, 'test'), result)
def test_parse_dict_post(self):
"""Tests parsing a dictionary provided via POST."""
result = {
'name': 'value',
}
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test': result,
})
self.assertDictEqual(rest_util.parse_dict(request, 'test'), result)
def test_parse_dict_optional(self):
"""Tests parsing an optional dict with no default value."""
request = MagicMock(Request)
request.query_params = QueryDict('', mutable=True)
self.assertDictEqual(rest_util.parse_dict(request, 'test', required=False), {})
def test_parse_dict_list_post(self):
"""Tests parsing a list of dictionaries."""
        results = [{'name': 'value'}, {'name2': 'value2'}]
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test': results,
})
self.assertItemsEqual(rest_util.parse_dict_list(request, 'test'), results)
def test_parse_dict_list_invalid_post(self):
"""Tests parsing a required list of dictionaries that is not all dictionaries."""
results = [{'name': 'value'}, 'BAD']
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
request.data.update({
'test': results,
})
self.assertRaises(BadParameter, rest_util.parse_dict_list, request, 'test')
def test_parse_dict_list_optional(self):
"""Tests parsing an optional dict list with no default value."""
request = MagicMock(Request)
request.data = QueryDict('', mutable=True)
self.assertEqual(rest_util.parse_dict_list(request, 'test', required=False), [])
def test_title_to_name(self):
"""Tests parsing an optional dict with no default value."""
title1 = 'Boring Normal Title'
title2 = 'Underscore_Title'
title3 = 'Title #1'
set = None
self.assertEqual(rest_util.title_to_name(set, title1), 'boring-normal-title')
self.assertEqual(rest_util.title_to_name(set, title2), 'underscore-title')
self.assertEqual(rest_util.title_to_name(set, title3), 'title-1')
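# Illustrative sketch only: the tests above pin down the contract of the rest_util.parse_*
# helpers - a value is read from request.query_params (GET) or request.data (POST), an
# optional default may be supplied, required=False turns a missing parameter into None, and
# accepted_values restricts the allowed inputs. The project's real implementation lives in
# rest_util; the function below merely reconstructs that contract for the integer case and
# reuses the BadParameter exception already imported by this test module.
def _illustrative_parse_int(request, name, default_value=None, required=True, accepted_values=None):
    """Parses an integer parameter from the query string (GET) or the request body (POST)."""
    if name in request.query_params:
        params = request.query_params
    else:
        params = request.data
    if name not in params:
        if default_value is not None:
            return default_value
        if not required:
            return None
        raise BadParameter('Missing required parameter: %s' % name)
    try:
        value = int(params[name])
    except (TypeError, ValueError):
        raise BadParameter('Parameter must be a valid integer: %s' % name)
    if accepted_values and value not in accepted_values:
        raise BadParameter('Value for parameter %s is not acceptable' % name)
    return value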
| 43.003979 | 115 | 0.64478 |
c1f476bd225b71942c936cca89c6c595bb9ae6a2 | 3,104 | py | Python | training/train_model.py | googleinterns/smart-news-query-embeddings | 5c0f354100e3c92ad3e9c0b9ca43f3307b8b68c5 | ["Apache-2.0"] | null | null | null |
training/train_model.py | googleinterns/smart-news-query-embeddings | 5c0f354100e3c92ad3e9c0b9ca43f3307b8b68c5 | ["Apache-2.0"] | 11 | 2020-08-06T18:53:39.000Z | 2022-02-10T01:56:49.000Z |
training/train_model.py | googleinterns/smart-news-query-embeddings | 5c0f354100e3c92ad3e9c0b9ca43f3307b8b68c5 | ["Apache-2.0"] | 1 | 2020-09-02T08:20:14.000Z | 2020-09-02T08:20:14.000Z |
"""
Copyright 2020 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import time
from smart_news_query_embeddings.trainers.bert_model_trainer import BertModelTrainer
from smart_news_query_embeddings.trainers.two_tower_model_trainer import TwoTowerModelTrainer
from smart_news_query_embeddings.trainers.bert_model_specificity_score_trainer import BertModelSpecificityScoreTrainer
"""
Script that instantiates a trainer and trains the model.
"""
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', '-b', default=32, type=int)
parser.add_argument('--learning-rate', '-l', default=1e-5, type=float)
parser.add_argument('--max-seq-length', default=128, type=int)
parser.add_argument('--dropout-rate', default=0.5, type=float)
parser.add_argument('--num-train-epochs', '-n', default=3, type=int)
parser.add_argument('--dense-size', '-d', default=256, type=int)
parser.add_argument('--exp-name', '-e', required=True, type=str)
parser.add_argument('--bert-dir', default='uncased_L-12_H-768_A-12', type=str)
parser.add_argument('--two-tower', '-t', action='store_true', default=False)
parser.add_argument('--specificity-scores', '-s', action='store_true', default=False)
parser.add_argument('--cutoff', '-c', default=0.5, type=float)
parser.add_argument('--train-tail', action='store_true', default=False)
parser.add_argument('--no-batch-norm', action='store_true', default=False)
args = parser.parse_args()
if args.two_tower:
trainer = TwoTowerModelTrainer(args.exp_name, batch_size=args.batch_size, learning_rate=args.learning_rate,
max_seq_length=args.max_seq_length, dropout_rate=args.dropout_rate, epochs=args.num_train_epochs,
dense_size=args.dense_size, bert_dir=args.bert_dir, use_batch_norm=not args.no_batch_norm)
elif args.specificity_scores:
trainer = BertModelSpecificityScoreTrainer(args.exp_name, batch_size=args.batch_size, learning_rate=args.learning_rate,
max_seq_length=args.max_seq_length, dropout_rate=args.dropout_rate, epochs=args.num_train_epochs,
dense_size=args.dense_size, bert_dir=args.bert_dir, tail_cutoff=args.cutoff, train_tail=args.train_tail)
else:
trainer = BertModelTrainer(args.exp_name, batch_size=args.batch_size, learning_rate=args.learning_rate,
max_seq_length=args.max_seq_length, dropout_rate=args.dropout_rate, epochs=args.num_train_epochs,
dense_size=args.dense_size, bert_dir=args.bert_dir)
print('Trainer class is: {}'.format(type(trainer)))
trainer.train()
| 53.517241 | 127 | 0.761276 |
ef67c7a8be76af61d823ee447dc272d58105caab | 8,787 | py | Python | build.py | k0sukey/TiSBTickerView | 097a05801365db6857d1ef902c257d69cab257f3 | ["Unlicense", "MIT"] | 1 | 2016-11-16T19:30:17.000Z | 2016-11-16T19:30:17.000Z |
build.py | k0sukey/TiSBTickerView | 097a05801365db6857d1ef902c257d69cab257f3 | ["Unlicense", "MIT"] | null | null | null |
build.py | k0sukey/TiSBTickerView | 097a05801365db6857d1ef902c257d69cab257f3 | ["Unlicense", "MIT"] | null | null | null |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
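# Typical usage, run from the module's root directory (the script reads 'manifest' and
# 'titanium.xcconfig' from the current working directory):
#
#   python build.py              # build, document and package the module zip
#   python build.py --skip-docs  # skip apidoc generation (see the optparse options in __main__)
#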
import os, subprocess, sys, glob, string, optparse
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
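# For example, with config = {'TITANIUM_SDK': '/Library/titanium/7.5.0'} (an illustrative
# value), replace_vars(config, '$(TITANIUM_SDK)/iphone') returns '/Library/titanium/7.5.0/iphone'.
# A $(KEY) reference whose key is missing from config is left in place unchanged.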
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
warn("Couldn't find documentation file at: %s" % docdir)
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','be.k0suke.tisbtickerview.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','BeK0sukeTisbtickerviewModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def info(msg):
print "[INFO] %s" % msg
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	path = os.path.join(cwd,'manifest')
	if not os.path.exists(path): die("missing %s" % path)
	f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
if not os.path.exists(dir): return
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] in ignoreExt: continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def generate_apidoc(apidoc_build_path):
global options
if options.skip_docs:
info("Skipping documentation generation.")
return False
else:
info("Module apidoc generation can be skipped using --skip-docs")
apidoc_path = os.path.join(cwd, "apidoc")
if not os.path.exists(apidoc_path):
warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path)
return False
if not os.path.exists(apidoc_build_path):
os.makedirs(apidoc_build_path)
ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True))
if not len(ti_root) > 0:
warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.")
warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').")
return False
docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py")
if not os.path.exists(docgen):
warn("Not generating documentation from the apidoc folder. Couldn't find docgen.py at: %s" % docgen)
return False
info("Generating documentation from the apidoc folder.")
rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path))
if rc != 0:
die("docgen failed")
return True
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
apidoc_build_path = os.path.join(cwd, "build", "apidoc")
if generate_apidoc(apidoc_build_path):
for file in os.listdir(apidoc_build_path):
if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)):
continue
zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file))
zip_dir(zf,'assets',modulepath,['.pyc','.js'])
zip_dir(zf,'example',modulepath,['.pyc'])
zip_dir(zf,'platform',modulepath,['.pyc','.js'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
global options
parser = optparse.OptionParser()
parser.add_option("-s", "--skip-docs",
dest="skip_docs",
action="store_true",
help="Will skip building documentation in apidoc folder",
default=False)
(options, args) = parser.parse_args()
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
| 31.836957 | 135 | 0.709116 |
2094971b8c9beb6ca24941a01c25f083b08474bb | 3,714 | py | Python | post_tool/post_area.py | sus304/ForRocketWorkbench | 453613ecde282cc8a2b481250f29e6de7438a230 | ["Apache-2.0"] | null | null | null |
post_tool/post_area.py | sus304/ForRocketWorkbench | 453613ecde282cc8a2b481250f29e6de7438a230 | ["Apache-2.0"] | null | null | null |
post_tool/post_area.py | sus304/ForRocketWorkbench | 453613ecde282cc8a2b481250f29e6de7438a230 | ["Apache-2.0"] | null | null | null |
import os
import glob
import numpy as np
from tqdm import tqdm
from post_tool.post_df import csv2df
from post_tool.post_summary import post_summary
from post_tool.post_kml import dump_area_kml
def post_area(area_work_dir):
os.chdir(area_work_dir)
    # List up the *_flight_log.csv files in the working directory
log_file_list = glob.glob('*_flight_log.csv')
    # Sort the logs by stage
    # Separate ballistic descents from decelerated (parachute) descents
    # Only stage 1 is supported
stage1_log_file_list = []
stage1_ballistic_log_file_list = []
stage2_log_file_list = []
stage3_log_file_list = []
for file in tqdm(log_file_list):
if '_stage1_' in file:
if '_ballistic_' in file:
stage1_ballistic_log_file_list.append(file)
else:
stage1_log_file_list.append(file)
elif '_stage2_' in file:
stage2_log_file_list.append(file)
elif '_stage3_' in file:
stage3_log_file_list.append(file)
    # Detect the number of wind directions and wind speeds from the case conditions
case_param_list = np.loadtxt('wind_case_list.csv', delimiter=',', skiprows=1)
previous_speed = case_param_list[0][1]
direction_count = 0
for i in range(len(case_param_list)):
        # Detect the number of wind directions
if previous_speed != case_param_list[i][1]:
direction_count = i
break
    speed_count = int(len(case_param_list) / direction_count) # detect the number of wind speeds
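    # Example (illustrative numbers): if wind_case_list.csv enumerates 8 wind directions for
    # each of 3 wind speeds (24 rows ordered direction-first), the wind speed column first
    # changes at row index 8, so direction_count = 8 and speed_count = 24 / 8 = 3.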
    # Assign the log files to cases by case number
direction_loop_list = []
speed_loop_list = []
for i in tqdm(range(len(case_param_list))):
case_num = int(case_param_list[i][0])
for file in stage1_log_file_list:
if '_wind'+str(case_num)+'_' in file:
direction_loop_list.append(file)
break
if len(direction_loop_list) == direction_count:
speed_loop_list.append(direction_loop_list)
direction_loop_list = []
ballicstic_speed_loop_list = []
if len(stage1_ballistic_log_file_list) != 0:
for i in tqdm(range(len(case_param_list))):
case_num = int(case_param_list[i][0])
for file in stage1_ballistic_log_file_list:
if '_wind'+str(case_num)+'_' in file:
direction_loop_list.append(file)
break
if len(direction_loop_list) == direction_count:
ballicstic_speed_loop_list.append(direction_loop_list)
direction_loop_list = []
    # Extract the landing points and write them to KML
speed_impact_points_latlon = []
direction_impact_points_latlon = []
for direction_log_files in tqdm(speed_loop_list):
for log_file in direction_log_files:
case_name = os.path.split(log_file)[-1]
df, _, _ = csv2df(log_file)
_, latlon = post_summary(df, case_name)
direction_impact_points_latlon.append(latlon)
speed_impact_points_latlon.append(direction_impact_points_latlon)
direction_impact_points_latlon = []
if len(stage1_ballistic_log_file_list) == 0:
dump_area_kml(speed_impact_points_latlon, 'ballistic')
else:
dump_area_kml(speed_impact_points_latlon, 'decent')
speed_impact_points_latlon = []
direction_impact_points_latlon = []
for direction_log_files in tqdm(ballicstic_speed_loop_list):
for log_file in direction_log_files:
case_name = os.path.split(log_file)[-1]
df, _, _ = csv2df(log_file)
_, latlon = post_summary(df, case_name)
direction_impact_points_latlon.append(latlon)
speed_impact_points_latlon.append(direction_impact_points_latlon)
direction_impact_points_latlon = []
dump_area_kml(speed_impact_points_latlon, 'ballistic')
os.chdir('../')
| 36.058252 | 81 | 0.651319 |
303c319fcd1dd6304665954e517d8b0281190f65 | 2,288 | py | Python | tests/test_philips_tiff_tiler.py | imi-bigpicture/opentile | 1a84284c5bc2c3515e14d5345b6077842897b547 | ["Apache-2.0"] | 4 | 2021-12-02T17:19:10.000Z | 2022-02-02T16:35:48.000Z |
tests/test_philips_tiff_tiler.py | sectra-medical/opentile | 1a84284c5bc2c3515e14d5345b6077842897b547 | ["Apache-2.0"] | 6 | 2021-12-02T13:22:04.000Z | 2022-03-09T14:01:19.000Z |
tests/test_philips_tiff_tiler.py | sectra-medical/opentile | 1a84284c5bc2c3515e14d5345b6077842897b547 | ["Apache-2.0"] | 4 | 2022-02-04T08:24:20.000Z | 2022-02-16T12:39:39.000Z |
# Copyright 2021 SECTRA AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from hashlib import md5
from pathlib import Path
import pytest
from opentile.philips_tiff_tiler import PhilipsTiffTiler
test_data_dir = os.environ.get("OPENTILE_TESTDIR", "tests/testdata")
philips_file_path = Path(test_data_dir).joinpath(
"slides/philips_tiff/philips1/input.tif"
)
@pytest.mark.unittest
class PhilipsTiffTilerTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tiler: PhilipsTiffTiler
@classmethod
def setUpClass(cls):
try:
cls.tiler = PhilipsTiffTiler(philips_file_path)
except FileNotFoundError:
raise unittest.SkipTest(
'Philips tiff test file not found, skipping'
)
cls.level = cls.tiler.get_level(0)
@classmethod
def tearDownClass(cls):
cls.tiler.close()
def test_get_tile(self):
tile = self.level.get_tile((0, 0))
self.assertEqual(
'570d069f9de5d2716fb0d7167bc79195',
md5(tile).hexdigest()
)
tile = self.level.get_tile((20, 20))
self.assertEqual(
'db28efb73a72ef7e2780fc72c624d7ae',
md5(tile).hexdigest()
)
def test_photometric_interpretation(self):
self.assertEqual(
'YCBCR',
self.tiler.get_level(0).photometric_interpretation
)
def test_subsampling(self):
self.assertEqual(
(2, 2),
self.tiler.get_level(0).subsampling
)
    def test_samples_per_pixel(self):
self.assertEqual(
3,
self.tiler.get_level(0).samples_per_pixel
)
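# Outside of unittest the same calls can be used directly (a sketch based only on the methods
# exercised above; the file path is a placeholder):
#
#   from pathlib import Path
#   from opentile.philips_tiff_tiler import PhilipsTiffTiler
#   tiler = PhilipsTiffTiler(Path('slides/philips_tiff/philips1/input.tif'))
#   level = tiler.get_level(0)
#   tile = level.get_tile((0, 0))   # encoded tile bytes, as hashed in test_get_tile
#   tiler.close()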
| 29.714286 | 77 | 0.652535 |
b555ae5d5f4276e7533584444a998c316ce4d983 | 10,223 | py | Python | src/Updater.py | losek1/Sounder3 | 21999a309d9a395bcbc88ecb7fbfce1c507dfa6a | ["MIT"] | 2 | 2019-08-12T15:40:23.000Z | 2019-08-12T15:41:52.000Z |
src/Updater.py | losek1/Sounder3 | 21999a309d9a395bcbc88ecb7fbfce1c507dfa6a | ["MIT"] | null | null | null |
src/Updater.py | losek1/Sounder3 | 21999a309d9a395bcbc88ecb7fbfce1c507dfa6a | ["MIT"] | null | null | null |
try:
from os import getcwd, startfile, rename, remove
from os.path import dirname, isfile, basename
from threading import Thread
from json import load
from tkinter import Tk, PhotoImage, sys, Frame
from tkinter import ttk
from typing import ClassVar, Dict, List
from requests import get
from zipfile import ZipFile
import logging
from io import BytesIO
from time import sleep
from psutil import process_iter
from PIL import Image, ImageSequence, ImageTk
except ImportError as e:
sys.exit(e)
# dir
sounder_dir: str = getcwd()
# sounder_dir: str = dirname(sys.executable)
# log
logging.basicConfig(filename=f"{sounder_dir}\\errors.log", level=logging.ERROR)
# window setup
updater_window: ClassVar = Tk()
updater_window.withdraw()
updater_window.geometry(f"375x100+{int(updater_window.winfo_x() + ((updater_window.winfo_screenwidth() - 375) / 2))}"
f"+{int(updater_window.winfo_y() +((updater_window.winfo_screenheight() - 100) / 2))}")
updater_window.title("Sounder updater")
try:
updater_window.iconbitmap(f"{sounder_dir}\\icon.ico")
except:
sys.exit(1)
updater_window.resizable(width=False, height=False)
updater_window.configure(background="#fff")
# theme
updater_theme = ttk.Style()
updater_theme.theme_use('clam')
updater_theme.configure("TLabel", background='#fff', foreground='#000', border='0')
updater_theme.configure("Horizontal.TProgressbar", foreground='#fff', background='#000', lightcolor='#fff'
, darkcolor='#fff', bordercolor='#fff', troughcolor='#fff')
updater_theme.configure("TButton", relief="flat", background='#000', font=('Bahnschrift', 10), foreground='#fff')
updater_theme.map("TButton", background=[('pressed', '!disabled', '#000'), ('active', '#111')])
# variables
config: Dict = {}
server_version: str
package = b''
# functions
def dump(err_obj: ClassVar) -> None:
    error_reason_label.configure(text="Error: " + str(err_obj))
show(error_frame)
logging.error(err_obj, exc_info=True)
def show(window) -> bool:
try:
window.tkraise()
return True
except Exception as e:
dump(e)
return False
def load_config() -> bool:
global config
if isfile('cfg.json'):
try:
with open('cfg.json', 'r') as data:
config = load(data)
return True
except:
return False
else:
return False
def close() -> None:
for widget in updater_window.winfo_children():
widget.destroy()
updater_window.destroy()
sys.exit(0)
def change_mode(mode: str) -> None:
try:
update_progress.stop()
update_progress["value"] = 0
update_progress["maximum"] = 100
if mode == "determinate":
update_progress.configure(mode="determinate")
elif mode == "indeterminate":
update_progress.configure(mode="indeterminate")
update_progress.start(4)
except Exception as e:
dump(e)
def update() -> bool:
global server_version, package, sounder_dir
chunk_size: int = 8192
change_mode("determinate")
Thread(target=load_img, daemon=True).start()
show(checking_frame)
try:
bytes_downloaded: float = 0
server_zip = get(f"https://github.com/losek1/Sounder3/releases/download/v{server_version}/package.zip"
, stream=True)
if server_zip.status_code == 200:
update_progress["maximum"] = int(server_zip.headers.get('Content-Length'))
show(update_frame)
for chunk in server_zip.iter_content(chunk_size=chunk_size):
if chunk:
package += chunk
bytes_downloaded += chunk_size
update_progress["value"] = bytes_downloaded
update_data.configure(text=f"{round(bytes_downloaded / 1000000, 1)}MB / {round(int(server_zip.headers.get('Content-Length')) / 1000000, 1)}MB")
for process in process_iter():
if process.name() == "Sounder3.exe":
process.kill()
change_mode("indeterminate")
update_label.configure(text="Installing updates ...")
with ZipFile(BytesIO(package)) as zip_file:
for file in zip_file.namelist():
update_data.configure(text=f"{zip_file.namelist().index(file)} / {len(zip_file.namelist())}")
if file == "Updater.exe" or file == "errors.log":
continue
try:
zip_file.extract(file, sounder_dir)
except Exception as error_obj:
logging.error(error_obj, exc_info=True)
show(finish_frame)
sleep(3)
open_app()
close()
else:
raise Exception("Cannot contact GitHub servers")
except Exception as e:
dump(e)
return True
def check_updates() -> bool:
global config, server_version
try:
server_version = get(
"https://raw.githubusercontent.com/losek1/Sounder3/master/updates/version.txt").text.strip()
if int(config["version"].replace(".", "")) < int(server_version.replace(".", "")):
return True
else:
return False
except Exception as e:
dump(e)
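# Worked example of the comparison above: with config["version"] == "1.2.3" and a server
# version of "1.3.0", the dots are stripped to give 123 < 130, so an update is offered.
# Note that this digit-concatenation check implicitly assumes both version strings carry
# the same number of components.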
def open_app() -> None:
try:
if isfile("Sounder3.exe"):
startfile("Sounder3.exe")
except Exception as e:
dump(e)
def update_task() -> None:
Thread(target=update, daemon=True).start()
def self_upgrade() -> bool:
try:
if basename(sys.argv[0]) == "New-Updater.exe":
if isfile("Updater.exe"):
remove("Updater.exe")
rename(sys.argv[0], "Updater.exe")
return False
elif isfile("New-Updater.exe"):
startfile("New-Updater.exe")
return True
else:
return False
except Exception as e:
dump(e)
def show_gui() -> None:
show(checking_frame)
updater_window.deiconify()
updater_window.lift()
updater_window.focus_force()
def init() -> None:
if self_upgrade():
close()
elif load_config():
show_gui()
if check_updates():
update_task()
else:
show(choice_frame)
def load_img() -> None:
try:
img_frames: List = []
download_img: ClassVar = Image.open("download_light.gif")
for frame in ImageSequence.Iterator(download_img):
img_frames.append(ImageTk.PhotoImage(frame.copy().convert('RGBA').resize((48, 48))))
if len(img_frames) > 1:
while True:
for frame in img_frames:
update_img_label.configure(image=frame)
sleep(0.02)
else:
            update_img_label.configure(image=img_frames[0])
except Exception as e:
dump(e)
# frames
# error frame
error_frame: ClassVar = Frame(updater_window)
error_frame.configure(background="#fff")
error_reason_label: ClassVar = ttk.Label(error_frame, text="Error:", anchor='center', font='Bahnschrift 11')
error_exit_button: ClassVar = ttk.Button(error_frame, text="EXIT", cursor="hand2", takefocus=False, command=close)
error_reason_label.place(relx=0.5, rely=0, relheight=0.58, anchor="n")
error_exit_button.place(relx=0.5, rely=0.6, relwidth=0.23, anchor="n")
error_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
# end
# update frame
update_frame: ClassVar = Frame(updater_window)
update_frame.configure(background="#fff")
update_img_label: ClassVar = ttk.Label(update_frame, image=None, anchor='center')
update_label: ClassVar = ttk.Label(update_frame, text="Downloading updates ...", font='Bahnschrift 11')
update_data: ClassVar = ttk.Label(update_frame, text="-- MB / --MB", font='Bahnschrift 10', anchor='center')
update_progress: ClassVar = ttk.Progressbar(update_frame, orient='horizontal')
update_img_label.place(relx=0, rely=0, relwidth=0.25, relheight=1)
update_label.place(relx=0.25, rely=0.15, relwidth=0.45, relheight=0.25)
update_data.place(relx=0.70, rely=0.15, relwidth=0.30, relheight=0.25)
update_progress.place(relx=0.25, rely=0.6, relwidth=0.75, relheight=0.25)
update_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
# end
# check frame
checking_frame: ClassVar = Frame(updater_window)
checking_frame.configure(background="#fff")
checking_label: ClassVar = ttk.Label(checking_frame, text="Verifying\n"
". . .", font='Bahnschrift 16', anchor='center', justify='center')
checking_label.place(relx=0.5, rely=0, relheight=1, anchor="n")
checking_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
# end
# choice frame
choice_frame: ClassVar = Frame(updater_window)
choice_frame.configure(background="#fff")
choice_label: ClassVar = ttk.Label(choice_frame, text="The latest version of Sounder is already installed.\n"
" Would you like to install it anyway?", font='Bahnschrift 11'
, anchor='center', justify='center')
choice_install_button: ClassVar = ttk.Button(choice_frame, text="INSTALL", cursor="hand2", takefocus=False,
command=update_task)
choice_exit_button: ClassVar = ttk.Button(choice_frame, text="EXIT", cursor="hand2", takefocus=False, command=close)
choice_label.place(relx=0.5, rely=0.1, anchor="n")
choice_install_button.place(relx=0.3, rely=0.6, anchor="n")
choice_exit_button.place(relx=0.7, rely=0.6, anchor="n")
choice_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
# end
# finish frame
finish_frame: ClassVar = Frame(updater_window)
finish_frame.configure(background="#fff")
finish_label: ClassVar = ttk.Label(finish_frame, text="All done!\nLaunching sounder in 3s"
, anchor='center', justify='center', font='Bahnschrift 17')
finish_label.place(relx=0, rely=0, relwidth=1, relheight=1)
finish_frame.place(relx=0, rely=0, relwidth=1, relheight=1)
# end
Thread(target=init, daemon=True).start()
updater_window.mainloop()
| 36.906137 | 163 | 0.638462 |
66ebad7fd2f39e50cf9a9f14ad3956b2bb7264bc | 68,401 | py | Python | mlflow/tracking/fluent.py | szczeles/mlflow | 7e7b9f8fe8672e5fdc0a1b2c4180443c24918ad4 | ["Apache-2.0"] | null | null | null |
mlflow/tracking/fluent.py | szczeles/mlflow | 7e7b9f8fe8672e5fdc0a1b2c4180443c24918ad4 | ["Apache-2.0"] | null | null | null |
mlflow/tracking/fluent.py | szczeles/mlflow | 7e7b9f8fe8672e5fdc0a1b2c4180443c24918ad4 | ["Apache-2.0"] | null | null | null |
"""
Internal module implementing the fluent API, allowing management of an active
MLflow run. This module is exposed to users at the top-level :py:mod:`mlflow` module.
"""
import os
import atexit
import time
import logging
import inspect
from copy import deepcopy
from packaging.version import Version
from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING
from mlflow.entities import Experiment, Run, RunInfo, RunStatus, Param, RunTag, Metric, ViewType
from mlflow.entities.lifecycle_stage import LifecycleStage
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_DOES_NOT_EXIST,
)
from mlflow.tracking.client import MlflowClient
from mlflow.tracking import artifact_utils, _get_store
from mlflow.tracking.context import registry as context_registry
from mlflow.tracking.default_experiment import registry as default_experiment_registry
from mlflow.store.tracking import SEARCH_MAX_RESULTS_DEFAULT
from mlflow.utils import env
from mlflow.utils.autologging_utils import (
is_testing,
autologging_integration,
AUTOLOGGING_INTEGRATIONS,
autologging_is_disabled,
)
from mlflow.utils.import_hooks import register_post_import_hook
from mlflow.utils.mlflow_tags import (
MLFLOW_PARENT_RUN_ID,
MLFLOW_RUN_NAME,
MLFLOW_RUN_NOTE,
)
from mlflow.utils.validation import _validate_run_id
if TYPE_CHECKING:
import pandas # pylint: disable=unused-import
import matplotlib # pylint: disable=unused-import
import matplotlib.figure
import plotly # pylint: disable=unused-import
import numpy # pylint: disable=unused-import
import PIL # pylint: disable=unused-import
_EXPERIMENT_ID_ENV_VAR = "MLFLOW_EXPERIMENT_ID"
_EXPERIMENT_NAME_ENV_VAR = "MLFLOW_EXPERIMENT_NAME"
_RUN_ID_ENV_VAR = "MLFLOW_RUN_ID"
_active_run_stack = []
_active_experiment_id = None
_last_active_run_id = None
SEARCH_MAX_RESULTS_PANDAS = 100000
NUM_RUNS_PER_PAGE_PANDAS = 10000
_logger = logging.getLogger(__name__)
def set_experiment(experiment_name: str = None, experiment_id: str = None) -> Experiment:
"""
Set the given experiment as the active experiment. The experiment must either be specified by
name via `experiment_name` or by ID via `experiment_id`. The experiment name and ID cannot
both be specified.
:param experiment_name: Case sensitive name of the experiment to be activated. If an experiment
with this name does not exist, a new experiment wth this name is
created.
:param experiment_id: ID of the experiment to be activated. If an experiment with this ID
does not exist, an exception is thrown.
:return: An instance of :py:class:`mlflow.entities.Experiment` representing the new active
experiment.
.. code-block:: python
:caption: Example
import mlflow
# Set an experiment name, which must be unique and case sensitive.
mlflow.set_experiment("Social NLP Experiments")
# Get Experiment Details
experiment = mlflow.get_experiment_by_name("Social NLP Experiments")
print("Experiment_id: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Experiment_id: 1
Artifact Location: file:///.../mlruns/1
Tags: {}
Lifecycle_stage: active
"""
if (experiment_name is not None and experiment_id is not None) or (
experiment_name is None and experiment_id is None
):
raise MlflowException(
message="Must specify exactly one of: `experiment_id` or `experiment_name`.",
error_code=INVALID_PARAMETER_VALUE,
)
client = MlflowClient()
if experiment_id is None:
experiment = client.get_experiment_by_name(experiment_name)
if not experiment:
_logger.info(
"Experiment with name '%s' does not exist. Creating a new experiment.",
experiment_name,
)
# NB: If two simultaneous threads or processes attempt to set the same experiment
# simultaneously, a race condition may be encountered here wherein experiment creation
# fails
experiment_id = client.create_experiment(experiment_name)
experiment = client.get_experiment(experiment_id)
else:
experiment = client.get_experiment(experiment_id)
if experiment is None:
raise MlflowException(
message=f"Experiment with ID '{experiment_id}' does not exist.",
error_code=RESOURCE_DOES_NOT_EXIST,
)
if experiment.lifecycle_stage != LifecycleStage.ACTIVE:
raise MlflowException(
message=(
"Cannot set a deleted experiment '%s' as the active experiment."
" You can restore the experiment, or permanently delete the "
" experiment to create a new one." % experiment.name
),
error_code=INVALID_PARAMETER_VALUE,
)
global _active_experiment_id
_active_experiment_id = experiment.experiment_id
return experiment
class ActiveRun(Run): # pylint: disable=W0223
"""Wrapper around :py:class:`mlflow.entities.Run` to enable using Python ``with`` syntax."""
def __init__(self, run):
Run.__init__(self, run.info, run.data)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
status = RunStatus.FINISHED if exc_type is None else RunStatus.FAILED
end_run(RunStatus.to_string(status))
return exc_type is None
def start_run(
run_id: str = None,
experiment_id: Optional[str] = None,
run_name: Optional[str] = None,
nested: bool = False,
tags: Optional[Dict[str, Any]] = None,
description: Optional[str] = None,
) -> ActiveRun:
"""
Start a new MLflow run, setting it as the active run under which metrics and parameters
will be logged. The return value can be used as a context manager within a ``with`` block;
otherwise, you must call ``end_run()`` to terminate the current run.
If you pass a ``run_id`` or the ``MLFLOW_RUN_ID`` environment variable is set,
``start_run`` attempts to resume a run with the specified run ID and
other parameters are ignored. ``run_id`` takes precedence over ``MLFLOW_RUN_ID``.
If resuming an existing run, the run status is set to ``RunStatus.RUNNING``.
MLflow sets a variety of default tags on the run, as defined in
:ref:`MLflow system tags <system_tags>`.
:param run_id: If specified, get the run with the specified UUID and log parameters
and metrics under that run. The run's end time is unset and its status
is set to running, but the run's other attributes (``source_version``,
``source_type``, etc.) are not changed.
:param experiment_id: ID of the experiment under which to create the current run (applicable
only when ``run_id`` is not specified). If ``experiment_id`` argument
is unspecified, will look for valid experiment in the following order:
activated using ``set_experiment``, ``MLFLOW_EXPERIMENT_NAME``
environment variable, ``MLFLOW_EXPERIMENT_ID`` environment variable,
or the default experiment as defined by the tracking server.
:param run_name: Name of new run (stored as a ``mlflow.runName`` tag).
Used only when ``run_id`` is unspecified.
:param nested: Controls whether run is nested in parent run. ``True`` creates a nested run.
:param tags: An optional dictionary of string keys and values to set as tags on the run.
If a run is being resumed, these tags are set on the resumed run. If a new run is
being created, these tags are set on the new run.
:param description: An optional string that populates the description box of the run.
If a run is being resumed, the description is set on the resumed run.
If a new run is being created, the description is set on the new run.
:return: :py:class:`mlflow.ActiveRun` object that acts as a context manager wrapping
the run's state.
.. code-block:: python
:caption: Example
import mlflow
# Create nested runs
with mlflow.start_run(run_name='PARENT_RUN') as parent_run:
mlflow.log_param("parent", "yes")
with mlflow.start_run(run_name='CHILD_RUN', nested=True) as child_run:
mlflow.log_param("child", "yes")
print("parent run_id: {}".format(parent_run.info.run_id))
print("child run_id : {}".format(child_run.info.run_id))
print("--")
# Search all child runs with a parent id
query = "tags.mlflow.parentRunId = '{}'".format(parent_run.info.run_id)
results = mlflow.search_runs(filter_string=query)
print(results[["run_id", "params.child", "tags.mlflow.runName"]])
.. code-block:: text
:caption: Output
parent run_id: 5ec0e7ae18f54c2694ffb48c2fccf25c
child run_id : 78b3b0d264b44cd29e8dc389749bb4be
--
run_id params.child tags.mlflow.runName
0 78b3b0d264b44cd29e8dc389749bb4be yes CHILD_RUN
"""
global _active_run_stack
# back compat for int experiment_id
experiment_id = str(experiment_id) if isinstance(experiment_id, int) else experiment_id
if len(_active_run_stack) > 0 and not nested:
raise Exception(
(
"Run with UUID {} is already active. To start a new run, first end the "
+ "current run with mlflow.end_run(). To start a nested "
+ "run, call start_run with nested=True"
).format(_active_run_stack[0].info.run_id)
)
client = MlflowClient()
if run_id:
existing_run_id = run_id
elif _RUN_ID_ENV_VAR in os.environ:
existing_run_id = os.environ[_RUN_ID_ENV_VAR]
del os.environ[_RUN_ID_ENV_VAR]
else:
existing_run_id = None
if existing_run_id:
_validate_run_id(existing_run_id)
active_run_obj = client.get_run(existing_run_id)
# Check to see if experiment_id from environment matches experiment_id from set_experiment()
if (
_active_experiment_id is not None
and _active_experiment_id != active_run_obj.info.experiment_id
):
raise MlflowException(
"Cannot start run with ID {} because active run ID "
"does not match environment run ID. Make sure --experiment-name "
"or --experiment-id matches experiment set with "
"set_experiment(), or just use command-line "
"arguments".format(existing_run_id)
)
# Check to see if current run isn't deleted
if active_run_obj.info.lifecycle_stage == LifecycleStage.DELETED:
raise MlflowException(
"Cannot start run with ID {} because it is in the "
"deleted state.".format(existing_run_id)
)
# Use previous end_time because a value is required for update_run_info
end_time = active_run_obj.info.end_time
_get_store().update_run_info(
existing_run_id, run_status=RunStatus.RUNNING, end_time=end_time
)
tags = tags or {}
if description:
if MLFLOW_RUN_NOTE in tags:
raise MlflowException(
f"Description is already set via the tag {MLFLOW_RUN_NOTE} in tags."
f"Remove the key {MLFLOW_RUN_NOTE} from the tags or omit the description.",
error_code=INVALID_PARAMETER_VALUE,
)
tags[MLFLOW_RUN_NOTE] = description
if tags:
client.log_batch(
run_id=existing_run_id,
tags=[RunTag(key, str(value)) for key, value in tags.items()],
)
active_run_obj = client.get_run(existing_run_id)
else:
if len(_active_run_stack) > 0:
parent_run_id = _active_run_stack[-1].info.run_id
else:
parent_run_id = None
exp_id_for_run = experiment_id if experiment_id is not None else _get_experiment_id()
user_specified_tags = deepcopy(tags) or {}
if description:
if MLFLOW_RUN_NOTE in user_specified_tags:
raise MlflowException(
f"Description is already set via the tag {MLFLOW_RUN_NOTE} in tags."
f"Remove the key {MLFLOW_RUN_NOTE} from the tags or omit the description.",
error_code=INVALID_PARAMETER_VALUE,
)
user_specified_tags[MLFLOW_RUN_NOTE] = description
if parent_run_id is not None:
user_specified_tags[MLFLOW_PARENT_RUN_ID] = parent_run_id
if run_name is not None:
user_specified_tags[MLFLOW_RUN_NAME] = run_name
resolved_tags = context_registry.resolve_tags(user_specified_tags)
active_run_obj = client.create_run(experiment_id=exp_id_for_run, tags=resolved_tags)
_active_run_stack.append(ActiveRun(active_run_obj))
return _active_run_stack[-1]
def end_run(status: str = RunStatus.to_string(RunStatus.FINISHED)) -> None:
"""End an active MLflow run (if there is one).
.. code-block:: python
:caption: Example
import mlflow
# Start run and get status
mlflow.start_run()
run = mlflow.active_run()
print("run_id: {}; status: {}".format(run.info.run_id, run.info.status))
# End run and get status
mlflow.end_run()
run = mlflow.get_run(run.info.run_id)
print("run_id: {}; status: {}".format(run.info.run_id, run.info.status))
print("--")
# Check for any active runs
print("Active run: {}".format(mlflow.active_run()))
.. code-block:: text
:caption: Output
run_id: b47ee4563368419880b44ad8535f6371; status: RUNNING
run_id: b47ee4563368419880b44ad8535f6371; status: FINISHED
--
Active run: None
"""
global _active_run_stack, _last_active_run_id
if len(_active_run_stack) > 0:
# Clear out the global existing run environment variable as well.
env.unset_variable(_RUN_ID_ENV_VAR)
run = _active_run_stack.pop()
MlflowClient().set_terminated(run.info.run_id, status)
_last_active_run_id = run.info.run_id
atexit.register(end_run)
def active_run() -> Optional[ActiveRun]:
"""Get the currently active ``Run``, or None if no such run exists.
**Note**: You cannot access currently-active run attributes
(parameters, metrics, etc.) through the run returned by ``mlflow.active_run``. In order
to access such attributes, use the :py:class:`mlflow.tracking.MlflowClient` as follows:
.. code-block:: python
:caption: Example
import mlflow
mlflow.start_run()
run = mlflow.active_run()
print("Active run_id: {}".format(run.info.run_id))
mlflow.end_run()
.. code-block:: text
:caption: Output
Active run_id: 6f252757005748708cd3aad75d1ff462
"""
return _active_run_stack[-1] if len(_active_run_stack) > 0 else None
def last_active_run() -> Optional[Run]:
"""
Gets the most recent active run.
Examples:
.. code-block:: python
:caption: To retrieve the most recent autologged run:
import mlflow
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_diabetes
from sklearn.ensemble import RandomForestRegressor
mlflow.autolog()
db = load_diabetes()
X_train, X_test, y_train, y_test = train_test_split(db.data, db.target)
# Create and train models.
rf = RandomForestRegressor(n_estimators = 100, max_depth = 6, max_features = 3)
rf.fit(X_train, y_train)
# Use the model to make predictions on the test dataset.
predictions = rf.predict(X_test)
autolog_run = mlflow.last_active_run()
.. code-block:: python
:caption: To get the most recently active run that ended:
import mlflow
mlflow.start_run()
mlflow.end_run()
run = mlflow.last_active_run()
.. code-block:: python
:caption: To retrieve the currently active run:
import mlflow
mlflow.start_run()
run = mlflow.last_active_run()
mlflow.end_run()
:return: The active run (this is equivalent to ``mlflow.active_run()``) if one exists.
Otherwise, the last run started from the current Python process that reached
a terminal status (i.e. FINISHED, FAILED, or KILLED).
"""
_active_run = active_run()
if _active_run is not None:
return _active_run
if _last_active_run_id is None:
return None
return get_run(_last_active_run_id)
def get_run(run_id: str) -> Run:
"""
Fetch the run from backend store. The resulting :py:class:`Run <mlflow.entities.Run>`
contains a collection of run metadata -- :py:class:`RunInfo <mlflow.entities.RunInfo>`,
as well as a collection of run parameters, tags, and metrics --
:py:class:`RunData <mlflow.entities.RunData>`. In the case where multiple metrics with the
same key are logged for the run, the :py:class:`RunData <mlflow.entities.RunData>` contains
the most recently logged value at the largest step for each metric.
:param run_id: Unique identifier for the run.
:return: A single :py:class:`mlflow.entities.Run` object, if the run exists. Otherwise,
raises an exception.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run() as run:
mlflow.log_param("p", 0)
run_id = run.info.run_id
print("run_id: {}; lifecycle_stage: {}".format(run_id,
mlflow.get_run(run_id).info.lifecycle_stage))
.. code-block:: text
:caption: Output
run_id: 7472befefc754e388e8e922824a0cca5; lifecycle_stage: active
"""
return MlflowClient().get_run(run_id)
def log_param(key: str, value: Any) -> None:
"""
Log a parameter under the current run. If no run is active, this method will create
a new active run.
:param key: Parameter name (string). This string may only contain alphanumerics,
underscores (_), dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores will support keys up to length 250, but some may
support larger keys.
:param value: Parameter value (string, but will be string-ified if not).
All backend stores will support values up to length 5000, but some
may support larger values.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
mlflow.log_param("learning_rate", 0.01)
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_param(run_id, key, value)
def set_tag(key: str, value: Any) -> None:
"""
Set a tag under the current run. If no run is active, this method will create a
new active run.
:param key: Tag name (string). This string may only contain alphanumerics, underscores
(_), dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores will support keys up to length 250, but some may
support larger keys.
:param value: Tag value (string, but will be string-ified if not).
All backend stores will support values up to length 5000, but some
may support larger values.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
mlflow.set_tag("release.version", "2.2.0")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().set_tag(run_id, key, value)
def delete_tag(key: str) -> None:
"""
Delete a tag from a run. This is irreversible. If no run is active, this method
will create a new active run.
:param key: Name of the tag
.. code-block:: python
:caption: Example
import mlflow
tags = {"engineering": "ML Platform",
"engineering_remote": "ML Platform"}
with mlflow.start_run() as run:
mlflow.set_tags(tags)
with mlflow.start_run(run_id=run.info.run_id):
mlflow.delete_tag("engineering_remote")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().delete_tag(run_id, key)
def log_metric(key: str, value: float, step: Optional[int] = None) -> None:
"""
Log a metric under the current run. If no run is active, this method will create
a new active run.
:param key: Metric name (string). This string may only contain alphanumerics, underscores (_),
dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores will support keys up to length 250, but some may
support larger keys.
:param value: Metric value (float). Note that some special values such as +/- Infinity may be
replaced by other values depending on the store. For example, the
SQLAlchemy store replaces +/- Infinity with max / min float values.
All backend stores will support values up to length 5000, but some
may support larger values.
:param step: Metric step (int). Defaults to zero if unspecified.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
mlflow.log_metric("mse", 2500.00)
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_metric(run_id, key, value, int(time.time() * 1000), step or 0)
def log_metrics(metrics: Dict[str, float], step: Optional[int] = None) -> None:
"""
Log multiple metrics for the current run. If no run is active, this method will create a new
active run.
:param metrics: Dictionary of metric_name: String -> value: Float. Note that some special
values such as +/- Infinity may be replaced by other values depending on
the store. For example, sql based store may replace +/- Infinity with
max / min float values.
:param step: A single integer step at which to log the specified
metrics. If unspecified, each metric is logged at step zero.
:returns: None
.. code-block:: python
:caption: Example
import mlflow
metrics = {"mse": 2500.00, "rmse": 50.00}
# Log a batch of metrics
with mlflow.start_run():
mlflow.log_metrics(metrics)
"""
run_id = _get_or_start_run().info.run_id
timestamp = int(time.time() * 1000)
metrics_arr = [Metric(key, value, timestamp, step or 0) for key, value in metrics.items()]
MlflowClient().log_batch(run_id=run_id, metrics=metrics_arr, params=[], tags=[])
def log_params(params: Dict[str, Any]) -> None:
"""
Log a batch of params for the current run. If no run is active, this method will create a
new active run.
:param params: Dictionary of param_name: String -> value: (String, but will be string-ified if
not)
:returns: None
.. code-block:: python
:caption: Example
import mlflow
params = {"learning_rate": 0.01, "n_estimators": 10}
# Log a batch of parameters
with mlflow.start_run():
mlflow.log_params(params)
"""
run_id = _get_or_start_run().info.run_id
params_arr = [Param(key, str(value)) for key, value in params.items()]
MlflowClient().log_batch(run_id=run_id, metrics=[], params=params_arr, tags=[])
def set_tags(tags: Dict[str, Any]) -> None:
"""
Log a batch of tags for the current run. If no run is active, this method will create a
new active run.
:param tags: Dictionary of tag_name: String -> value: (String, but will be string-ified if
not)
:returns: None
.. code-block:: python
:caption: Example
import mlflow
tags = {"engineering": "ML Platform",
"release.candidate": "RC1",
"release.version": "2.2.0"}
# Set a batch of tags
with mlflow.start_run():
mlflow.set_tags(tags)
"""
run_id = _get_or_start_run().info.run_id
tags_arr = [RunTag(key, str(value)) for key, value in tags.items()]
MlflowClient().log_batch(run_id=run_id, metrics=[], params=[], tags=tags_arr)
def log_artifact(local_path: str, artifact_path: Optional[str] = None) -> None:
"""
Log a local file or directory as an artifact of the currently active run. If no run is
active, this method will create a new active run.
:param local_path: Path to the file to write.
:param artifact_path: If provided, the directory in ``artifact_uri`` to write to.
.. code-block:: python
:caption: Example
import mlflow
# Create a features.txt artifact file
features = "rooms, zipcode, median_price, school_rating, transport"
with open("features.txt", 'w') as f:
f.write(features)
# With artifact_path=None write features.txt under
# root artifact_uri/artifacts directory
with mlflow.start_run():
mlflow.log_artifact("features.txt")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_artifact(run_id, local_path, artifact_path)
def log_artifacts(local_dir: str, artifact_path: Optional[str] = None) -> None:
"""
Log all the contents of a local directory as artifacts of the run. If no run is active,
this method will create a new active run.
:param local_dir: Path to the directory of files to write.
:param artifact_path: If provided, the directory in ``artifact_uri`` to write to.
.. code-block:: python
:caption: Example
import json
import os
import mlflow
# Create some files to preserve as artifacts
features = "rooms, zipcode, median_price, school_rating, transport"
data = {"state": "TX", "Available": 25, "Type": "Detached"}
# Create couple of artifact files under the directory "data"
os.makedirs("data", exist_ok=True)
with open("data/data.json", 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2)
with open("data/features.txt", 'w') as f:
f.write(features)
# Write all files in "data" to root artifact_uri/states
with mlflow.start_run():
mlflow.log_artifacts("data", artifact_path="states")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_artifacts(run_id, local_dir, artifact_path)
def log_text(text: str, artifact_file: str) -> None:
"""
Log text as an artifact.
:param text: String containing text to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the text is saved (e.g. "dir/file.txt").
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run():
# Log text to a file under the run's root artifact directory
mlflow.log_text("text1", "file1.txt")
# Log text in a subdirectory of the run's root artifact directory
mlflow.log_text("text2", "dir/file2.txt")
# Log HTML text
mlflow.log_text("<h1>header</h1>", "index.html")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_text(run_id, text, artifact_file)
def log_dict(dictionary: Any, artifact_file: str) -> None:
"""
Log a JSON/YAML-serializable object (e.g. `dict`) as an artifact. The serialization
format (JSON or YAML) is automatically inferred from the extension of `artifact_file`.
If the file extension doesn't exist or match any of [".json", ".yml", ".yaml"],
JSON format is used.
:param dictionary: Dictionary to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the dictionary is saved (e.g. "dir/data.json").
.. code-block:: python
:caption: Example
import mlflow
dictionary = {"k": "v"}
with mlflow.start_run():
# Log a dictionary as a JSON file under the run's root artifact directory
mlflow.log_dict(dictionary, "data.json")
# Log a dictionary as a YAML file in a subdirectory of the run's root artifact directory
mlflow.log_dict(dictionary, "dir/data.yml")
# If the file extension doesn't exist or match any of [".json", ".yaml", ".yml"],
# JSON format is used.
mlflow.log_dict(dictionary, "data")
mlflow.log_dict(dictionary, "data.txt")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_dict(run_id, dictionary, artifact_file)
def log_figure(
figure: Union["matplotlib.figure.Figure", "plotly.graph_objects.Figure"], artifact_file: str
) -> None:
"""
Log a figure as an artifact. The following figure objects are supported:
- `matplotlib.figure.Figure`_
- `plotly.graph_objects.Figure`_
.. _matplotlib.figure.Figure:
https://matplotlib.org/api/_as_gen/matplotlib.figure.Figure.html
.. _plotly.graph_objects.Figure:
https://plotly.com/python-api-reference/generated/plotly.graph_objects.Figure.html
:param figure: Figure to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the figure is saved (e.g. "dir/file.png").
.. code-block:: python
:caption: Matplotlib Example
import mlflow
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([0, 1], [2, 3])
with mlflow.start_run():
mlflow.log_figure(fig, "figure.png")
.. code-block:: python
:caption: Plotly Example
import mlflow
from plotly import graph_objects as go
fig = go.Figure(go.Scatter(x=[0, 1], y=[2, 3]))
with mlflow.start_run():
mlflow.log_figure(fig, "figure.html")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_figure(run_id, figure, artifact_file)
def log_image(image: Union["numpy.ndarray", "PIL.Image.Image"], artifact_file: str) -> None:
"""
Log an image as an artifact. The following image objects are supported:
- `numpy.ndarray`_
- `PIL.Image.Image`_
.. _numpy.ndarray:
https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html
.. _PIL.Image.Image:
https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image
Numpy array support
- data type (( ) represents a valid value range):
- bool
- integer (0 ~ 255)
- unsigned integer (0 ~ 255)
- float (0.0 ~ 1.0)
.. warning::
- Out-of-range integer values will be **clipped** to [0, 255].
- Out-of-range float values will be **clipped** to [0, 1].
- shape (H: height, W: width):
- H x W (Grayscale)
- H x W x 1 (Grayscale)
- H x W x 3 (an RGB channel order is assumed)
- H x W x 4 (an RGBA channel order is assumed)
:param image: Image to log.
:param artifact_file: The run-relative artifact file path in posixpath format to which
the image is saved (e.g. "dir/image.png").
.. code-block:: python
:caption: Numpy Example
import mlflow
import numpy as np
image = np.random.randint(0, 256, size=(100, 100, 3), dtype=np.uint8)
with mlflow.start_run():
mlflow.log_image(image, "image.png")
.. code-block:: python
:caption: Pillow Example
import mlflow
from PIL import Image
image = Image.new("RGB", (100, 100))
with mlflow.start_run():
mlflow.log_image(image, "image.png")
"""
run_id = _get_or_start_run().info.run_id
MlflowClient().log_image(run_id, image, artifact_file)
def _record_logged_model(mlflow_model):
run_id = _get_or_start_run().info.run_id
MlflowClient()._record_logged_model(run_id, mlflow_model)
def get_experiment(experiment_id: str) -> Experiment:
"""
Retrieve an experiment by experiment_id from the backend store
:param experiment_id: The string-ified experiment ID returned from ``create_experiment``.
:return: :py:class:`mlflow.entities.Experiment`
.. code-block:: python
:caption: Example
import mlflow
experiment = mlflow.get_experiment("0")
print("Name: {}".format(experiment.name))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Name: Default
Artifact Location: file:///.../mlruns/0
Tags: {}
Lifecycle_stage: active
"""
return MlflowClient().get_experiment(experiment_id)
def get_experiment_by_name(name: str) -> Optional[Experiment]:
"""
Retrieve an experiment by experiment name from the backend store
:param name: The case sensitive experiment name.
:return: An instance of :py:class:`mlflow.entities.Experiment`
if an experiment with the specified name exists, otherwise None.
.. code-block:: python
:caption: Example
import mlflow
# Case sensitive name
experiment = mlflow.get_experiment_by_name("Default")
print("Experiment_id: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Experiment_id: 0
Artifact Location: file:///.../mlruns/0
Tags: {}
Lifecycle_stage: active
"""
return MlflowClient().get_experiment_by_name(name)
def list_experiments(
view_type: int = ViewType.ACTIVE_ONLY,
max_results: Optional[int] = None,
) -> List[Experiment]:
"""
:param view_type: Qualify requested type of experiments.
:param max_results: If passed, specifies the maximum number of experiments desired. If not
passed, all experiments will be returned.
:return: A list of :py:class:`Experiment <mlflow.entities.Experiment>` objects.
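A minimal usage sketch (the experiment name is illustrative and the printed output depends
on the backing tracking store):

.. code-block:: python
    :caption: Example

    import mlflow
    from mlflow.entities import ViewType

    mlflow.create_experiment("List Experiments Demo")
    for exp in mlflow.list_experiments(view_type=ViewType.ACTIVE_ONLY, max_results=10):
        print("{}: {}".format(exp.experiment_id, exp.name))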
"""
def pagination_wrapper_func(number_to_get, next_page_token):
return MlflowClient().list_experiments(
view_type=view_type,
max_results=number_to_get,
page_token=next_page_token,
)
return _paginate(pagination_wrapper_func, SEARCH_MAX_RESULTS_DEFAULT, max_results)
def create_experiment(
name: str,
artifact_location: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
) -> str:
"""
Create an experiment.
:param name: The experiment name, which must be unique and is case sensitive
:param artifact_location: The location to store run artifacts.
If not provided, the server picks an appropriate default.
:param tags: An optional dictionary of string keys and values to set as
tags on the experiment.
:return: String ID of the created experiment.
.. code-block:: python
:caption: Example
import mlflow
# Create an experiment with a name that is unique and case sensitive
experiment_id = mlflow.create_experiment("Social NLP Experiments")
experiment = mlflow.get_experiment(experiment_id)
print("Name: {}".format(experiment.name))
print("Experiment_id: {}".format(experiment.experiment_id))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Tags: {}".format(experiment.tags))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Name: Social NLP Experiments
Experiment_id: 1
Artifact Location: file:///.../mlruns/1
Tags: {}
Lifecycle_stage: active
"""
return MlflowClient().create_experiment(name, artifact_location, tags)
def delete_experiment(experiment_id: str) -> None:
"""
Delete an experiment from the backend store.
:param experiment_id: The string-ified experiment ID returned from ``create_experiment``.
.. code-block:: python
:caption: Example
import mlflow
experiment_id = mlflow.create_experiment("New Experiment")
mlflow.delete_experiment(experiment_id)
# Examine the deleted experiment details.
experiment = mlflow.get_experiment(experiment_id)
print("Name: {}".format(experiment.name))
print("Artifact Location: {}".format(experiment.artifact_location))
print("Lifecycle_stage: {}".format(experiment.lifecycle_stage))
.. code-block:: text
:caption: Output
Name: New Experiment
Artifact Location: file:///.../mlruns/2
Lifecycle_stage: deleted
"""
MlflowClient().delete_experiment(experiment_id)
def delete_run(run_id: str) -> None:
"""
Deletes a run with the given ID.
:param run_id: Unique identifier for the run to delete.
.. code-block:: python
:caption: Example
import mlflow
with mlflow.start_run() as run:
mlflow.log_param("p", 0)
run_id = run.info.run_id
mlflow.delete_run(run_id)
print("run_id: {}; lifecycle_stage: {}".format(run_id,
mlflow.get_run(run_id).info.lifecycle_stage))
.. code-block:: text
:caption: Output
run_id: 45f4af3e6fd349e58579b27fcb0b8277; lifecycle_stage: deleted
"""
MlflowClient().delete_run(run_id)
def get_artifact_uri(artifact_path: Optional[str] = None) -> str:
"""
Get the absolute URI of the specified artifact in the currently active run.
If `path` is not specified, the artifact root URI of the currently active
run will be returned; calls to ``log_artifact`` and ``log_artifacts`` write
artifact(s) to subdirectories of the artifact root URI.
If no run is active, this method will create a new active run.
:param artifact_path: The run-relative artifact path for which to obtain an absolute URI.
For example, "path/to/artifact". If unspecified, the artifact root URI
for the currently active run will be returned.
:return: An *absolute* URI referring to the specified artifact or the currently active run's
artifact root. For example, if an artifact path is provided and the currently active
run uses an S3-backed store, this may be a uri of the form
``s3://<bucket_name>/path/to/artifact/root/path/to/artifact``. If an artifact path
is not provided and the currently active run uses an S3-backed store, this may be a
URI of the form ``s3://<bucket_name>/path/to/artifact/root``.
.. code-block:: python
:caption: Example
import mlflow
features = "rooms, zipcode, median_price, school_rating, transport"
with open("features.txt", 'w') as f:
f.write(features)
# Log the artifact in a directory "features" under the root artifact_uri/features
with mlflow.start_run():
mlflow.log_artifact("features.txt", artifact_path="features")
# Fetch the artifact uri root directory
artifact_uri = mlflow.get_artifact_uri()
print("Artifact uri: {}".format(artifact_uri))
# Fetch a specific artifact uri
artifact_uri = mlflow.get_artifact_uri(artifact_path="features/features.txt")
print("Artifact uri: {}".format(artifact_uri))
.. code-block:: text
:caption: Output
Artifact uri: file:///.../0/a46a80f1c9644bd8f4e5dd5553fffce/artifacts
Artifact uri: file:///.../0/a46a80f1c9644bd8f4e5dd5553fffce/artifacts/features/features.txt
"""
return artifact_utils.get_artifact_uri(
run_id=_get_or_start_run().info.run_id, artifact_path=artifact_path
)
def search_runs(
experiment_ids: Optional[List[str]] = None,
filter_string: str = "",
run_view_type: int = ViewType.ACTIVE_ONLY,
max_results: int = SEARCH_MAX_RESULTS_PANDAS,
order_by: Optional[List[str]] = None,
output_format: str = "pandas",
search_all_experiments: bool = False,
experiment_names: Optional[List[str]] = None,
) -> Union[List[Run], "pandas.DataFrame"]:
"""
Get a pandas DataFrame of runs that fit the search criteria.
:param experiment_ids: List of experiment IDs. Search can work with experiment IDs or
experiment names, but not both in the same call. Values other than
``None`` or ``[]`` will result in error if ``experiment_names`` is
also not ``None`` or ``[]``. ``None`` will default to the active
experiment if ``experiment_names`` is ``None`` or ``[]``.
:param filter_string: Filter query string, defaults to searching all runs.
:param run_view_type: one of enum values ``ACTIVE_ONLY``, ``DELETED_ONLY``, or ``ALL`` runs
defined in :py:class:`mlflow.entities.ViewType`.
:param max_results: The maximum number of runs to put in the dataframe. Default is 100,000
to avoid causing out-of-memory issues on the user's machine.
:param order_by: List of columns to order by (e.g., "metrics.rmse"). The ``order_by`` column
can contain an optional ``DESC`` or ``ASC`` value. The default is ``ASC``.
The default ordering is to sort by ``start_time DESC``, then ``run_id``.
:param output_format: The output format to be returned. If ``pandas``, a ``pandas.DataFrame``
is returned and, if ``list``, a list of :py:class:`mlflow.entities.Run`
is returned.
:param search_all_experiments: Boolean specifying whether all experiments should be searched.
Only honored if ``experiment_ids`` is ``[]`` or ``None``.
:param experiment_names: List of experiment names. Search can work with experiment IDs or
experiment names, but not both in the same call. Values other
than ``None`` or ``[]`` will result in error if ``experiment_ids``
is also not ``None`` or ``[]``. ``None`` will default to the active
experiment if ``experiment_ids`` is ``None`` or ``[]``.
:return: If output_format is ``list``: a list of :py:class:`mlflow.entities.Run`. If
output_format is ``pandas``: ``pandas.DataFrame`` of runs, where each metric,
parameter, and tag is expanded into its own column named metrics.*, params.*, or
tags.* respectively. For runs that don't have a particular metric, parameter, or tag,
the value for the corresponding column is (NumPy) ``NaN``, ``None``, or ``None``
respectively.
.. code-block:: python
:caption: Example
import mlflow
# Create an experiment and log two runs under it
experiment_name = "Social NLP Experiments"
experiment_id = mlflow.create_experiment(experiment_name)
with mlflow.start_run(experiment_id=experiment_id):
mlflow.log_metric("m", 1.55)
mlflow.set_tag("s.release", "1.1.0-RC")
with mlflow.start_run(experiment_id=experiment_id):
mlflow.log_metric("m", 2.50)
mlflow.set_tag("s.release", "1.2.0-GA")
# Search for all the runs in the experiment with the given experiment ID
df = mlflow.search_runs([experiment_id], order_by=["metrics.m DESC"])
print(df[["metrics.m", "tags.s.release", "run_id"]])
print("--")
# Search the experiment_id using a filter_string with tag
# that has a case insensitive pattern
filter_string = "tags.s.release ILIKE '%rc%'"
df = mlflow.search_runs([experiment_id], filter_string=filter_string)
print(df[["metrics.m", "tags.s.release", "run_id"]])
print("--")
# Search for all the runs in the experiment with the given experiment name
df = mlflow.search_runs(experiment_names=[experiment_name], order_by=["metrics.m DESC"])
print(df[["metrics.m", "tags.s.release", "run_id"]])
.. code-block:: text
:caption: Output
metrics.m tags.s.release run_id
0 2.50 1.2.0-GA 147eed886ab44633902cc8e19b2267e2
1 1.55 1.1.0-RC 5cc7feaf532f496f885ad7750809c4d4
--
metrics.m tags.s.release run_id
0 1.55 1.1.0-RC 5cc7feaf532f496f885ad7750809c4d4
--
metrics.m tags.s.release run_id
0 2.50 1.2.0-GA 147eed886ab44633902cc8e19b2267e2
1 1.55 1.1.0-RC 5cc7feaf532f496f885ad7750809c4d4
"""
no_ids = experiment_ids is None or len(experiment_ids) == 0
no_names = experiment_names is None or len(experiment_names) == 0
no_ids_or_names = no_ids and no_names
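# Resolve which experiments to search: explicit IDs, explicit names, every active
# experiment (when search_all_experiments=True), or the current default experiment.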
if not no_ids and not no_names:
raise MlflowException(
message="Only experiment_ids or experiment_names can be used, but not both",
error_code=INVALID_PARAMETER_VALUE,
)
if search_all_experiments and no_ids_or_names:
experiment_ids = [
exp.experiment_id for exp in list_experiments(view_type=ViewType.ACTIVE_ONLY)
]
elif no_ids_or_names:
experiment_ids = _get_experiment_id()
elif not no_names:
experiments = [get_experiment_by_name(n) for n in experiment_names if n is not None]
experiment_ids = [e.experiment_id for e in experiments if e is not None]
# Using an internal function as the linter doesn't like assigning a lambda, and inlining the
# full thing is a mess
def pagination_wrapper_func(number_to_get, next_page_token):
return MlflowClient().search_runs(
experiment_ids,
filter_string,
run_view_type,
number_to_get,
order_by,
next_page_token,
)
runs = _paginate(pagination_wrapper_func, NUM_RUNS_PER_PAGE_PANDAS, max_results)
if output_format == "list":
return runs # List[mlflow.entities.run.Run]
elif output_format == "pandas":
import numpy as np
import pandas as pd
info = {
"run_id": [],
"experiment_id": [],
"status": [],
"artifact_uri": [],
"start_time": [],
"end_time": [],
}
params, metrics, tags = ({}, {}, {})
PARAM_NULL, METRIC_NULL, TAG_NULL = (None, np.nan, None)
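# Build one column list per param/metric/tag key; a key first seen on run ``i`` is
# backfilled with ``i`` null entries so every column stays aligned with ``info``.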
for i, run in enumerate(runs):
info["run_id"].append(run.info.run_id)
info["experiment_id"].append(run.info.experiment_id)
info["status"].append(run.info.status)
info["artifact_uri"].append(run.info.artifact_uri)
info["start_time"].append(pd.to_datetime(run.info.start_time, unit="ms", utc=True))
info["end_time"].append(pd.to_datetime(run.info.end_time, unit="ms", utc=True))
# Params
param_keys = set(params.keys())
for key in param_keys:
if key in run.data.params:
params[key].append(run.data.params[key])
else:
params[key].append(PARAM_NULL)
new_params = set(run.data.params.keys()) - param_keys
for p in new_params:
params[p] = [PARAM_NULL] * i # Fill in null values for all previous runs
params[p].append(run.data.params[p])
# Metrics
metric_keys = set(metrics.keys())
for key in metric_keys:
if key in run.data.metrics:
metrics[key].append(run.data.metrics[key])
else:
metrics[key].append(METRIC_NULL)
new_metrics = set(run.data.metrics.keys()) - metric_keys
for m in new_metrics:
metrics[m] = [METRIC_NULL] * i
metrics[m].append(run.data.metrics[m])
# Tags
tag_keys = set(tags.keys())
for key in tag_keys:
if key in run.data.tags:
tags[key].append(run.data.tags[key])
else:
tags[key].append(TAG_NULL)
new_tags = set(run.data.tags.keys()) - tag_keys
for t in new_tags:
tags[t] = [TAG_NULL] * i
tags[t].append(run.data.tags[t])
data = {}
data.update(info)
for key in metrics:
data["metrics." + key] = metrics[key]
for key in params:
data["params." + key] = params[key]
for key in tags:
data["tags." + key] = tags[key]
return pd.DataFrame(data)
else:
raise ValueError(
"Unsupported output format: %s. Supported string values are 'pandas' or 'list'"
% output_format
)
def list_run_infos(
experiment_id: str,
run_view_type: int = ViewType.ACTIVE_ONLY,
max_results: int = SEARCH_MAX_RESULTS_DEFAULT,
order_by: Optional[List[str]] = None,
) -> List[RunInfo]:
"""
Return run information for runs which belong to the experiment_id.
:param experiment_id: The experiment ID to search.
:param run_view_type: ACTIVE_ONLY, DELETED_ONLY, or ALL runs
:param max_results: Maximum number of results desired.
:param order_by: List of order_by clauses. Currently supported values are
``metric.key``, ``parameter.key``, ``tag.key``, ``attribute.key``.
For example, ``order_by=["tag.release ASC", "metric.click_rate DESC"]``.
:return: A list of :py:class:`RunInfo <mlflow.entities.RunInfo>` objects that satisfy the
search expressions.
.. code-block:: python
:caption: Example
import mlflow
from mlflow.entities import ViewType
# Create two runs
with mlflow.start_run() as run1:
mlflow.log_param("p", 0)
with mlflow.start_run() as run2:
mlflow.log_param("p", 1)
# Delete the last run
mlflow.delete_run(run2.info.run_id)
def print_run_infos(run_infos):
for r in run_infos:
print("- run_id: {}, lifecycle_stage: {}".format(r.run_id, r.lifecycle_stage))
print("Active runs:")
print_run_infos(mlflow.list_run_infos("0", run_view_type=ViewType.ACTIVE_ONLY))
print("Deleted runs:")
print_run_infos(mlflow.list_run_infos("0", run_view_type=ViewType.DELETED_ONLY))
print("All runs:")
print_run_infos(mlflow.list_run_infos("0", run_view_type=ViewType.ALL))
.. code-block:: text
:caption: Output
Active runs:
- run_id: 4937823b730640d5bed9e3e5057a2b34, lifecycle_stage: active
Deleted runs:
- run_id: b13f1badbed842cf9975c023d23da300, lifecycle_stage: deleted
All runs:
- run_id: b13f1badbed842cf9975c023d23da300, lifecycle_stage: deleted
- run_id: 4937823b730640d5bed9e3e5057a2b34, lifecycle_stage: active
"""
# Using an internal function as the linter doesn't like assigning a lambda, and inlining the
# full thing is a mess
def pagination_wrapper_func(number_to_get, next_page_token):
return MlflowClient().list_run_infos(
experiment_id, run_view_type, number_to_get, order_by, next_page_token
)
return _paginate(pagination_wrapper_func, SEARCH_MAX_RESULTS_DEFAULT, max_results)
def _paginate(paginated_fn, max_results_per_page, max_results=None):
"""
Intended to be a general use pagination utility.
:param paginated_fn: A function that takes the number of results to retrieve per page and a
pagination token, and returns a PagedList of objects.
:param max_results_per_page: The maximum number of results to retrieve per page.
:param max_results: The maximum number of results to retrieve overall. If unspecified,
all results will be retrieved.
:return: Returns a list of entities, as determined by the paginated_fn parameter, with no more
entities than specified by max_results
:rtype: list[object]
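A minimal sketch with a fake paginated source (``_FakePage`` and ``fetch_page`` are
illustrative helpers, not part of MLflow):

.. code-block:: python
    :caption: Example

    class _FakePage(list):
        def __init__(self, items, token=None):
            super().__init__(items)
            self.token = token

    def fetch_page(number_to_get, next_page_token):
        start = int(next_page_token or 0)
        items = list(range(start, min(start + number_to_get, 10)))
        token = str(start + len(items)) if start + len(items) < 10 else None
        return _FakePage(items, token)

    _paginate(fetch_page, max_results_per_page=4, max_results=10)  # [0, 1, ..., 9]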
"""
all_results = []
next_page_token = None
returns_all = max_results is None
while returns_all or len(all_results) < max_results:
num_to_get = max_results_per_page if returns_all else max_results - len(all_results)
if num_to_get < max_results_per_page:
page_results = paginated_fn(num_to_get, next_page_token)
else:
page_results = paginated_fn(max_results_per_page, next_page_token)
all_results.extend(page_results)
if hasattr(page_results, "token") and page_results.token:
next_page_token = page_results.token
else:
break
return all_results
def _get_or_start_run():
if len(_active_run_stack) > 0:
return _active_run_stack[-1]
return start_run()
def _get_experiment_id_from_env():
experiment_name = env.get_env(_EXPERIMENT_NAME_ENV_VAR)
if experiment_name is not None:
exp = MlflowClient().get_experiment_by_name(experiment_name)
return exp.experiment_id if exp else None
return env.get_env(_EXPERIMENT_ID_ENV_VAR)
def _get_experiment_id():
return (
_active_experiment_id
or _get_experiment_id_from_env()
or default_experiment_registry.get_experiment_id()
)
@autologging_integration("mlflow")
def autolog(
log_input_examples: bool = False,
log_model_signatures: bool = True,
log_models: bool = True,
disable: bool = False,
exclusive: bool = False,
disable_for_unsupported_versions: bool = False,
silent: bool = False,
# pylint: disable=unused-argument
) -> None:
"""
Enables (or disables) and configures autologging for all supported integrations.
The parameters are passed to any autologging integrations that support them.
See the :ref:`tracking docs <automatic-logging>` for a list of supported autologging
integrations.
Note that framework-specific configurations set at any point will take precedence over
any configurations set by this function. For example:
.. code-block:: python
mlflow.autolog(log_models=False, exclusive=True)
import sklearn
would enable autologging for `sklearn` with `log_models=False` and `exclusive=True`,
but
.. code-block:: python
mlflow.autolog(log_models=False, exclusive=True)
import sklearn
mlflow.sklearn.autolog(log_models=True)
would enable autologging for `sklearn` with `log_models=True` and `exclusive=False`,
the latter resulting from the default value for `exclusive` in `mlflow.sklearn.autolog`;
other framework autolog functions (e.g. `mlflow.tensorflow.autolog`) would use the
configurations set by `mlflow.autolog` (in this instance, `log_models=False`, `exclusive=True`),
until they are explicitly called by the user.
:param log_input_examples: If ``True``, input examples from training datasets are collected and
logged along with model artifacts during training. If ``False``,
input examples are not logged.
Note: Input examples are MLflow model attributes
and are only collected if ``log_models`` is also ``True``.
:param log_model_signatures: If ``True``,
:py:class:`ModelSignatures <mlflow.models.ModelSignature>`
describing model inputs and outputs are collected and logged along
with model artifacts during training. If ``False``, signatures are
not logged. Note: Model signatures are MLflow model attributes
and are only collected if ``log_models`` is also ``True``.
:param log_models: If ``True``, trained models are logged as MLflow model artifacts.
If ``False``, trained models are not logged.
Input examples and model signatures, which are attributes of MLflow models,
are also omitted when ``log_models`` is ``False``.
:param disable: If ``True``, disables all supported autologging integrations. If ``False``,
enables all supported autologging integrations.
:param exclusive: If ``True``, autologged content is not logged to user-created fluent runs.
If ``False``, autologged content is logged to the active fluent run,
which may be user-created.
:param disable_for_unsupported_versions: If ``True``, disable autologging for versions of
all integration libraries that have not been tested against this version
of the MLflow client or are incompatible.
:param silent: If ``True``, suppress all event logs and warnings from MLflow during autologging
setup and training execution. If ``False``, show all events and warnings during
autologging setup and training execution.
.. code-block:: python
:caption: Example
import numpy as np
import mlflow.sklearn
from mlflow.tracking import MlflowClient
from sklearn.linear_model import LinearRegression
def print_auto_logged_info(r):
tags = {k: v for k, v in r.data.tags.items() if not k.startswith("mlflow.")}
artifacts = [f.path for f in MlflowClient().list_artifacts(r.info.run_id, "model")]
print("run_id: {}".format(r.info.run_id))
print("artifacts: {}".format(artifacts))
print("params: {}".format(r.data.params))
print("metrics: {}".format(r.data.metrics))
print("tags: {}".format(tags))
# prepare training data
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
y = np.dot(X, np.array([1, 2])) + 3
# Auto log all the parameters, metrics, and artifacts
mlflow.autolog()
model = LinearRegression()
with mlflow.start_run() as run:
model.fit(X, y)
# fetch the auto logged parameters and metrics for ended run
print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))
.. code-block:: text
:caption: Output
run_id: fd10a17d028c47399a55ab8741721ef7
artifacts: ['model/MLmodel', 'model/conda.yaml', 'model/model.pkl']
params: {'copy_X': 'True',
'normalize': 'False',
'fit_intercept': 'True',
'n_jobs': 'None'}
metrics: {'training_score': 1.0,
'training_rmse': 4.440892098500626e-16,
'training_r2_score': 1.0,
'training_mae': 2.220446049250313e-16,
'training_mse': 1.9721522630525295e-31}
tags: {'estimator_class': 'sklearn.linear_model._base.LinearRegression',
'estimator_name': 'LinearRegression'}
"""
from mlflow import (
tensorflow,
keras,
gluon,
xgboost,
lightgbm,
pyspark,
statsmodels,
spark,
sklearn,
fastai,
pytorch,
)
locals_copy = locals().items()
# Mapping of library module name to specific autolog function
# e.g., mxnet.gluon is the actual library, mlflow.gluon.autolog is our autolog function for it
LIBRARY_TO_AUTOLOG_FN = {
"tensorflow": tensorflow.autolog,
"keras": keras.autolog,
"mxnet.gluon": gluon.autolog,
"xgboost": xgboost.autolog,
"lightgbm": lightgbm.autolog,
"statsmodels": statsmodels.autolog,
"sklearn": sklearn.autolog,
"fastai": fastai.autolog,
"pyspark": spark.autolog,
"pyspark.ml": pyspark.ml.autolog,
# TODO: Broaden this beyond pytorch_lightning as we add autologging support for more
# Pytorch frameworks under mlflow.pytorch.autolog
"pytorch_lightning": pytorch.autolog,
}
CONF_KEY_IS_GLOBALLY_CONFIGURED = "globally_configured"
def get_autologging_params(autolog_fn):
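# Keep only the arguments captured from ``mlflow.autolog`` that this integration's
# autolog function actually accepts.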
try:
needed_params = list(inspect.signature(autolog_fn).parameters.keys())
return {k: v for k, v in locals_copy if k in needed_params}
except Exception:
return {}
def setup_autologging(module):
autologging_params = {}  # bound up front so the exception handler below can always reference it
try:
autolog_fn = LIBRARY_TO_AUTOLOG_FN[module.__name__]
# Only call integration's autolog function with `mlflow.autolog` configs
# if the integration's autolog function has not already been called by the user.
# Logic is as follows:
# - if a previous_config exists, that means either `mlflow.autolog` or
# `mlflow.integration.autolog` was called.
# - if the config contains `CONF_KEY_IS_GLOBALLY_CONFIGURED`, the configuration
# was set by `mlflow.autolog`, and so we can safely call `autolog_fn` with
# `autologging_params`.
# - if the config doesn't contain this key, the configuration was set by an
# `mlflow.integration.autolog` call, so we should not call `autolog_fn` with
# new configs.
prev_config = AUTOLOGGING_INTEGRATIONS.get(autolog_fn.integration_name)
if prev_config and not prev_config.get(CONF_KEY_IS_GLOBALLY_CONFIGURED, False):
return
autologging_params = get_autologging_params(autolog_fn)
autolog_fn(**autologging_params)
AUTOLOGGING_INTEGRATIONS[autolog_fn.integration_name][
CONF_KEY_IS_GLOBALLY_CONFIGURED
] = True
if not autologging_is_disabled(
autolog_fn.integration_name
) and not autologging_params.get("silent", False):
_logger.info("Autologging successfully enabled for %s.", module.__name__)
except Exception as e:
if is_testing():
# Raise unexpected exceptions in test mode in order to detect
# errors within dependent autologging integrations
raise
elif not autologging_params.get("silent", False):
_logger.warning(
"Exception raised while enabling autologging for %s: %s",
module.__name__,
str(e),
)
# for each autolog library (except pyspark), register a post-import hook.
# this way, we do not send any errors to the user until we know they are using the library.
# the post-import hook also retroactively activates for previously-imported libraries.
for module in list(
set(LIBRARY_TO_AUTOLOG_FN.keys()) - set(["tensorflow", "keras", "pyspark", "pyspark.ml"])
):
register_post_import_hook(setup_autologging, module, overwrite=True)
FULLY_IMPORTED_KERAS = False
TF_AUTOLOG_SETUP_CALLED = False
def conditionally_set_up_keras_autologging(keras_module):
nonlocal FULLY_IMPORTED_KERAS, TF_AUTOLOG_SETUP_CALLED
FULLY_IMPORTED_KERAS = True
if Version(keras_module.__version__) >= Version("2.6.0"):
# NB: Keras unconditionally depends on TensorFlow beginning with Version 2.6.0, and
# many classes defined in the `keras` module are aliases of classes in the `tf.keras`
# module. Accordingly, TensorFlow autologging serves as a replacement for Keras
# autologging in Keras >= 2.6.0
try:
import tensorflow
setup_autologging(tensorflow)
TF_AUTOLOG_SETUP_CALLED = True
except Exception as e:
_logger.debug(
"Failed to set up TensorFlow autologging for tf.keras models upon"
" Keras library import: %s",
str(e),
)
raise
else:
setup_autologging(keras_module)
register_post_import_hook(conditionally_set_up_keras_autologging, "keras", overwrite=True)
def set_up_tensorflow_autologging(tensorflow_module):
import sys
nonlocal FULLY_IMPORTED_KERAS, TF_AUTOLOG_SETUP_CALLED
if "keras" in sys.modules and not FULLY_IMPORTED_KERAS:
# In Keras >= 2.6.0, importing Keras imports the TensorFlow library, which can
# trigger this autologging import hook for TensorFlow before the entire Keras import
# procedure is completed. Attempting to set up autologging before the Keras import
# procedure has completed will result in a failure due to the unavailability of
# certain modules. In this case, we terminate the TensorFlow autologging import hook
# and rely on the Keras autologging import hook to successfully set up TensorFlow
# autologging for tf.keras models once the Keras import procedure has completed
return
# By design, in Keras >= 2.6.0, Keras needs to enable tensorflow autologging so that
# tf.keras models always use tensorflow autologging, rather than vanilla keras autologging.
# As a result, Keras autologging must call `mlflow.tensorflow.autolog()` in Keras >= 2.6.0.
# Accordingly, we insert this check to ensure that importing tensorflow, which may import
# keras, does not enable tensorflow autologging twice.
if not TF_AUTOLOG_SETUP_CALLED:
setup_autologging(tensorflow_module)
register_post_import_hook(set_up_tensorflow_autologging, "tensorflow", overwrite=True)
# for pyspark, we activate autologging immediately, without waiting for a module import.
# this is because on Databricks a SparkSession already exists and the user can directly
# interact with it, and this activity should be logged.
try:
import pyspark as pyspark_module
import pyspark.ml as pyspark_ml_module
setup_autologging(pyspark_module)
setup_autologging(pyspark_ml_module)
except ImportError as ie:
# if pyspark isn't installed, a user could potentially install it in the middle
# of their session so we want to enable autologging once they do
if "pyspark" in str(ie):
register_post_import_hook(setup_autologging, "pyspark", overwrite=True)
register_post_import_hook(setup_autologging, "pyspark.ml", overwrite=True)
except Exception as e:
if is_testing():
# Raise unexpected exceptions in test mode in order to detect
# errors within dependent autologging integrations
raise
else:
_logger.warning("Exception raised while enabling autologging for spark: %s", str(e))
| 39.333525 | 100 | 0.638119 |
6c4dbedf8128fda560f7c312c84bab9e8f5ecfb9 | 2,687 | py | Python | movement_primitives/data/_minimum_jerk.py | maotto/movement_primitives | b79c78a5a0667cc24a26b7b6cc64a5762d8f4dd4 | ["BSD-3-Clause"] | 17 | 2021-11-17T15:36:16.000Z | 2022-03-26T08:49:25.000Z | movement_primitives/data/_minimum_jerk.py | DavidYaonanZhu/movement_primitives | ce355837f06cb5fada24be7259cb0305e8ea5d91 | ["BSD-3-Clause"] | 9 | 2021-12-01T10:33:04.000Z | 2022-03-23T12:41:39.000Z | movement_primitives/data/_minimum_jerk.py | DavidYaonanZhu/movement_primitives | ce355837f06cb5fada24be7259cb0305e8ea5d91 | ["BSD-3-Clause"] | 8 | 2021-11-25T03:53:40.000Z | 2022-03-31T03:19:25.000Z |
import numpy as np
def generate_minimum_jerk(start, goal, execution_time=1.0, dt=0.01):
"""Create a minimum jerk trajectory.
A minimum jerk trajectory from :math:`x_0` to :math:`g` minimizes
the third time derivative of the positions:
.. math::
\\arg \\min_{x_0, \\ldots, x_T} \\int_{t=0}^T \\dddot{x}(t)^2 dt
The trajectory will have
.. code-block:: python
n_steps = 1 + execution_time / dt
steps because we start at 0 seconds and end at execution_time seconds.
Parameters
----------
start : array-like, shape (n_dims,)
Initial state
goal : array-like, shape (n_dims,)
Goal state
execution_time : float, optional (default: 1)
Execution time in seconds
dt : float, optional (default: 0.01)
Time between successive steps in seconds
Returns
-------
X : array, shape (n_steps, n_dims)
The positions of the trajectory
Xd : array, shape (n_steps, n_dims)
The velocities of the trajectory
Xdd : array, shape (n_steps, n_dims)
The accelerations of the trajectory
Raises
------
ValueError
If the shapes of the initial and goal state do not match.
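Examples
--------
A short sketch for a one-dimensional trajectory; with these arguments the result has
``1 + 1.0 / 0.01 = 101`` steps:
>>> X, Xd, Xdd = generate_minimum_jerk([0.0], [1.0], execution_time=1.0, dt=0.01)
>>> X.shape
(101, 1)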
"""
x0 = np.asarray(start)
g = np.asarray(goal)
if x0.shape != g.shape:
raise ValueError("Shape of initial state %s and goal %s must be equal"
% (x0.shape, g.shape))
n_dims = x0.shape[0]
n_steps = 1 + int(execution_time / dt)
X = np.zeros((n_steps, n_dims))
Xd = np.zeros((n_steps, n_dims))
Xdd = np.zeros((n_steps, n_dims))
x = x0.copy()
xd = np.zeros(n_dims)
xdd = np.zeros(n_dims)
X[0] = x
for t in range(1, n_steps):
tau = execution_time - t * dt
if tau >= dt:
dist = g - x
a1 = 0
a0 = xdd * tau ** 2
v1 = 0
v0 = xd * tau
t1 = dt
t2 = dt ** 2
t3 = dt ** 3
t4 = dt ** 4
t5 = dt ** 5
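# c1..c6 are the coefficients of the quintic polynomial that matches the current
# position, velocity, and acceleration and reaches the goal within the remaining time
# tau; the polynomial is then evaluated one step (dt) ahead.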
c1 = (6. * dist + (a1 - a0) / 2. - 3. * (v0 + v1)) / tau ** 5
c2 = (-15. * dist + (3. * a0 - 2. * a1) / 2. + 8. * v0 +
7. * v1) / tau ** 4
c3 = (10. * dist + (a1 - 3. * a0) / 2. - 6. * v0 -
4. * v1) / tau ** 3
c4 = xdd / 2.
c5 = xd
c6 = x
x = c1 * t5 + c2 * t4 + c3 * t3 + c4 * t2 + c5 * t1 + c6
xd = (5. * c1 * t4 + 4 * c2 * t3 + 3 * c3 * t2 + 2 * c4 * t1 + c5)
xdd = (20. * c1 * t3 + 12. * c2 * t2 + 6. * c3 * t1 + 2. * c4)
X[t] = x
Xd[t] = xd
Xdd[t] = xdd
return X, Xd, Xdd
| 25.590476 | 78 | 0.484927 |
b239e9d99986355452a79bf2ac4bb9e7a9e33c14 | 5,467 | py | Python | beit/semantic_segmentation/tools/test.py | Sanster/unilm | 8dc116038a49f51abb9506071283a817af43fd60 | ["MIT"] | 5,129 | 2019-09-30T11:21:03.000Z | 2022-03-31T22:35:12.000Z | beit/semantic_segmentation/tools/test.py | Sanster/unilm | 8dc116038a49f51abb9506071283a817af43fd60 | ["MIT"] | 604 | 2019-10-05T00:39:46.000Z | 2022-03-31T11:12:07.000Z | beit/semantic_segmentation/tools/test.py | Sanster/unilm | 8dc116038a49f51abb9506071283a817af43fd60 | ["MIT"] | 1,034 | 2019-09-30T15:01:32.000Z | 2022-03-31T06:14:50.000Z |
import argparse
import os
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmcv.utils import DictAction
from mmseg.apis import multi_gpu_test, single_gpu_test
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models import build_segmentor
from backbone import beit
def parse_args():
parser = argparse.ArgumentParser(
description='mmseg test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--aug-test', action='store_true', help='Use Flip and Multi scale aug')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "mIoU"'
' for generic datasets, and "cityscapes" for Cityscapes')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu_collect is not specified')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
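# Example invocation (paths are placeholders, not files shipped with this repository):
#   python tools/test.py <config-file.py> <checkpoint.pth> --eval mIoU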
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if args.aug_test:
# hard code index
cfg.data.test.pipeline[1].img_ratios = [
0.5, 0.75, 1.0, 1.25, 1.5, 1.75
]
cfg.data.test.pipeline[1].flip = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
model.CLASSES = checkpoint['meta']['CLASSES']
model.PALETTE = checkpoint['meta']['PALETTE']
efficient_test = False
if args.eval_options is not None:
efficient_test = args.eval_options.get('efficient_test', False)
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
efficient_test)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect, efficient_test)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
dataset.evaluate(outputs, args.eval, **kwargs)
if __name__ == '__main__':
main()
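

# A minimal usage sketch (hedged, not part of the original script): it only
# illustrates the argparse interface defined above; the config and checkpoint
# paths are hypothetical placeholders.
def _example_single_gpu_eval():
    import subprocess
    import sys
    subprocess.run(
        [sys.executable, __file__,
         'configs/example_config.py',       # hypothetical config file
         'work_dirs/example/latest.pth',    # hypothetical checkpoint
         '--eval', 'mIoU',
         '--launcher', 'none'],
        check=True)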
| 36.205298
| 79
| 0.646607
|
cb251d46489255b0a3f31d69a14afe0f5e39044b
| 291,287
|
py
|
Python
|
pandas/core/frame.py
|
dhimmel/pandas
|
776fed3ab63d74ddef6e5af1a702b10c2a30bbb6
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/frame.py
|
dhimmel/pandas
|
776fed3ab63d74ddef6e5af1a702b10c2a30bbb6
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/frame.py
|
dhimmel/pandas
|
776fed3ab63d74ddef6e5af1a702b10c2a30bbb6
|
[
"BSD-3-Clause"
] | null | null | null |
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
construct_1d_arraylike_from_scalar,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_extension_array_dtype,
is_datetimetz,
is_datetime64_any_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
ensure_float64,
ensure_int64,
ensure_platform_int,
is_list_like,
is_nested_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.concat import _get_sliced_frame_result_type
from pandas.core.dtypes.missing import isna, notna
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, ensure_index,
ensure_index_from_sequences)
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.arrays import Categorical, ExtensionArray
import pandas.core.algorithms as algorithms
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback,
string_and_binary_types)
from pandas import compat
from pandas.compat import PY36
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Appender, Substitution,
rewrite_axis_style_signature,
deprecate_kwarg)
from pandas.util._validators import (validate_bool_kwarg,
validate_axis_style_args)
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
import pandas.core.indexes.base as ibase
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.io.formats.console as console
import pandas.io.formats.format as fmt
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
from pandas._libs import lib, algos as libalgos
from pandas.core.config import get_option
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
axis="""
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column.
- 1 or 'columns': apply function to each row.""",
optional_by="""
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels
.. versionchanged:: 0.23.0
Allow specifying index or column level names.""",
versionadded_to_excel='',
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects by performing a database-style join
operation by columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
copy : boolean, default True
If False, avoid copy if possible.
indicator : boolean or string, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : string, default None
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Returns
-------
DataFrame
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
See Also
--------
merge_ordered : merge with optional filling/interpolation.
merge_asof : merge on nearest keys.
DataFrame.join : similar method using indices.
Examples
--------
>>> A = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> B = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> A
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> B
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
>>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2
a b c d e
0 2 8 8 3 4
1 4 2 9 0 9
2 1 0 7 8 0
3 5 1 7 1 3
4 6 0 2 4 2
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
['sortlevel', 'get_value', 'set_value', 'from_csv', 'from_items'])
_accessors = set()
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None) is not None:
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
        # data is list-like or an Iterable (which will be consumed into a list)
elif (isinstance(data, collections.Iterable)
and not isinstance(data, string_and_binary_types)):
if not isinstance(data, collections.Sequence):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_dict({}, index, columns, dtype=dtype)
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: {e}'.format(e=e))
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
values = cast_scalar_to_array((len(index), len(columns)),
data, dtype=dtype)
mgr = self._init_ndarray(values, index, columns,
dtype=values.dtype, copy=False)
else:
raise ValueError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
arrays = Series(data, index=columns, dtype=object)
data_names = arrays.index
missing = arrays.isnull()
if index is None:
# GH10856
# raise ValueError if only scalars in dict
index = extract_index(arrays[~missing])
else:
index = ensure_index(index)
# no obvious "empty" int column
if missing.any() and not is_integer_dtype(dtype):
if dtype is None or np.issubdtype(dtype, np.flexible):
# 1783
nan_dtype = object
else:
nan_dtype = dtype
v = construct_1d_arraylike_from_scalar(np.nan, len(index),
nan_dtype)
arrays.loc[missing] = [v] * missing.sum()
else:
keys = com.dict_keys_to_ordered_list(data)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1)
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif (is_datetimetz(values) or is_extension_array_dtype(values)):
# GH19157
if columns is None:
columns = [0]
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '{dtype}' (Exception "
"was: {orig})".format(dtype=dtype,
orig=orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
        [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
        dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
        Check if full repr fits in horizontal boundaries imposed by the display
        options width and max_columns. In case of a non-interactive session,
        no boundaries apply.
        ignore_width is here so ipynb+HTML output can behave the way
        users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if (get_option('display.width') is not None or
console.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
        if max_rows is not None:
            # min of two, where one may be None
            d = d.iloc[:min(max_rows, len(d))]
        else:
            # max_rows is None: unlimited rows are shown, assume the repr fits
            return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(l) for l in value.split('\n'))
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if console.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'<', 1)
val = val.replace('>', r'>', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
@property
def style(self):
"""
        Property returning a Styler object containing methods for
        building a styled HTML representation of the DataFrame.
See Also
--------
pandas.io.formats.style.Styler
"""
from pandas.io.formats.style import Styler
return Styler(self)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
itertuples : Iterate over DataFrame rows as namedtuples of the values.
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
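    # A minimal usage sketch (hypothetical frame), showing the
    # (column name, Series) pairs that ``iteritems`` yields:
    #
    #     >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    #     >>> [(name, s.tolist()) for name, s in df.iteritems()]
    #     [('a', [1, 2]), ('b', [3, 4])]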
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples, with index value as first
element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
name : string, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
iteritems : Iterate over (column name, Series) pairs.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='a', col1=1, col2=0.10000000000000001)
Pandas(Index='b', col1=2, col2=0.20000000000000001)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects. Can also be
called using `self @ other` in Python >= 3.5.
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, '
'{l} vs {r}'.format(l=lvals.shape,
r=rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: {oth}'.format(oth=type(other)))
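    # A minimal usage sketch (hypothetical values): ``dot`` aligns
    # ``self.columns`` with ``other.index`` before multiplying.
    #
    #     >>> df = pd.DataFrame([[1, 2], [3, 4]])
    #     >>> other = pd.DataFrame([[5, 6], [7, 8]])
    #     >>> df.dot(other)
    #         0   1
    #     0  19  22
    #     1  43  50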
def __matmul__(self, other):
""" Matrix multiplication using binary `@` operator in Python>=3.5 """
return self.dot(other)
def __rmatmul__(self, other):
""" Matrix multiplication using binary `@` operator in Python>=3.5 """
return self.T.dot(np.transpose(other)).T
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None, columns=None):
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
pandas.DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame
DataFrame : DataFrame object creation using constructor
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == 'columns':
if columns is not None:
raise ValueError("cannot use columns parameter with "
"orient='columns'")
else: # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
result : collections.Mapping like {column -> {index -> value}}
See Also
--------
DataFrame.from_dict: create a DataFrame from a dictionary
DataFrame.to_json: convert a DataFrame to JSON format
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
col1 col2
a 1 0.50
b 2 0.75
>>> df.to_dict()
{'col1': {'a': 1, 'b': 2}, 'col2': {'a': 0.5, 'b': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': a 1
b 2
Name: col1, dtype: int64,
'col2': a 0.50
b 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['a', 'b'], 'columns': ['col1', 'col2'],
'data': [[1.0, 0.5], [2.0, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
>>> df.to_dict('index')
{'a': {'col1': 1.0, 'col2': 0.5}, 'b': {'col1': 2.0, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('a', 1), ('b', 2)])),
('col2', OrderedDict([('a', 0.5), ('b', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1.0, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2.0, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning,
stacklevel=2)
# GH16122
into_c = com.standardize_mapping(into)
if orient.lower().startswith('d'):
return into_c(
(k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
('data', lib.map_infer(self.values.ravel(),
com.maybe_box_datetimelike)
.reshape(self.values.shape).tolist())))
elif orient.lower().startswith('s'):
return into_c((k, com.maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [into_c((k, com.maybe_box_datetimelike(v))
for k, v in zip(self.columns, np.atleast_1d(row)))
for row in self.values]
elif orient.lower().startswith('i'):
return into_c((t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples())
else:
raise ValueError("orient '{o}' not understood".format(o=orient))
def to_gbq(self, destination_table, project_id=None, chunksize=None,
reauth=False, if_exists='fail', private_key=None,
auth_local_webserver=False, table_schema=None, location=None,
progress_bar=True, verbose=None):
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
private_key : str, optional
Service account private key in JSON format. Can be file path
            or string contents. This is useful for remote server
            authentication (e.g. Jupyter/IPython notebook on remote host).
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
            List of BigQuery table fields to which the DataFrame
            columns conform, e.g. ``[{'name': 'col1', 'type':
            'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
verbose : bool, deprecated
Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
pandas.read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
return gbq.to_gbq(
self, destination_table, project_id=project_id,
chunksize=chunksize, reauth=reauth,
if_exists=if_exists, private_key=private_key,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema, location=location,
progress_bar=progress_bar, verbose=verbose)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
index_data = [arrays[i] for i in to_remove]
result_index = ensure_index_from_sequences(index_data,
names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
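    # A minimal usage sketch (hypothetical records): ``from_records`` accepts
    # a list of tuples (or a structured ndarray) plus optional column names.
    #
    #     >>> pd.DataFrame.from_records([(1, 'a'), (2, 'b')], columns=['x', 'y'])
    #        x  y
    #     0  1  a
    #     1  2  b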
def to_records(self, index=True, convert_datetime64=None):
"""
Convert DataFrame to a NumPy record array.
Index will be put in the 'index' field of the record array if
requested.
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field.
convert_datetime64 : boolean, default None
.. deprecated:: 0.23.0
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
Returns
-------
y : numpy.recarray
See Also
--------
DataFrame.from_records: convert structured or record ndarray
to DataFrame.
numpy.recarray: ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
By default, timestamps are converted to `datetime.datetime`:
>>> df.index = pd.date_range('2018-01-01 09:00', periods=2, freq='min')
>>> df
A B
2018-01-01 09:00:00 1 0.50
2018-01-01 09:01:00 2 0.75
>>> df.to_records()
rec.array([(datetime.datetime(2018, 1, 1, 9, 0), 1, 0.5 ),
(datetime.datetime(2018, 1, 1, 9, 1), 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The timestamp conversion can be disabled so NumPy's datetime64
data type is used instead:
>>> df.to_records(convert_datetime64=False)
rec.array([('2018-01-01T09:00:00.000000000', 1, 0.5 ),
('2018-01-01T09:01:00.000000000', 2, 0.75)],
dtype=[('index', '<M8[ns]'), ('A', '<i8'), ('B', '<f8')])
"""
if convert_datetime64 is not None:
warnings.warn("The 'convert_datetime64' parameter is "
"deprecated and will be removed in a future "
"version",
FutureWarning, stacklevel=2)
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
                    # convert the array of index tuples into per-level
                    # numpy columns (this copies the data)
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = (lmap(compat.text_type, index_names) +
lmap(compat.text_type, self.columns))
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
)
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""Construct a dataframe from a list of tuples
.. deprecated:: 0.23.0
`from_items` is deprecated and will be removed in a future version.
Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>`
instead.
:meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>`
may be used to preserve the key order.
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
"""
warnings.warn("from_items is deprecated. Please use "
"DataFrame.from_dict(dict(items), ...) instead. "
"DataFrame.from_dict(OrderedDict(items)) may be used to "
"preserve the key order.",
FutureWarning, stacklevel=2)
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = ensure_index(keys)
arrays = values
# GH 17312
# Provide more informative error msg when scalar values passed
try:
return cls._from_arrays(arrays, columns, None)
except ValueError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = ensure_index(keys)
# GH 17312
# Provide more informative error msg when scalar values passed
try:
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
except TypeError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=None,
infer_datetime_format=False):
"""Read CSV file.
.. deprecated:: 0.21.0
Use :func:`pandas.read_csv` instead.
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
        tupleize_cols : boolean, default False
            Write MultiIndex columns as a list of tuples (if True) or in
            the new, expanded format (if False).
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : DataFrame
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
"instead. Note that some of the default arguments are "
"different, so please refer to the documentation "
"for from_csv when changing your function calls",
FutureWarning, stacklevel=2)
from pandas.io.parsers import read_csv
return read_csv(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame.
        Implement the sparse version of the DataFrame, meaning that any data
        matching a specific value is omitted in the representation.
        The sparse DataFrame allows for more efficient storage.
Parameters
----------
fill_value : float, default None
The specific value that should be omitted in the representation.
kind : {'block', 'integer'}, default 'block'
The kind of the SparseIndex tracking where data is not equal to
the fill value:
- 'block' tracks only the locations and sizes of blocks of data.
- 'integer' keeps an array with all the locations of the data.
In most cases 'block' is recommended, since it's more memory
efficient.
Returns
-------
SparseDataFrame
The sparse representation of the DataFrame.
See Also
--------
DataFrame.to_dense :
            Converts the DataFrame back to its dense form.
Examples
--------
>>> df = pd.DataFrame([(np.nan, np.nan),
... (1., np.nan),
... (np.nan, 1.)])
>>> df
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
>>> sdf = df.to_sparse()
>>> sdf
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(sdf)
<class 'pandas.core.sparse.frame.SparseDataFrame'>
"""
from pandas.core.sparse.frame import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
.. deprecated:: 0.20.0
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
"""
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sort_index(level=0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels,
minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return self._constructor_expanddim(new_mgr)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression='infer', quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=None, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
r"""Write DataFrame to a comma-separated values (csv) file
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
sep : character, default ','
Field delimiter for the output file.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None},
default 'infer'
If 'infer' and `path_or_buf` is path-like, then detect compression
from the following extensions: '.gz', '.bz2', '.zip' or '.xz'
(otherwise no compression).
.. versionchanged:: 0.24.0
'infer' option added and set to default
line_terminator : string, default ``'\n'``
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
.. deprecated:: 0.21.0
This argument will be removed and will always write each row
of the multi-index as a separate row in the CSV file.
Write MultiIndex columns as a list of tuples (if True) or in
the new, expanded format, where each MultiIndex column is a row
in the CSV (if False).
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
"""
if tupleize_cols is not None:
warnings.warn("The 'tupleize_cols' parameter is deprecated and "
"will be removed in a future version",
FutureWarning, stacklevel=2)
else:
tupleize_cols = False
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(self, path_or_buf,
line_terminator=line_terminator, sep=sep,
encoding=encoding,
compression=compression, quoting=quoting,
na_rep=na_rep, float_format=float_format,
cols=columns, header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar, decimal=decimal)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
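    # A minimal usage sketch (hypothetical frame): with ``path_or_buf=None``
    # the CSV text is returned as a string instead of being written to disk.
    #
    #     >>> pd.DataFrame({'a': [1, 2], 'b': [3, 4]}).to_csv(index=False)
    #     'a,b\n1,3\n2,4\n'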
@Appender(_shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
@deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None, version=114,
convert_strl=None):
"""
Export Stata binary dta files.
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() functions. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
version : {114, 117}
            Version to use in the output dta file. Version 114 can be
            read by Stata 10 and later. Version 117 can be read by Stata 13
or later. Version 114 limits string variables to 244 characters or
fewer while 117 allows strings with lengths up to 2,000,000
characters.
.. versionadded:: 0.23.0
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
See Also
--------
pandas.read_stata : Import Stata data files
pandas.io.stata.StataWriter : low-level writer for Stata data files
pandas.io.stata.StataWriter117 : low-level writer for version 117 files
Examples
--------
>>> data.to_stata('./data_file.dta')
Or with dates
>>> data.to_stata('./date_data_file.dta', {2 : 'tw'})
Alternatively you can create an instance of the StataWriter class
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
With dates:
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
"""
kwargs = {}
if version not in (114, 117):
raise ValueError('Only formats 114 and 117 supported.')
if version == 114:
if convert_strl is not None:
raise ValueError('strl support is only available when using '
'format 117')
from pandas.io.stata import StataWriter as statawriter
else:
from pandas.io.stata import StataWriter117 as statawriter
kwargs['convert_strl'] = convert_strl
writer = statawriter(fname, self, convert_dates=convert_dates,
byteorder=byteorder, time_stamp=time_stamp,
data_label=data_label, write_index=write_index,
variable_labels=variable_labels, **kwargs)
writer.write_file()
def to_feather(self, fname):
"""
write out the binary feather-format for DataFrames
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
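    # Usage sketch (assumes the optional feather dependency is installed;
    # the file name is illustrative):
    #
    #     df.to_feather('frame.feather')
    #     roundtrip = pd.read_feather('frame.feather')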
def to_parquet(self, fname, engine='auto', compression='snappy',
**kwargs):
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
fname : str
String file path.
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip', compression='gzip')
>>> pd.read_parquet('df.parquet.gzip')
col1 col2
0 1 3
1 2 4
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
compression=compression, **kwargs)
@Substitution(header='Write out the column names. If a list of strings '
'is given, it is assumed to be aliases for the '
'column names')
@Substitution(shared_params=fmt.common_docstring,
returns=fmt.return_docstring)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1' : [1, 2, 3], 'col2' : [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
@Substitution(header='whether to print column labels, default True')
@Substitution(shared_params=fmt.common_docstring,
returns=fmt.return_docstring)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, bold_rows=True,
classes=None, escape=True, max_rows=None, max_cols=None,
show_dimensions=False, notebook=False, decimal='.',
border=None, table_id=None):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
.. versionadded:: 0.23.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if (justify is not None and
justify not in fmt._VALID_JUSTIFY_PARAMETERS):
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal, table_id=table_id)
        # TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
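    # Usage sketch (CSS class and id values are illustrative):
    #
    #     html = df.to_html(classes=['table', 'table-striped'],
    #                       table_id='frame', border=0)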
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always show memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
            made based on column dtype and number of rows, assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
int_col 5 non-null int64
text_col 5 non-null object
float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
        Prints a summary of the column count and dtypes but not per-column
        information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
        Pipe the output of DataFrame.info to a buffer instead of sys.stdout,
        get the buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w", encoding="utf-8") as f:
... f.write(s)
260
        The `memory_usage` parameter allows deep introspection mode, especially
        useful for big DataFrames and fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index._summary())
if len(self.columns) == 0:
lines.append('Empty {name}'.format(name=type(self).__name__))
fmt.buffer_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max(len(pprint_thing(k)) for k in self.columns) + 4
counts = None
tmpl = "{count}{dtype}"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
'Columns must equal counts '
'({cols:d} != {counts:d})'.format(
cols=len(cols), counts=len(counts)))
tmpl = "{count} non-null {dtype}"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl.format(count=count,
dtype=dtype))
def _non_verbose_repr():
lines.append(self.columns._summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return ("{num:3.1f}{size_q} "
"{x}".format(num=num, size_q=size_qualifier, x=x))
num /= 1024.0
return "{num:3.1f}{size_q} {pb}".format(num=num,
size_q=size_qualifier,
pb='PB')
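        # e.g. _sizeof_fmt(1300, '') -> '1.3 KB' and
        #      _sizeof_fmt(1.5 * 1024 ** 4, '+') -> '1.5+ TB'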
if verbose:
_verbose_repr()
        elif verbose is False:  # specifically set to False, not necessarily None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k
in sorted(compat.iteritems(counts))]
lines.append('dtypes: {types}'.format(types=', '.join(dtypes)))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: {mem}\n".format(
mem=_sizeof_fmt(mem_usage, size_qualifier)))
fmt.buffer_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
            index in the returned Series. If ``index=True``, the memory usage
            of the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
sizes : Series
            A Series whose index is the original column names and whose values
            are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
pandas.Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 (1+0j) 1 True
1 1 1.0 (1+0j) 1 True
2 1 1.0 (1+0j) 1 True
3 1 1.0 (1+0j) 1 True
4 1 1.0 (1+0j) 1 True
>>> df.memory_usage()
Index 80
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 80
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5168
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
def transpose(self, *args, **kwargs):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
copy : bool, default False
If True, the underlying data is copied. Otherwise (default), no
copy is made if possible.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = com._unpickle_array(cols)
index = com._unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
# old unpickling
(vals, idx, cols), object_state = state
index = com._unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=com._unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
return com.maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except (TypeError, ValueError):
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""Put single value at passed column and index
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
try:
if takeable is True:
series = self._iget_item_cache(col)
return series._set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
_set_value.__doc__ = set_value.__doc__
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._box_col_values(values, label)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
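    # Sketch of the dispatch above (labels are illustrative):
    #
    #     df._ixs(0)            # first row as a Series (axis=0 branch)
    #     df._ixs(0, axis=1)    # first column as a Series (axis=1 branch)
    #     df._ixs(slice(0, 2))  # row slice -> DataFrame view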
def __getitem__(self, key):
key = com.apply_if_callable(key, self)
# shortcut if the key is in columns
try:
if self.columns.is_unique and key in self.columns:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
return self._get_item_cache(key)
except (TypeError, ValueError):
# The TypeError correctly catches non hashable "key" (e.g. list)
# The ValueError can be removed once GH #21729 is fixed
pass
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self._getitem_frame(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._convert_to_indexer(key, axis=1,
raise_missing=True)
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
data = data[key]
return data
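    # Sketch of how __getitem__ dispatches (column names are illustrative):
    #
    #     df['a']           # single column -> Series (item cache)
    #     df[['a', 'b']]    # list of columns -> DataFrame (take along axis=1)
    #     df[0:2]           # row slice -> DataFrame (_slice on axis=0)
    #     df[df['a'] > 0]   # boolean Series -> filtered rows
    #     df[df > 0]        # boolean DataFrame -> same-shaped masked frame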
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take(indexer, axis=0)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == '':
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
        :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python;
        however, the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
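    # Usage sketch (column and variable names are illustrative):
    #
    #     threshold = 0.5
    #     df.query('a > @threshold and b < a')  # '@' pulls in local variables
    #     df.query('a > b', inplace=True)       # filter df in place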
def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0.
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, or pandas object
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
pandas.eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=(include & exclude)))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(idx, dtype):
return idx, functools.partial(issubclass, dtype.type)
for idx, f in itertools.starmap(is_dtype_instance_mapper,
enumerate(self.dtypes)):
if include: # checks for the case of empty include or exclude
include_these.iloc[idx] = any(map(f, include))
if exclude:
exclude_these.iloc[idx] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com.get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
klass = _get_sliced_frame_result_type(values, self)
return klass(values, index=self.index, name=items, fastpath=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError(
'Array conditional must be same shape as self'
)
key = self._constructor(key, **self._construct_axes_dict())
if key.values.size and not is_bool_dtype(key.values):
raise TypeError(
'Must pass DataFrame or 2-d ndarray with boolean values only'
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, that we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
            except Exception:
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
        If series is a numpy array (not a Series/TimeSeries), it must be the
        same length as the DataFrame's index or an error will be thrown.
        Series/TimeSeries will be conformed to the DataFrame's index to
        ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
            Insertion index. Must satisfy 0 <= loc <= len(columns).
        column : string, number, or hashable object
            Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
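    # Usage sketch (labels and values are illustrative):
    #
    #     df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    #     df.insert(1, 'c', [7, 8, 9])                 # 'c' lands between 'a' and 'b'
    #     df.insert(1, 'c', 0, allow_duplicates=True)  # scalar value is broadcast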
def assign(self, **kwargs):
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
kwargs : keyword, value pairs
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
For Python 3.6 and above, later items in '\*\*kwargs' may refer to
newly created or modified columns in 'df'; items are computed and
assigned into 'df' in order. For Python 3.5 and below, the order of
        keyword arguments is not specified, so you cannot refer to newly created
or modified columns. All items are computed first, and then assigned
in alphabetical order.
.. versionchanged :: 0.23.0
Keyword argument order is maintained for Python 3.6 and later.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the keyword arguments depend on each other
>>> df = pd.DataFrame({'A': [1, 2, 3]})
>>> df.assign(B=df.A, C=lambda x:x['A']+ x['B'])
A B C
0 1 1 2
1 2 2 4
2 3 3 6
"""
data = self.copy()
# >= 3.6 preserve order of kwargs
if PY36:
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
else:
# <= 3.5: do all calculations first...
results = OrderedDict()
for k, v in kwargs.items():
results[k] = com.apply_if_callable(v, data)
# <= 3.5 and earlier
results = sorted(results.items())
# ... and then assign
for k, v in results:
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
from pandas.core.series import _sanitize_index
# Explicitly copy here, instead of in _sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = _sanitize_index(value, self.index, copy=False)
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(
value, pandas_dtype=True)
# upcast
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_type(value) or is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
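    # Sketch of the broadcast branch above (labels are illustrative): with
    # duplicate column labels, assigning a 1-D value to the shared label tiles
    # it across every occurrence of that label.
    #
    #     df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'a'])
    #     df['a'] = [10, 20]   # both 'a' columns receive [10, 20]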
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
        Returns
        -------
        values : ndarray
            The found values
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
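    # Usage sketch (labels are illustrative); one value per (row, col) pair:
    #
    #     rows = ['falcon', 'parrot']
    #     cols = ['speed', 'speed']
    #     values = df.lookup(rows, cols)  # ndarray of df.at[r, c] for each pair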
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level,
fill_value=None, limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
@rewrite_axis_style_signature('labels', [('method', None),
('copy', True),
('level', None),
('fill_value', np.nan),
('limit', None),
('tolerance', None)])
def reindex(self, *args, **kwargs):
axes = validate_axis_style_args(self, args, kwargs, 'labels',
'reindex')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('labels', None)
return super(DataFrame, self).reindex(**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
def drop(self, labels=None, axis=0, index=None, columns=None,
level=None, inplace=False, errors='raise'):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index, columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
dropped : pandas.DataFrame
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns
Series.drop : Return Series with specified index labels removed.
Raises
------
KeyError
If none of the labels are found in the selected axis
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3,4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... labels=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3,0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super(DataFrame, self).drop(labels=labels, axis=axis,
index=index, columns=columns,
level=level, inplace=inplace,
errors=errors)
@rewrite_axis_style_signature('mapper', [('copy', True),
('inplace', False),
('level', None)])
def rename(self, *args, **kwargs):
"""Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper, index, columns : dict-like or function, optional
dict-like or functions transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
axis : int or str, optional
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : DataFrame
See Also
--------
pandas.DataFrame.rename_axis
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('mapper', None)
return super(DataFrame, self).rename(**kwargs)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.fillna.__doc__)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad'):
return super(DataFrame, self).replace(to_replace=to_replace,
value=value, inplace=inplace,
limit=limit, regex=regex,
method=method)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
        ...                    'sale': [55, 40, 84, 31]})
        >>> df
           month  sale  year
0 1 55 2012
1 4 40 2014
2 7 84 2013
3 10 31 2014
Set the index to become the 'month' column:
>>> df.set_index('month')
sale year
month
1 55 2012
4 40 2014
7 84 2013
10 31 2014
Create a multi-index using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a multi-index using a set of values and a column:
>>> df.set_index([[1, 2, 3, 4], 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Returns
-------
dataframe : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, Series):
level = col._values
names.append(col.name)
elif isinstance(col, Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError('Index has duplicate keys: {dup}'.format(
dup=duplicates))
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
        reset : DataFrame
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
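        # Helper: extract the values of one index level, converting object
        # arrays where possible; when level codes are supplied, positions
        # coded -1 (missing in that level) are re-inserted as NaN.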
def _maybe_casted_values(index, labels=None):
values = index._values
if not isinstance(index, (PeriodIndex, DatetimeIndex)):
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
                # meaning there is nothing found in labels, so make all NaNs
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
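            # Insert the index levels as columns in reverse order so that
            # each insert at position 0 preserves the original level order;
            # for MultiIndex columns, pad the new column name with
            # ``col_fill`` up to the required number of column levels.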
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(DataFrame, self).isna()
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(DataFrame, self).isnull()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(DataFrame, self).notna()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(DataFrame, self).notnull()
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
            * 1, or 'columns' : Drop columns which contain missing values.
.. deprecated:: 0.23.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
# GH20987
msg = ("supplying multiple axes to axis is deprecated and "
"will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
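            # Decide which labels to keep from the non-NA count along the
            # other axis: at least ``thresh`` non-NA values, no NAs at all
            # (how='any'), or at least one non-NA value (how='all').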
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
result = self._take(mask.nonzero()[0], axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
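        Examples
        --------
        A minimal sketch of the default behaviour (rows 0 and 1 are
        identical, so only the first is kept):
        >>> df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'x', 'y']})
        >>> df.drop_duplicates()
           a  b
        0  1  x
        2  2  y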
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
            inds, = (~duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
            return self[~duplicated]
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
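        Examples
        --------
        A minimal sketch: rows 0 and 1 are identical, so with the default
        ``keep='first'`` only row 1 is flagged:
        >>> df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'x', 'y']})
        >>> df.duplicated()
        0    False
        1     True
        2    False
        dtype: bool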
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
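        # Factorize each column of ``subset`` into integer codes, then fold
        # the per-column codes into a single group id per row; rows that
        # share a group id are duplicates of each other.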
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.iteritems()
if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
stacklevel = 2 # Number of stack levels from df.sort_values
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
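        # Multiple sort keys: build one lexicographic indexer over all keys.
        # Single key: a plain argsort honouring ``kind`` and ``na_position``.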
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
keys = []
for x in by:
k = self._get_label_or_level_values(x, axis=axis,
stacklevel=stacklevel)
keys.append(k)
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self._get_label_or_level_values(by, axis=axis,
stacklevel=stacklevel)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, "
"please use .sort_values(by=...)",
FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
if level is not None:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order).
.. deprecated:: 0.20.0
Use :meth:`DataFrame.sort_index`
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
            - ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, 8, 2],
... 'b': list('abdcef'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0, 9.0]})
>>> df
a b c
0 1 a 1.0
1 10 b 2.0
2 8 d NaN
3 11 c 3.0
4 8 e 4.0
5 2 f 9.0
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "a".
>>> df.nlargest(3, 'a')
a b c
3 11 c 3.0
1 10 b 2.0
2 8 d NaN
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'a', keep='last')
a b c
3 11 c 3.0
1 10 b 2.0
4 8 e 4.0
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'a', keep='all')
a b c
3 11 c 3.0
1 10 b 2.0
2 8 d NaN
4 8 e 4.0
To order by the largest values in column "a" and then "c", we can
specify multiple columns like in the next example.
>>> df.nlargest(3, ['a', 'c'])
a b c
4 8 e 4.0
3 11 c 3.0
1 10 b 2.0
Attempting to use ``nlargest`` on non-numeric dtypes will raise a
``TypeError``:
>>> df.nlargest(3, 'b')
Traceback (most recent call last):
TypeError: Column 'b' has dtype object, cannot use method 'nlargest'
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
            - ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, 8, 2],
... 'b': list('abdcef'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0, 9.0]})
>>> df
a b c
0 1 a 1.0
1 10 b 2.0
2 8 d NaN
3 11 c 3.0
4 8 e 4.0
5 2 f 9.0
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "a".
>>> df.nsmallest(3, 'a')
a b c
0 1 a 1.0
5 2 f 9.0
2 8 d NaN
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'a', keep='last')
a b c
0 1 a 1.0
5 2 f 9.0
4 8 e 4.0
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'a', keep='all')
a b c
0 1 a 1.0
5 2 f 9.0
2 8 d NaN
4 8 e 4.0
To order by the largest values in column "a" and then "c", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['a', 'c'])
a b c
0 1 a 1.0
5 2 f 9.0
4 8 e 4.0
Attempting to use ``nsmallest`` on non-numeric dtypes will raise a
``TypeError``:
>>> df.nsmallest(3, 'b')
Traceback (most recent call last):
TypeError: Column 'b' has dtype object, cannot use method 'nsmallest'
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : same type as caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
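        Examples
        --------
        A minimal sketch on a hypothetical two-level index:
        >>> idx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)])
        >>> df = pd.DataFrame({'x': [10, 20]}, index=idx)
        >>> df.swaplevel(0, 1).index.tolist()
        [(1, 'a'), (2, 'b')]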
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
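        Examples
        --------
        A minimal sketch on a hypothetical index with named levels:
        >>> idx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)],
        ...                                 names=['letter', 'number'])
        >>> df = pd.DataFrame({'x': [10, 20]}, index=idx)
        >>> df.reorder_levels(['number', 'letter']).index.names
        FrozenList(['number', 'letter'])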
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
if this._is_mixed_type or other._is_mixed_type:
# iterate over columns
if this.columns.is_unique:
# unique columns
result = {col: _arith_op(this[col], other[col])
for col in this}
result = self._constructor(result, index=new_index,
columns=new_columns, copy=False)
else:
# non-unique columns
result = {i: _arith_op(this.iloc[:, i], other.iloc[:, i])
for i, col in enumerate(this.columns)}
result = self._constructor(result, index=new_index, copy=False)
result.columns = new_columns
return result
else:
result = _arith_op(this.values, other.values)
return self._constructor(result, index=new_index, columns=new_columns,
copy=False)
def _combine_match_index(self, other, func, level=None):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
new_data = func(left.values.T, right.values).T
return self._constructor(new_data,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None, try_cast=True):
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
new_data = left._data.eval(func=func, other=right,
axes=[left.columns, self.index],
try_cast=try_cast)
return self._constructor(new_data)
def _combine_const(self, other, func, errors='raise', try_cast=True):
new_data = self._data.eval(func=func, other=other,
errors=errors,
try_cast=try_cast)
return self._constructor(new_data)
def _compare_frame(self, other, func, str_rep):
# compare_frame assumes self._indexed_same(other)
import pandas.core.computation.expressions as expressions
# unique
if self.columns.is_unique:
def _compare(a, b):
return {col: func(a[col], b[col]) for col in a.columns}
new_data = expressions.evaluate(_compare, str_rep, self, other)
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
# non-unique
else:
def _compare(a, b):
return {i: func(a.iloc[:, i], b.iloc[:, i])
for i, col in enumerate(a.columns)}
new_data = expressions.evaluate(_compare, str_rep, self, other)
result = self._constructor(data=new_data, index=self.index,
copy=False)
result.columns = self.columns
return result
def combine(self, other, func, fill_value=None, overwrite=True):
"""
Perform column-wise combine with another DataFrame based on a
passed function.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
            scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : boolean, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
result : DataFrame
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 NaN
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1],}, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1],}, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
            # don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
# if we have different dtypes, possibly promote
new_dtype = this_dtype
if not is_dtype_equal(this_dtype, other_dtype):
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
# see if we need to be represented as i8 (datetimelike)
# try to keep us at this dtype
needs_i8_conversion_i = needs_i8_conversion(new_dtype)
if needs_i8_conversion_i:
arr = func(series, otherSeries, True)
else:
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)._convert(datetime=True,
copy=False)
def combine_first(self, other):
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
combined : DataFrame
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function
"""
import pandas.core.computation.expressions as expressions
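        # Column-wise combiner: wherever ``x`` (this frame) is missing, take
        # the value from ``y`` (other); datetimelike data is compared via
        # its i8 (nanosecond integer) view.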
def combiner(x, y, needs_i8_conversion=False):
x_values = x.values if hasattr(x, 'values') else x
y_values = y.values if hasattr(y, 'values') else y
if needs_i8_conversion:
mask = isna(x)
x_values = x_values.view('i8')
y_values = y_values.view('i8')
else:
mask = isna(x_values)
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> boolean 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
raise_conflict : bool, default False
If True, will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
Raises
------
ValueError
When `raise_conflict` is True and there's overlapping non-NA data.
See Also
--------
dict.update : Similar method for dictionaries.
        DataFrame.merge : For column(s)-on-column(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
        For a Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
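        # ``other`` is now aligned to this frame's index and columns; update
        # each column through a boolean mask, keeping the existing value
        # where the mask is True and taking the value from ``other``
        # elsewhere.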
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if raise_conflict:
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
            # don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
"""
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------
index : string or object, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column to use to make new frame's columns.
values : string, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
            .. versionchanged:: 0.23.0
Also accept list of column names.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
            values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : pivot based on the index values instead of a
column.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
from pandas.core.reshape.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs['pivot_table'] = """
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
        it is used in the same manner as the column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
        it is used in the same manner as the column values.
aggfunc : function, list of functions, dict, default numpy.mean
        If a list of functions is passed, the resulting pivot table will
        have hierarchical columns whose top level are the function names
        (inferred from the function objects themselves).
        If a dict is passed, the key is the column to aggregate and the
        value is the function or list of functions to apply.
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
margins_name : string, default 'All'
Name of the row / column that will contain the totals
when margins is True.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]})
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
>>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max median min
A C
bar large 5.500000 16 14.5 13
small 5.500000 15 14.5 14
foo large 2.000000 10 9.5 9
small 2.333333 12 11.0 8
Returns
-------
table : DataFrame
See also
--------
DataFrame.pivot : pivot without aggregation that can handle
non-numeric data
"""
@Substitution('')
@Appender(_shared_docs['pivot_table'])
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
from pandas.core.reshape.pivot import pivot_table
return pivot_table(self, values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value,
margins=margins, dropna=dropna,
margins_name=margins_name)
def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being re-organised from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
        vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
"Unpivots" a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
dict(caller='df.melt(',
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from pandas.core.reshape.melt import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another
element in the DataFrame (default is the element in the same column
of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
            .. versionadded:: 0.16.1
Returns
-------
diffed : DataFrame
See Also
--------
Series.diff: First discrete difference for a Series.
DataFrame.pct_change: Percent change over given number of periods.
DataFrame.shift: Shift index by desired number of periods with an
optional time freq.
Examples
--------
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0.0 0.0
1 NaN -1.0 3.0
2 NaN -1.0 7.0
3 NaN -1.0 13.0
4 NaN 0.0 20.0
5 NaN 2.0 28.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(self,
key, # type: Union[str, List[str]]
ndim, # type: int
subset=None # type: Union[Series, DataFrame, None]
):
# type: (...) -> Union[Series, DataFrame]
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_doc = dedent("""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d,
axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
See also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
pandas.core.groupby.GroupBy : Perform operations over groups.
pandas.core.resample.Resampler : Perform operations over resampled bins.
pandas.core.window.Rolling : Perform operations over rolling window.
pandas.core.window.Expanding : Perform operations over expanding window.
pandas.core.window.EWM : Perform operation over exponential weighted
window.
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
result = None
try:
result, how = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result, how = (super(DataFrame, self.T)
._aggregate(arg, *args, **kwargs))
result = result.T if result is not None else result
return result, how
return super(DataFrame, self)._aggregate(arg, *args, **kwargs)
agg = aggregate
@Appender(_shared_docs['transform'] % _shared_doc_kwargs)
def transform(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
if axis == 1:
return super(DataFrame, self.T).transform(func, *args, **kwargs).T
return super(DataFrame, self).transform(func, *args, **kwargs)
def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
broadcast : bool, optional
Only relevant for aggregation functions:
* ``False`` or ``None`` : returns a Series whose length is the
length of the index or the number of columns (based on the
`axis` parameter)
* ``True`` : results will be broadcast to the original shape
of the frame, the original index and columns will be retained.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
raw : bool, default False
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
reduce : bool or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
`apply` will use `reduce` to determine whether the result
should be a Series or a DataFrame. If ``reduce=None`` (the
default), `apply`'s return value will be guessed by calling
`func` on an empty Series
(note: while guessing, exceptions raised by `func` will be
ignored).
If ``reduce=True`` a Series will always be returned, and if
``reduce=False`` a DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by ``result_type='reduce'``.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Notes
-----
In the current implementation apply calls `func` twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
column/row.
See also
--------
DataFrame.applymap: For elementwise operations
DataFrame.aggregate: only perform aggregating type operations
DataFrame.transform: only perform transforming type operations
Examples
--------
>>> df = pd.DataFrame([[4, 9],] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
        Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing result_type='expand' will expand list-like results
        to columns of a DataFrame
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
Returns
-------
applied : Series or DataFrame
"""
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
broadcast=broadcast,
raw=raw,
reduce=reduce,
result_type=result_type,
args=args,
kwds=kwds)
return op.get_result()
def applymap(self, func):
"""
        Apply a function to a DataFrame elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
See also
--------
DataFrame.apply : Apply a function along input axis of DataFrame
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False,
verify_integrity=False, sort=None):
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default None
Sort columns if the columns of `self` and `other` are not aligned.
The default sorting is deprecated and will change to not-sorting
in a future version of pandas. Explicitly pass ``sort=True`` to
silence the warning and sort. Explicitly pass ``sort=False`` to
silence the warning and not sort.
.. versionadded:: 0.23.0
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
        column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : name, tuple/list of names, or array-like
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
specified) with other frame's index, and sort it
lexicographically
* inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index, preserving the order
of the calling's one
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword)
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Support for specifying index levels as the `on` parameter was added
in version 0.23.0
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> caller
A key
0 A0 K0
1 A1 K1
2 A2 K2
3 A3 K3
4 A4 K4
5 A5 K5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
B key
0 B0 K0
1 B1 K1
2 B2 K2
Join DataFrames using their indexes.
>>> caller.join(other, lsuffix='_caller', rsuffix='_other')
             A key_caller    B key_other
0 A0 K0 B0 K0
1 A1 K1 B1 K1
2 A2 K2 B2 K2
3 A3 K3 NaN NaN
4 A4 K4 NaN NaN
5 A5 K5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both caller and other. The joined DataFrame will have
key as its index.
>>> caller.set_index('key').join(other.set_index('key'))
              A    B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the on
parameter. DataFrame.join always uses other's index but we can use any
column in the caller. This method preserves the original caller's
index in the result.
>>> caller.join(other.set_index('key'), on='key')
               A key     B
0 A0 K0 B0
1 A1 K1 B1
2 A2 K2 B2
3 A3 K3 NaN
4 A4 K4 NaN
5 A5 K5 NaN
See also
--------
DataFrame.merge : For column(s)-on-columns(s) operations
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
from pandas.core.reshape.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator, validate=validate)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1 0.17
second 0.0 1 0.58
third 0.9 0 0.49
Returns
-------
DataFrame object
See Also
--------
numpy.around
Series.round
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
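        Examples
        --------
        A minimal illustration (the frame below is hypothetical and added here
        for clarity; column ``'b'`` is an exact linear function of ``'a'``):
        >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [6, 4, 2]})
        >>> df.corr()
             a    b
        a  1.0 -1.0
        b -1.0  1.0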
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(ensure_float64(mat),
minp=min_periods)
else:
if min_periods is None:
min_periods = 1
mat = ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
pandas.Series.cov : compute covariance with another Series
pandas.core.window.EWM.cov: exponential weighted sample covariance
pandas.core.window.Expanding.cov : expanding sample covariance
pandas.core.window.Rolling.cov : rolling sample covariance
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
        because the estimated covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame, Series
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
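        Examples
        --------
        A minimal sketch (the two frames are hypothetical and added here for
        illustration only):
        >>> df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [4, 3, 2, 1]})
        >>> df2 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [1, 2, 3, 4]})
        >>> df1.corrwith(df2)
        a    1.0
        b   -1.0
        dtype: float64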
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(other.corr, axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each **row**.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : boolean, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: number of non-NA elements in a Series
DataFrame.shape: number of DataFrame rows and columns (including NA
elements)
DataFrame.isna: boolean same-sized DataFrame showing places of NA
elements
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", None, "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 None 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 4
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 2
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._data.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical "
"{ax}.".format(ax=self._get_axis_name(axis)))
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notna(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if axis is None and filter_type == 'bool':
labels = None
constructor = None
else:
# TODO: Make other agg func handle axis=None properly
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
constructor = self._constructor
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
if (filter_type == 'bool' and is_object_dtype(values) and
axis is None):
# work around https://github.com/numpy/numpy/issues/10489
# TODO: combine with hasattr(result, 'dtype') further down
# hard since we don't have `values` down there.
result = np.bool_(result)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
# we only end up here if we have not specified
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed type.
# So let's just do what we can
from pandas.core.apply import frame_apply
opa = frame_apply(self,
func=f,
result_type='expand',
ignore_failures=True)
result = opa.get_result()
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except Exception:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError(
"Handling exception with filter_type {f} not"
"implemented.".format(f=filter_type))
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type {f}"
"not supported.".format(f=filter_type))
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notna(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
if constructor is not None:
result = Series(result, index=labels)
return result
def nunique(self, axis=0, dropna=True):
"""
Return Series with number of distinct observations over requested
axis.
.. versionadded:: 0.20.0
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique : Series
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
>>> df.nunique(axis=1)
0 1
1 2
2 2
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
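        Examples
        --------
        A small sketch (the frame is hypothetical, added here for
        illustration):
        >>> df = pd.DataFrame({'A': [4, 2, 3], 'B': [3, 1, 5]})
        >>> df.idxmin()
        A    1
        B    1
        dtype: int64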
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
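        Examples
        --------
        A small sketch (the frame is hypothetical, added here for
        illustration):
        >>> df = pd.DataFrame({'A': [4, 2, 3], 'B': [3, 1, 5]})
        >>> df.idxmax()
        A    0
        B    2
        dtype: int64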
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
""" let's be explicit about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False, dropna=True):
"""
Gets the mode(s) of each element along the axis selected. Adds a row
for each mode per label, fills in gaps with nan.
Note that there could be multiple values returned for the selected
        axis (when more than one item shares the maximum frequency), which is
        the reason why a DataFrame is returned. If you want to impute missing
        values with the mode in a DataFrame ``df``, you can just do this:
``df.fillna(df.mode().iloc[0])``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : boolean, default False
if True, only apply to numeric columns
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : DataFrame (sorted)
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})
>>> df.mode()
A
0 1
1 2
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis, a la
numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
numeric_only : boolean, default True
If False, the quantile of datetime and timedelta data will be
computed as well
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantiles : Series or DataFrame
- If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
- If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
        ...                   columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
        ...                    'B': [pd.Timestamp('2010'),
        ...                          pd.Timestamp('2011')],
        ...                    'C': [pd.Timedelta('1 days'),
        ...                          pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
See Also
--------
pandas.core.window.Rolling.quantile
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
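        Examples
        --------
        A hedged sketch (names are illustrative; the result is assigned so the
        example does not depend on a particular index repr):
        >>> prng = pd.period_range('2018-01', periods=3, freq='M')
        >>> df = pd.DataFrame({'x': [1, 2, 3]}, index=prng)
        >>> ts_df = df.to_timestamp()   # index becomes a DatetimeIndex
        >>> isinstance(ts_df.index, pd.DatetimeIndex)
        True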
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format(
ax=axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
        freq : string, default None (inferred from the index)
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
ts : TimeSeries with PeriodIndex
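        Examples
        --------
        A hedged sketch (names are illustrative; ``freq`` is inferred from the
        monthly DatetimeIndex here):
        >>> dti = pd.date_range('2018-01-31', periods=3, freq='M')
        >>> df = pd.DataFrame({'x': [1, 2, 3]}, index=dti)
        >>> pdf = df.to_period()        # index becomes a monthly PeriodIndex
        >>> isinstance(pdf.index, pd.PeriodIndex)
        True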
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format(
ax=axis))
return self._constructor(new_data)
def isin(self, values):
"""
Return boolean DataFrame showing whether each element in the
DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dictionary
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dictionary, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame of booleans
Examples
--------
When ``values`` is a list:
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> df.isin([1, 3, 12, 'a'])
A B
0 True True
1 False False
2 True False
When ``values`` is a dict:
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
>>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
A B
0 True False # Note that B didn't match the 1 here.
1 False True
2 True True
When ``values`` is a Series or DataFrame:
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> df2 = pd.DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
>>> df.isin(df2)
A B
0 True False
1 False False # Column A in `df2` has a 3, but not at index 1.
2 True True
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
algorithms.isin(self.values.ravel(),
values).reshape(self.shape), self.index,
self.columns)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", gfx.FramePlotMethods)
hist = gfx.hist_frame
boxplot = gfx.boxplot_frame
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0},
docs={
'index': 'The index (row labels) of the DataFrame.',
'columns': 'The column labels of the DataFrame.'})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [ensure_index(columns), ensure_index(index)]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d' %
(lengths[0], len(index)))
raise ValueError(msg)
else:
index = ibase.default_index(lengths[0])
return ensure_index(index)
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
# GH#21861
values = np.array([convert(v) for v in values])
else:
values = convert(values)
        except Exception:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = ibase.default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = ibase.default_index(len(data))
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = ensure_index(arr_columns).get_indexer(columns)
arr_columns = ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_objs_combined_axis
if columns is None:
columns = _get_objs_combined_axis(data, sort=False)
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = ibase.default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = com.values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = ibase.default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('{col:d} columns passed, passed data had '
'{con} columns'.format(col=len(columns),
con=len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
def _get_names_from_index(data):
has_some_name = any(getattr(s, 'name', None) is not None for s in data)
if not has_some_name:
return ibase.default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
v = com.dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=np.nan)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return u'{s}'.format(s=s)[:space].ljust(space)
| 36.561692
| 169
| 0.532059
|
d7e9eff463b2892c2d8827ec61ac2ac40f295522
| 10,123
|
py
|
Python
|
rdkit/ML/NaiveBayes/ClassificationModel.py
|
kazuyaujihara/rdkit
|
06027dcd05674787b61f27ba46ec0d42a6037540
|
[
"BSD-3-Clause"
] | 1,609
|
2015-01-05T02:41:13.000Z
|
2022-03-30T21:57:24.000Z
|
rdkit/ML/NaiveBayes/ClassificationModel.py
|
kazuyaujihara/rdkit
|
06027dcd05674787b61f27ba46ec0d42a6037540
|
[
"BSD-3-Clause"
] | 3,412
|
2015-01-06T12:13:33.000Z
|
2022-03-31T17:25:41.000Z
|
rdkit/ML/NaiveBayes/ClassificationModel.py
|
kazuyaujihara/rdkit
|
06027dcd05674787b61f27ba46ec0d42a6037540
|
[
"BSD-3-Clause"
] | 811
|
2015-01-11T03:33:48.000Z
|
2022-03-28T11:57:49.000Z
|
# $Id$
#
# Copyright (C) 2004-2008 Greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" Defines Naive Baysean classification model
Based on development in: Chapter 6 of "Machine Learning" by Tom Mitchell
"""
import numpy
from rdkit.ML.Data import Quantize
def _getBinId(val, qBounds):
bid = 0
for bnd in qBounds:
if (val > bnd):
bid += 1
return bid
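# A small illustration of the helper above (values are hypothetical):
#   _getBinId(0.7, [0.25, 0.5, 1.0]) -> 2   (0.7 exceeds the first two bounds)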
# FIX: this class has not been updated to new-style classes
# (RD Issue380) because that would break all of our legacy pickled
# data. Until a solution is found for this breakage, an update is
# impossible.
class NaiveBayesClassifier:
"""
_NaiveBayesClassifier_s can save the following pieces of internal state, accessible via
standard setter/getter functions:
1) _Examples_: a list of examples which have been predicted
    2) _TrainingExamples_: List of training examples - the descriptor values of these examples
       are quantized based on info gain using ML/Data/Quantize.py if necessary
    3) _TestExamples_: the list of examples used to test the model
    4) _BadExamples_ : list of examples that were incorrectly classified
    5) _QBoundVals_: Quant bound values for each variable - a list of lists
    6) _QBounds_ : Number of bounds for each variable
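  A hedged usage sketch (the attribute list, bounds and training data below are
  purely illustrative, not part of the original documentation):
    >>> from rdkit.ML.NaiveBayes.ClassificationModel import NaiveBayesClassifier
    >>> model = NaiveBayesClassifier([0, 1], nPossibleVals=[2, 2, 2],
    ...                              nQuantBounds=[0, 0, 0])
    >>> model.SetTrainingExamples([[0, 1, 1], [1, 0, 0], [0, 0, 0], [1, 1, 1]])
    >>> model.trainModel()
    >>> model.ClassifyExample([0, 1, 1])
    1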
"""
def __init__(self, attrs, nPossibleVals, nQuantBounds, mEstimateVal=-1.0, useSigs=False):
""" Constructor
"""
self._attrs = attrs
self._mEstimateVal = mEstimateVal
self._useSigs = useSigs
self._classProbs = {}
self._examples = []
self._trainingExamples = []
self._testExamples = []
self._badExamples = []
self._QBoundVals = {}
self._nClasses = nPossibleVals[-1]
self._qBounds = nQuantBounds
self._nPosVals = nPossibleVals
self._needsQuant = 1
self._name = ""
self.mprob = -1.0
    # for the sake of efficiency let's try to change the conditional probabilities
    # to a numpy array instead of a dictionary. The three-dimensional array is indexed
    # on the activity class, the descriptor ID and the descriptor binID
# self._condProbs = {}
# self._condProbs = numpy.zeros((self._nClasses, max(self._attrs)+1,
# max(self._nPosVals)+1), 'd')
self._condProbs = [None] * self._nClasses
for i in range(self._nClasses):
if not (hasattr(self, '_useSigs') and self._useSigs):
nA = max(self._attrs) + 1
self._condProbs[i] = [None] * nA
for j in range(nA):
nV = self._nPosVals[j]
if self._qBounds[j]:
nV = max(nV, self._qBounds[j] + 1)
self._condProbs[i][j] = [0.0] * nV
else:
self._condProbs[i] = {}
for idx in self._attrs:
self._condProbs[i][idx] = [0.0] * 2
def GetName(self):
return self._name
def SetName(self, name):
self._name = name
def NameModel(self, varNames):
self.SetName('NaiveBayesClassifier')
def GetExamples(self):
return self._examples
def SetExamples(self, examples):
self._examples = examples
def GetTrainingExamples(self):
return self._trainingExamples
def SetTrainingExamples(self, examples):
self._trainingExamples = examples
def GetTestExamples(self):
return self._testExamples
def SetTestExamples(self, examples):
self._testExamples = examples
def SetBadExamples(self, examples):
self._badExamples = examples
def GetBadExamples(self):
return self._badExamples
def _computeQuantBounds(self):
neg = len(self._trainingExamples)
natr = len(self._attrs)
# make a list of results and values
allVals = numpy.zeros((neg, natr), 'd')
res = [] # list of y values
i = 0
for eg in self._trainingExamples:
res.append(eg[-1])
j = 0
for ai in self._attrs:
val = eg[ai]
allVals[i, j] = val
j += 1
i += 1
# now loop over each of the columns and compute the bounds
# the number of bounds is determined by the maximum info gain
i = 0
for ai in self._attrs:
nbnds = self._qBounds[ai]
if nbnds > 0:
mbnds = []
mgain = -1.0
for j in range(1, nbnds + 1):
bnds, igain = Quantize.FindVarMultQuantBounds(
allVals[:, i], j, res, self._nClasses)
if (igain > mgain):
mbnds = bnds
mgain = igain
self._QBoundVals[ai] = mbnds
i += 1
def trainModel(self):
""" We will assume at this point that the training examples have been set
We have to estmate the conditional probabilities for each of the (binned) descriptor
component give a outcome (or class). Also the probabilities for each class is estimated
"""
# first estimate the class probabilities
n = len(self._trainingExamples)
for i in range(self._nClasses):
self._classProbs[i] = 0.0
# for i in range(self._nClasses):
# self._classProbs[i] = float(self._classProbs[i])/n
# first find the bounds for each descriptor value if necessary
if not self._useSigs and max(self._qBounds) > 0:
self._computeQuantBounds()
# now compute the probabilities
ncls = {}
incr = 1.0 / n
for eg in self._trainingExamples:
cls = eg[-1]
self._classProbs[cls] += incr
ncls[cls] = ncls.get(cls, 0) + 1
tmp = self._condProbs[cls]
if not self._useSigs:
for ai in self._attrs:
bid = eg[ai]
if self._qBounds[ai] > 0:
bid = _getBinId(bid, self._QBoundVals[ai])
tmp[ai][bid] += 1.0
else:
for ai in self._attrs:
if eg[1].GetBit(ai):
tmp[ai][1] += 1.0
else:
tmp[ai][0] += 1.0
# for key in self._condProbs:
for cls in range(self._nClasses):
if cls not in ncls:
continue
# cls = key[0]
tmp = self._condProbs[cls]
for ai in self._attrs:
if not self._useSigs:
nbnds = self._nPosVals[ai]
if (self._qBounds[ai] > 0):
nbnds = self._qBounds[ai]
else:
nbnds = 2
for bid in range(nbnds):
if self._mEstimateVal <= 0.0:
            # this is simply the fraction of the time this descriptor component assumes
            # this value for the examples that belong to a specific class
# self._condProbs[key] = (float(self._condProbs[key]))/ncls[cls]
tmp[ai][bid] /= ncls[cls]
else:
            # this is a bit more complicated form - more appropriate for unbalanced data
            # see "Machine Learning" by Tom Mitchell section 6.9.1.1
            # this is the probability that this descriptor component can take this specific value;
            # in the lack of any other information it is simply the inverse of the number of
            # possible values 'npossible'
            # If we quantized this component then
            # npossible = 1 + len(self._QBoundVals[ai])
            # else if we did not quantize (the descriptor came pre-quantized)
            # npossible = nPossibleVals[ai]
# ai = key[1]
pdesc = 0.0
if self._qBounds[ai] > 0:
pdesc = 1.0 / (1 + len(self._QBoundVals[ai]))
elif (self._nPosVals[ai] > 0):
pdesc = 1.0 / (self._nPosVals[ai])
else:
raise ValueError(
'Neither Bounds set nor data pre-quantized for attribute ' + str(ai))
tmp[ai][bid] += (self._mEstimateVal) * pdesc
tmp[ai][bid] /= (ncls[cls] + self._mEstimateVal)
def ClassifyExamples(self, examples, appendExamples=0):
preds = []
for eg in examples:
pred = self.ClassifyExample(eg, appendExamples)
preds.append(int(pred))
return preds
def GetClassificationDetails(self):
""" returns the probability of the last prediction """
return self.mprob
def ClassifyExample(self, example, appendExamples=0):
""" Classify an example by summing over the conditional probabilities
The most likely class is the one with the largest probability
"""
if appendExamples:
self._examples.append(example)
clsProb = {}
for key, prob in self._classProbs.items():
clsProb[key] = prob
tmp = self._condProbs[key]
for ai in self._attrs:
if not (hasattr(self, '_useSigs') and self._useSigs):
bid = example[ai]
if self._qBounds[ai] > 0:
bid = _getBinId(bid, self._QBoundVals[ai])
else:
if example[1].GetBit(ai):
bid = 1
else:
bid = 0
clsProb[key] *= tmp[ai][bid]
mkey = -1
self.mprob = -1.0
for key, prob in clsProb.items():
if (prob > self.mprob):
mkey = key
self.mprob = prob
return mkey
| 36.024911
| 109
| 0.533735
|
9d3e347088cde32ff29b272a06cbe7014fd5dcf2
| 726
|
py
|
Python
|
fixture/session.py
|
hvolena/python_training
|
621a9342939054f32129f9e0652a786269f0174b
|
[
"Apache-2.0"
] | null | null | null |
fixture/session.py
|
hvolena/python_training
|
621a9342939054f32129f9e0652a786269f0174b
|
[
"Apache-2.0"
] | null | null | null |
fixture/session.py
|
hvolena/python_training
|
621a9342939054f32129f9e0652a786269f0174b
|
[
"Apache-2.0"
] | null | null | null |
class SessionHelper:
def __init__(self, app):
self.app = app
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
while not wd.find_element_by_name("user"):
pass
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_xpath("//input[@value='Login']").click()
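# A hedged usage sketch (assumes an ``app`` fixture object that exposes a
# Selenium WebDriver as ``app.wd`` and provides ``open_home_page()``; the
# credentials below are illustrative only):
#
#   session = SessionHelper(app)
#   session.login("admin", "secret")
#   ...  # exercise the application under test
#   session.logout()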
| 33
| 67
| 0.632231
|
56a27d2a0f9aab5376986b6edfdd24da60a530ba
| 869
|
py
|
Python
|
first_django_project/first_django_project/urls.py
|
ArRosid/Django-Article
|
92bfb1047b734ac1627ab17163d4d077ed0d2c44
|
[
"MIT"
] | null | null | null |
first_django_project/first_django_project/urls.py
|
ArRosid/Django-Article
|
92bfb1047b734ac1627ab17163d4d077ed0d2c44
|
[
"MIT"
] | null | null | null |
first_django_project/first_django_project/urls.py
|
ArRosid/Django-Article
|
92bfb1047b734ac1627ab17163d4d077ed0d2c44
|
[
"MIT"
] | null | null | null |
"""first_django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# from . import views
from .views import home, about_me
urlpatterns = [
path('admin/', admin.site.urls),
path('', home),
path('about/', about_me)
]
| 31.035714
| 77
| 0.703107
|
9e0b7a9517a7020a66b30aa03bb6b6883c97425d
| 6,740
|
py
|
Python
|
src/out/PLDI19evaluation/squeezenet/tensorflow/squeezenet.py
|
supunab/Lantern
|
932a031816617d71c46653f3b2245129a6a8a7c8
|
[
"BSD-3-Clause"
] | 158
|
2018-03-28T21:58:07.000Z
|
2022-02-22T00:49:46.000Z
|
src/out/PLDI19evaluation/squeezenet/tensorflow/squeezenet.py
|
douxiansheng/Lantern
|
f453de532da638c1f467953b32bbe49a3dedfa45
|
[
"BSD-3-Clause"
] | 35
|
2018-09-03T21:27:15.000Z
|
2019-05-11T02:17:49.000Z
|
src/out/PLDI19evaluation/squeezenet/tensorflow/squeezenet.py
|
douxiansheng/Lantern
|
f453de532da638c1f467953b32bbe49a3dedfa45
|
[
"BSD-3-Clause"
] | 15
|
2018-03-29T06:29:22.000Z
|
2020-05-20T06:56:03.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.layers import conv2d, avg_pool2d, max_pool2d
from tensorflow.contrib.layers import batch_norm, l2_regularizer
from tensorflow.contrib.framework import add_arg_scope
from tensorflow.contrib.framework import arg_scope
@add_arg_scope
def fire_module(inputs,
squeeze_depth,
expand_depth,
reuse=None,
scope=None):
with tf.variable_scope(scope, 'fire', [inputs], reuse=reuse):
with arg_scope([conv2d, max_pool2d]):
net = _squeeze(inputs, squeeze_depth)
net = _expand(net, expand_depth)
return net
def fire_module1(inputs, squeeze_depth, expand_depth, reuse=None, scope=None):
with tf.variable_scope(scope, 'fire', [inputs], reuse = reuse):
net = _squeeze(inputs, squeeze_depth)
net = _expand(net, expand_depth)
return net
def _squeeze(inputs, num_outputs):
return conv2d(inputs, num_outputs, [1, 1], stride=1, scope='squeeze')
def _expand(inputs, num_outputs):
with tf.variable_scope('expand'):
e1x1 = conv2d(inputs, num_outputs, [1, 1], stride=1, scope='1x1')
e3x3 = conv2d(inputs, num_outputs, [3, 3], scope='3x3')
return tf.concat([e1x1, e3x3], 1)
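# Shape sketch for a fire module (hedged; assumes the NCHW layout configured
# for the models below -- in NCHW the channel axis is 1, which is the concat
# axis used above):
#   inputs  : [N, C, H, W]
#   squeeze : 1x1 conv         -> [N, squeeze_depth, H, W]
#   expand  : concat(1x1, 3x3) -> [N, 2 * expand_depth, H, W]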
class Squeezenet(object):
"""Original squeezenet architecture for 224x224 images."""
name = 'squeezenet'
def __init__(self, args):
self._num_classes = args.num_classes
self._weight_decay = args.weight_decay
self._batch_norm_decay = args.batch_norm_decay
self._is_built = False
def build(self, x, is_training):
self._is_built = True
with tf.variable_scope(self.name, values=[x]):
with arg_scope(_arg_scope(is_training,
self._weight_decay,
self._batch_norm_decay)):
return self._squeezenet(x, self._num_classes)
@staticmethod
def _squeezenet(images, num_classes=1000):
net = conv2d(images, 96, [7, 7], stride=2, scope='conv1')
net = max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
net = fire_module(net, 16, 64, scope='fire2')
net = fire_module(net, 16, 64, scope='fire3')
net = fire_module(net, 32, 128, scope='fire4')
net = max_pool2d(net, [3, 3], stride=2, scope='maxpool4')
net = fire_module(net, 32, 128, scope='fire5')
net = fire_module(net, 48, 192, scope='fire6')
net = fire_module(net, 48, 192, scope='fire7')
net = fire_module(net, 64, 256, scope='fire8')
net = max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
net = fire_module(net, 64, 256, scope='fire9')
net = conv2d(net, num_classes, [1, 1], stride=1, scope='conv10')
net = avg_pool2d(net, [13, 13], stride=1, scope='avgpool10')
logits = tf.squeeze(net, [2], name='logits')
return logits
class Squeezenet_CIFAR1(object):
name = 'squeezenet_cifar'
def __init__(self, args):
self._is_built = False
def build(self, x, is_training):
self._is_built = True
with tf.variable_scope(self.name, values=[x]):
return self._squeezenet(x)
def _squeezenet(images, num_classes = 10):
net = conv2d(images, 96, [3, 3], scope='conv1')
net = max_pool2d(net, [2, 2], scope='maxpool1')
net = fire_module1(net, 16, 64, scope='fire2')
net = fire_module1(net, 16, 64, scope='fire3')
net = fire_module1(net, 32, 128, scope='fire4')
net = max_pool2d(net, [2, 2], scope='maxpool4')
net = fire_module1(net, 32, 128, scope='fire5')
net = fire_module1(net, 48, 192, scope='fire6')
net = fire_module1(net, 48, 192, scope='fire7')
net = fire_module1(net, 64, 256, scope='fire8')
net = max_pool2d(net, [2, 2], scope='maxpool8')
net = fire_module1(net, 64, 256, scope='fire9')
net = avg_pool2d(net, [4, 4], scope='avgpool10')
net = conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='conv10')
logits = tf.squeeze(net, [2], name='logits')
return logits
class Squeezenet_CIFAR(object):
"""Modified version of squeezenet for CIFAR images"""
name = 'squeezenet_cifar'
def __init__(self, args):
self._weight_decay = args.weight_decay
self._batch_norm_decay = args.batch_norm_decay
self._is_built = False
def build(self, x, is_training):
self._is_built = True
with tf.variable_scope(self.name, values=[x]):
# with arg_scope(_arg_scope(is_training,
# self._weight_decay,
# self._batch_norm_decay)):
with arg_scope([conv2d, avg_pool2d, max_pool2d],
data_format='NCHW'):
return self._squeezenet(x)
@staticmethod
def _squeezenet(images, num_classes=10):
net = conv2d(images, 96, [3, 3], scope='conv1')
net = max_pool2d(net, [2, 2], scope='maxpool1')
net = fire_module(net, 16, 64, scope='fire2')
net = fire_module(net, 16, 64, scope='fire3')
net = fire_module(net, 32, 128, scope='fire4')
net = max_pool2d(net, [2, 2], scope='maxpool4')
net = fire_module(net, 32, 128, scope='fire5')
net = fire_module(net, 48, 192, scope='fire6')
net = fire_module(net, 48, 192, scope='fire7')
net = fire_module(net, 64, 256, scope='fire8')
net = max_pool2d(net, [2, 2], scope='maxpool8')
net = fire_module(net, 64, 256, scope='fire9')
net = avg_pool2d(net, [4, 4], scope='avgpool10')
net = conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='conv10')
logits = tf.squeeze(net, [2], name='logits')
return logits
def _arg_scope(is_training, weight_decay, bn_decay):
with arg_scope([conv2d],
weights_regularizer=l2_regularizer(weight_decay),
normalizer_fn=batch_norm,
normalizer_params={'is_training': is_training,
'fused': True,
'decay': bn_decay}):
with arg_scope([conv2d, avg_pool2d, max_pool2d, batch_norm],
data_format='NCHW') as sc:
return sc
'''
Network in Network: https://arxiv.org/abs/1312.4400
See Section 3.2 for global average pooling
'''
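# Illustrative note (not part of the original file): the classifier heads above
# follow the Network-in-Network / SqueezeNet pattern referenced here -- conv10
# maps the feature map to `num_classes` channels, avg_pool2d averages each
# channel over its spatial extent, and tf.squeeze drops a collapsed spatial axis
# to form the logits, so no fully connected classification layer is needed.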
| 40.848485
| 78
| 0.59451
|
c72a3e0eeba605d114269b5f1257605f8211b8cc
| 1,175
|
py
|
Python
|
kolibri/plugins/device/kolibri_plugin.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 545
|
2016-01-19T19:26:55.000Z
|
2022-03-20T00:13:04.000Z
|
kolibri/plugins/device/kolibri_plugin.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 8,329
|
2016-01-19T19:32:02.000Z
|
2022-03-31T21:23:12.000Z
|
kolibri/plugins/device/kolibri_plugin.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 493
|
2016-01-19T19:26:48.000Z
|
2022-03-28T14:35:05.000Z
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from kolibri.core.auth.constants.user_kinds import SUPERUSER
from kolibri.core.device.utils import get_device_setting
from kolibri.core.hooks import NavigationHook
from kolibri.core.hooks import RoleBasedRedirectHook
from kolibri.core.webpack.hooks import WebpackBundleHook
from kolibri.plugins import KolibriPluginBase
from kolibri.plugins.hooks import register_hook
class DeviceManagementPlugin(KolibriPluginBase):
untranslated_view_urls = "api_urls"
translated_view_urls = "urls"
@register_hook
class DeviceManagementAsset(WebpackBundleHook):
bundle_id = "app"
@property
def plugin_data(self):
return {
"isSubsetOfUsersDevice": get_device_setting(
"subset_of_users_device", False
),
}
@register_hook
class DeviceRedirect(RoleBasedRedirectHook):
roles = (SUPERUSER,)
@property
def url(self):
return self.plugin_url(DeviceManagementPlugin, "device_management")
@register_hook
class DeviceManagementNavItem(NavigationHook):
bundle_id = "side_nav"
| 26.704545
| 75
| 0.772766
|
149a40181ae1a97390338f32baa92c4c174ed096
| 377
|
py
|
Python
|
opentech/public/utils/context_processors.py
|
stdevteam/opentech.fund
|
6888dc5aa1a8c60f17629dff03877412275e08a5
|
[
"BSD-3-Clause"
] | null | null | null |
opentech/public/utils/context_processors.py
|
stdevteam/opentech.fund
|
6888dc5aa1a8c60f17629dff03877412275e08a5
|
[
"BSD-3-Clause"
] | null | null | null |
opentech/public/utils/context_processors.py
|
stdevteam/opentech.fund
|
6888dc5aa1a8c60f17629dff03877412275e08a5
|
[
"BSD-3-Clause"
] | null | null | null |
from opentech.apply.home.models import ApplyHomePage
from opentech.public.home.models import HomePage
from opentech.public.mailchimp.forms import NewsletterForm
def global_vars(request):
return {
'APPLY_SITE': ApplyHomePage.objects.first().get_site(),
'PUBLIC_SITE': HomePage.objects.first().get_site(),
'newsletter_form': NewsletterForm()
}
| 29
| 63
| 0.737401
|
08c20ee47538fed4491627eaebbbd46ac2b9b0c9
| 5,877
|
py
|
Python
|
plugins/trigger_operators.py
|
slaclab/cryoem-pipeline
|
b3fcfb6072fda64164eaef7681d82fa3838dc05e
|
[
"MIT"
] | null | null | null |
plugins/trigger_operators.py
|
slaclab/cryoem-pipeline
|
b3fcfb6072fda64164eaef7681d82fa3838dc05e
|
[
"MIT"
] | null | null | null |
plugins/trigger_operators.py
|
slaclab/cryoem-pipeline
|
b3fcfb6072fda64164eaef7681d82fa3838dc05e
|
[
"MIT"
] | 1
|
2020-12-13T00:00:53.000Z
|
2020-12-13T00:00:53.000Z
|
from airflow.plugins_manager import AirflowPlugin
from airflow.operators.trigger_dagrun import TriggerDagRunOperator
from airflow_multi_dagrun.operators import TriggerMultiDagRunOperator
from airflow.exceptions import AirflowException, AirflowSkipException
from airflow import settings
import contextlib
from airflow.models import DagRun, DagBag
from airflow.utils.state import State
from pathlib import Path
from datetime import datetime
from time import sleep
import re
import logging
LOG = logging.getLogger(__name__)
def trigger_preprocessing(context):
""" calls the preprocessing dag: pass the filenames of the stuff to process """
# we have a jpg, xml, small mrc and large mrc, and gainref dm4 file
# assume the common filename is the same and allow the preprocessing dag wait for the other files? what happens if two separate calls to the same dag occur?
found = {}
if context is None:
return
for f in context['ti'].xcom_pull( task_ids='rsync', key='return_value' ):
this = Path(f).resolve().stem
for pattern in ( r'\-\d+$', r'\-gain\-ref$' ):
if re.search( pattern, this):
this = re.sub( pattern, '', this)
#LOG.info("mapped: %s -> %s" % (f, this))
# LOG.info("this: %s, f: %s" % (this,f))
# EPU SPA
# epu2
if '_Fractions' in f and f.endswith('.xml'):
base = re.sub( '_Fractions', '', this )
found[base] = True
# epu1
elif f.endswith('.xml') and not f.startswith('Atlas') and not f.startswith('Tile_') and '_Data_' in f and not '_Fractions' in f:
found[this] = True
# tomo4 tomography file
elif '[' in this and ']' in this:
t = this.split(']')[0] + ']'
found[t] = True
# serialem
else:
# tomogram
m = re.match( r'^(?P<base>.*)\_(?P<seq>\d{5}\_\d{3})\_(?P<angle>\-?\d{1,2}\.\d)\_(?P<other>.*)?\.(mrc|tif)$', f )
if m:
n = m.groupdict()
fn = "%s_%s[%s]" % (n['base'], n['seq'], n['angle'])
# ignore countref files
if not 'CountRef' in n['base']:
found[ fn ] = True
# single particle
else:
m = re.match( r'^(?P<base>.*\_\d\d\d\d\d)(\_.*)?\.(tif|tiff|mrc)$', f )
if m:
b = m.groupdict()['base']
LOG.info('found %s' % (b,))
found[b] = True
else:
# stupid new epu
m = re.match( r'^(?P<base>.*)\_fractions\.(tiff|mrc)$', f )
if m:
n = m.groupdict()['base']
if n != 'PreviewDoseFraction':
found[n] = True
# LOG.info("FOUND: %s" % (found,))
for base_filename,_ in sorted(found.items()):
sample = context['ti'].xcom_pull( task_ids='config', key='sample' )
inst = context['ti'].xcom_pull( task_ids='config', key='instrument' )
name = context['ti'].xcom_pull( task_ids='config', key='experiment' )
run_id = '%s__%s' % (name, base_filename)
#dro = DagRunOrder(run_id=run_id)
d = sample['params']
d['data_directory'] = context['ti'].xcom_pull( task_ids='config', key='directory_suffix' ) + '/' + sample['guid']
d['base'] = base_filename
d['experiment'] = name
d['microscope'] = inst['_id']
d['cs'] = inst['params']['cs']
d['keV'] = inst['params']['keV']
# only do single-particle
LOG.info('triggering run id %s with %s' % (run_id,d))
#dro.payload = d
#yield dro
dro = {'run_id': run_id, 'payload': d}
yield dro
return
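# Illustrative note (not part of the original plugin): trigger_preprocessing
# groups the rsync'd files by a common base name and yields one dagrun per group.
# For a hypothetical serialem tilt image such as
#   'grid1_00001_001_-12.0_gain.mrc'
# the tomogram regex above captures base='grid1', seq='00001_001' and
# angle='-12.0', so the dagrun is keyed on 'grid1_00001_001[-12.0]'; an EPU-style
# single-particle file like 'FoilHole_123_00001.tif' instead keys on the filename
# stem up to and including the five-digit counter.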
def trigger_null(context):
raise AirflowSkipException('Intentionally not doing it')
@contextlib.contextmanager
def create_session():
session = settings.Session()
try:
yield session
session.expunge_all()
session.commit()
except:
session.rollback()
raise
finally:
session.close()
class TriggerMultiDagRunOperator(TriggerDagRunOperator):
template_fields = ('trigger_dag_id', 'dry_run' )
def __init__(self, dry_run=False, *args,**kwargs):
self.dry_run = dry_run
self.python_callable = trigger_preprocessing
super( TriggerMultiDagRunOperator, self ).__init__( *args, **kwargs )
def execute(self, context):
count = 0
self.python_callable = trigger_preprocessing
dry = str(self.dry_run).lower() == 'true'
try:
with create_session() as session:
dbag = DagBag(settings.DAGS_FOLDER)
trigger_dag = dbag.get_dag(self.trigger_dag_id)
# get dro's
for dro in self.python_callable(context):
if dro and not dry:
try:
dr = trigger_dag.create_dagrun(
run_id=dro['run_id'],
state=State.RUNNING,
conf=dro['payload'],
external_trigger=True)
# LOG.info("Creating DagRun %s", dr)
session.add(dr)
count = count + 1
except Exception as e:
LOG.error("Could not add %s: %s" % (dro.run_id,e))
session.commit()
except Exception as e:
LOG.error("Could not connect to airflow")
if count == 0:
raise AirflowSkipException('No external dags triggered')
class TriggerPlugin(AirflowPlugin):
name = 'trigger_plugin'
operators = [TriggerMultiDagRunOperator,]
| 35.835366
| 161
| 0.537349
|
1849732d58598a4fd39cd95f066af6f711120f43
| 3,612
|
py
|
Python
|
frontend/main.py
|
Fox520/PaymentGateway
|
c4c0675b056ac377b135de778069b1c63883b6de
|
[
"MIT"
] | 1
|
2021-11-23T16:33:39.000Z
|
2021-11-23T16:33:39.000Z
|
frontend/main.py
|
Fox520/PaymentGateway
|
c4c0675b056ac377b135de778069b1c63883b6de
|
[
"MIT"
] | null | null | null |
frontend/main.py
|
Fox520/PaymentGateway
|
c4c0675b056ac377b135de778069b1c63883b6de
|
[
"MIT"
] | null | null | null |
import os
import json
import webbrowser
from kivy.network.urlrequest import UrlRequest
from kivy.storage.dictstore import DictStore
from kivy.app import App
from kivy.properties import StringProperty
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
BASE_URL = "https://c73e12ff8ac1.ngrok.io"
USER_ID = "user1"
store = DictStore("tempo.dat")
Builder.load_string(
"""
<HomeScreen>:
BoxLayout:
orientation: "vertical"
Button:
text: "Create order"
on_release: root.create_order()
Button:
text: "start transaction"
on_release: root.start_transaction()
Button:
text: "check if transaction is complete"
on_release: root.is_transaction_complete()
Label:
text: root.status
"""
)
# Declare both screens
class HomeScreen(Screen):
status = StringProperty("meh")
def create_order(self):
my_order = {
"uid": "user1",
"products": [
{"product_id": "prd1", "count": 3},
{"product_id": "prd2", "count": 2},
],
}
headers = {'Content-type': 'application/json',
'Accept': 'application/json'}
UrlRequest(
f"{BASE_URL}/create-order",
req_body=json.dumps(my_order),
req_headers=headers,
on_success=self.on_order_success,
on_error=self.on_order_error,
)
def on_order_success(self, *args):
# (<UrlRequest(Thread-1, started daemon 7296)>, '{\n "cost": "70.45"\n}\n')
data = json.loads(args[1])
self.status = str(data)
def on_order_error(self, *args):
self.status = str(args)
def start_transaction(self):
info = {
"uid": "user1",
"currency":"ZAR",
"locale": "en-za",
"country": "ZAF",
"pay_method":"CC",
"email": "customer@paygate.co.za"
}
headers = {'Content-type': 'application/json',
'Accept': 'application/json'}
UrlRequest(
f"{BASE_URL}/start-transaction",
req_body=json.dumps(info),
req_headers=headers,
on_success=self.on_start_transaction_success,
on_error=self.on_start_transaction_error,
)
def on_start_transaction_success(self, *args):
data = args[1] if isinstance(args[1], dict) else json.loads(args[1])
transaction_ref = data["transaction_ref"]
store.put("transaction_ref", transaction_ref=transaction_ref)
html_form = data["data"]
f = open("temporary.html", "w")
f.write(html_form)
f.close()
webbrowser.open(os.path.abspath("temporary.html"))
def on_start_transaction_error(self, *args):
self.status = str(args)
def is_transaction_complete(self):
order_id = store.get("transaction_ref")["transaction_ref"]
print(order_id)
UrlRequest(
f"{BASE_URL}/is-order-complete/{USER_ID}/{order_id}",
on_success=self.on_transaction_complete_success,
on_error=self.on_transaction_complete_error,
)
def on_transaction_complete_success(self, *args):
self.status = str(args)
def on_transaction_complete_error(self, *args):
self.status = str(args)
# Create the screen manager
sm = ScreenManager()
sm.add_widget(HomeScreen(name="menu"))
class MyShop(App):
def build(self):
return sm
def on_resume(self):
return True
if __name__ == "__main__":
MyShop().run()
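# Illustrative note (not part of the original example): the screen exercises the
# payment gateway in three steps -- create-order posts the cart and reports the
# cost, start-transaction stores the returned transaction_ref and opens the
# gateway's HTML form in the system browser, and is-order-complete polls the
# backend with that stored reference.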
| 28.21875
| 86
| 0.590808
|
433efa9ec98b5c6296506866da1cdd77611469e4
| 6,385
|
py
|
Python
|
examples/NAS-Envelopenet-trainingcontainer/evaluate.py
|
adamjm/katib
|
41143e8aa3affd0835727df379c38eaf298593cc
|
[
"Apache-2.0"
] | null | null | null |
examples/NAS-Envelopenet-trainingcontainer/evaluate.py
|
adamjm/katib
|
41143e8aa3affd0835727df379c38eaf298593cc
|
[
"Apache-2.0"
] | 1
|
2019-06-14T21:22:35.000Z
|
2019-06-14T21:22:35.000Z
|
examples/NAS-Envelopenet-trainingcontainer/evaluate.py
|
adamjm/katib
|
41143e8aa3affd0835727df379c38eaf298593cc
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import math
import os
import numpy as np
import tensorflow as tf
import net
class Evaluate(object):
def __init__(self, arch, config, checkpoint_dir):
self.task_config = config
self.arch = arch
self.checkpoint_dir = checkpoint_dir
self.get_task_params()
def get_task_params(self):
self.log_stats = self.task_config["log_stats"]
self.dataset = self.task_config["dataset"]
self.image_size = self.task_config["input_size"]
self.init_cell={
"Layer0": {"Branch0": {"block": "conv2d", "kernel_size": [1, 1], "outputs": 64}},
"Layer2": {"Branch0": {"block": "lrn" }}
}
self.classification_cell={
"Layer0": {"Branch0": {"block": "reduce_mean", "size": [1, 2]}},
"Layer1": {"Branch0": {"block": "flatten", "size": [3, 3]}},
"Layer2": {"Branch0": {"block": "dropout", "keep_prob": 0.8}},
"Layer3": {"Branch0": {"block": "fc-final", "inputs": 192, "outputs": 10}}
}
self.batch_size = self.task_config["batch_size"]
self.num_examples = 10000
self.run_once = True
self.eval_dir = os.path.join(self.task_config["data_dir"],"results",
"nac_envelopenet","evaluate")
self.evaluate()
def eval_once(self, saver, summary_writer, top_k_op, summary_op, k=1):
"""Run Eval once.
Args:
saver: Saver.
summary_writer: Summary writer.
top_k_op: Top K op.
summary_op: Summary op.
"""
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/cifar10_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split(
'/')[-1].split('-')[-1]
else:
print('No checkpoint file found')
return
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for q_runner in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(
q_runner.create_threads(
sess,
coord=coord,
daemon=True,
start=True))
num_iter = int(math.ceil(self.num_examples / self.batch_size))
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * self.batch_size
step = 0
while step < num_iter and not coord.should_stop():
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
step += 1
if k == 1:
# Compute precision @ 1.
precision = true_count / total_sample_count
print(
'%s: precision @ 1 = %.3f' %
(datetime.now(), precision))
elif k == 5:
# Compute precision @ 5.
precision = true_count / total_sample_count
print(
'%s: precision @ 5 = %.3f' %
(datetime.now(), precision))
summary = tf.Summary()
summary.ParseFromString(sess.run(summary_op))
summary.value.add(
tag='Precision @ %d' %
(k), simple_value=precision)
summary_writer.add_summary(summary, global_step)
except Exception as excpn: # pylint: disable=broad-except
coord.request_stop(excpn)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def evaluate(self):
"""Eval a network for a number of steps."""
network = net.Net(self.arch, self.task_config)
with tf.Graph().as_default() as grph:
# Get images and labels for CIFAR-10.
eval_data = True
images, labels = network.inputs(eval_data=eval_data)
arch = self.arch
init_cell = self.init_cell
classification_cell = self.classification_cell
log_stats = self.log_stats
scope = "Nacnet"
is_training = False
logits = network.inference(images,
arch,
init_cell,
classification_cell,
log_stats,
is_training,
scope)
# Calculate predictions.
# if imagenet is running then run precision@1,5
top_k_op = tf.nn.in_top_k(logits, labels, 1)
if self.dataset == "imagenet":
# Quick and dirty fix to incorporate changes brought by imagenet
self.num_examples = 50000
top_5_op = tf.nn.in_top_k(logits, labels, 5)
# Restore the moving average version of the learned variables for
# eval.
variable_averages = tf.train.ExponentialMovingAverage(
net.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of
# Summaries.
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(self.eval_dir, grph)
while True:
self.eval_once(saver, summary_writer, top_k_op, summary_op)
if self.dataset == "imagenet":
self.eval_once(saver, summary_writer, top_5_op,
summary_op,
k=5)
if self.run_once:
break
| 40.66879
| 89
| 0.511198
|
41b7865f3e1f2aae69df2318bc3af482c604e0e5
| 3,680
|
py
|
Python
|
bin/Python27/Lib/site-packages/scipy/optimize/tests/test_cobyla.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
bin/Python27/Lib/site-packages/scipy/optimize/tests/test_cobyla.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
bin/Python27/Lib/site-packages/scipy/optimize/tests/test_cobyla.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | 1
|
2020-05-07T11:04:14.000Z
|
2020-05-07T11:04:14.000Z
|
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from numpy.testing import assert_allclose, TestCase, run_module_suite, \
assert_
from scipy.optimize import fmin_cobyla, minimize
class TestCobyla(TestCase):
def setUp(self):
self.x0 = [4.95, 0.66]
self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3]
self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5,
'maxiter': 100}
def fun(self, x):
return x[0]**2 + abs(x[1])**3
def con1(self, x):
return x[0]**2 + x[1]**2 - 25
def con2(self, x):
return -self.con1(x)
def test_simple(self):
x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1,
rhoend=1e-5, iprint=0, maxfun=100)
assert_allclose(x, self.solution, atol=1e-4)
def test_minimize_simple(self):
# Minimize with method='COBYLA'
cons = ({'type': 'ineq', 'fun': self.con1},
{'type': 'ineq', 'fun': self.con2})
sol = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
options=self.opts)
assert_allclose(sol.x, self.solution, atol=1e-4)
assert_(sol.success, sol.message)
assert_(sol.maxcv < 1e-5, sol)
assert_(sol.nfev < 70, sol)
assert_(sol.fun < self.fun(self.solution) + 1e-3, sol)
def test_minimize_constraint_violation(self):
np.random.seed(1234)
pb = np.random.rand(10, 10)
spread = np.random.rand(10)
def p(w):
return pb.dot(w)
def f(w):
return -(w * spread).sum()
def c1(w):
return 500 - abs(p(w)).sum()
def c2(w):
return 5 - abs(p(w).sum())
def c3(w):
return 5 - abs(p(w)).max()
cons = ({'type': 'ineq', 'fun': c1},
{'type': 'ineq', 'fun': c2},
{'type': 'ineq', 'fun': c3})
w0 = np.zeros((10, 1))
sol = minimize(f, w0, method='cobyla', constraints=cons,
options={'catol': 1e-6})
assert_(sol.maxcv > 1e-6)
assert_(not sol.success)
def test_vector_constraints():
# test that fmin_cobyla and minimize can take a combination
# of constraints, some returning a number and others an array
def fun(x):
return (x[0] - 1)**2 + (x[1] - 2.5)**2
def fmin(x):
return fun(x) - 1
def cons1(x):
a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]])
return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] +
a[i, 2] for i in range(len(a))])
def cons2(x):
return x # identity, acts as bounds x > 0
x0 = np.array([2, 0])
cons_list = [fun, cons1, cons2]
xsol = [1.4, 1.7]
fsol = 0.8
# testing fmin_cobyla
sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5, iprint=0)
assert_allclose(sol, xsol, atol=1e-4)
sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5, iprint=0)
assert_allclose(fun(sol), 1, atol=1e-4)
# testing minimize
constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list]
sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
assert_allclose(sol.x, xsol, atol=1e-4)
assert_(sol.success, sol.message)
assert_allclose(sol.fun, fsol, atol=1e-4)
constraints = {'type': 'ineq', 'fun': fmin}
sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
assert_allclose(sol.fun, 1, atol=1e-4)
if __name__ == "__main__":
run_module_suite()
| 30.92437
| 77
| 0.533424
|
8e7a57824055e65e1a88d14c274b24445b020e94
| 8,028
|
py
|
Python
|
moe/optimal_learning/python/python_version/domain.py
|
mikepsinn/MOE
|
fdc2e1318d37c89c54a7b7902c12f8f164f517e8
|
[
"Apache-2.0"
] | null | null | null |
moe/optimal_learning/python/python_version/domain.py
|
mikepsinn/MOE
|
fdc2e1318d37c89c54a7b7902c12f8f164f517e8
|
[
"Apache-2.0"
] | null | null | null |
moe/optimal_learning/python/python_version/domain.py
|
mikepsinn/MOE
|
fdc2e1318d37c89c54a7b7902c12f8f164f517e8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Various python implementations of interfaces.domain_interface.DomainInterface (e.g., TensorProduct).
These are currently used to describe domain limits for optimizers (i.e., implementations of
:mod:`moe.optimal_learning.python.interfaces.optimization_interface`).
Each domain provides functions to:
* Describe the set of boundary planes
* Check whether a point is inside/outside
* Generate random point(s) inside
* Generate points on a fixed grid
* Limit updates (from optimizers) so that a path stays inside the domain
"""
import copy
import numpy
from moe.optimal_learning.python.constant import TENSOR_PRODUCT_DOMAIN_TYPE
from moe.optimal_learning.python.geometry_utils import generate_grid_points, generate_latin_hypercube_points
from moe.optimal_learning.python.interfaces.domain_interface import DomainInterface
class TensorProductDomain(DomainInterface):
r"""Domain type for a tensor product domain.
A d-dimensional tensor product domain is ``D = [x_0_{min}, x_0_{max}] X [x_1_{min}, x_1_{max}] X ... X [x_d_{min}, x_d_{max}]``
"""
domain_type = TENSOR_PRODUCT_DOMAIN_TYPE
def __init__(self, domain_bounds):
"""Construct a TensorProductDomain with the specified bounds.
:param domain_bounds: the boundaries of a dim-dimensional tensor-product domain
:type domain_bounds: iterable of dim :class:`moe.optimal_learning.python.geometry_utils.ClosedInterval`
"""
self._domain_bounds = copy.deepcopy(domain_bounds)
for interval in self._domain_bounds:
if interval.is_empty():
raise ValueError('Tensor product region is EMPTY.')
@property
def dim(self):
"""Return the number of spatial dimensions."""
return len(self._domain_bounds)
def get_json_serializable_info(self, minimal=False):
"""Create and return a domain_info dictionary of this domain object.
:param minimal: True for all domain contents; False for ``domain_type`` and ``dim`` only
:type minimal: bool
:return: dict representation of this domain
:rtype: dict
"""
response = {
'domain_type': self.domain_type,
'dim': self.dim,
}
if not minimal:
response['domain_bounds'] = self._domain_bounds
return response
def check_point_inside(self, point):
r"""Check if a point is inside the domain/on its boundary or outside.
:param point: point to check
:type point: array of float64 with shape (dim)
:return: true if point is inside the domain
:rtype: bool
"""
# Generate a list of bool; i-th entry is True if i-th coordinate is inside the i-th bounds.
# Then check that all entries are True.
return all([interval.is_inside(point[i]) for i, interval in enumerate(self._domain_bounds)])
def get_bounding_box(self):
"""Return a list of ClosedIntervals representing a bounding box for this domain."""
return copy.copy(self._domain_bounds)
def generate_random_point_in_domain(self, random_source=None):
"""Generate ``point`` uniformly at random such that ``self.check_point_inside(point)`` is True.
.. Note:: if you need multiple points, use generate_uniform_random_points_in_domain instead; it
yields better distributions over many points (via latin hypercube sampling) because it guarantees
that no non-uniform clusters may arise (in subspaces) versus this method which treats all draws
independently.
:return: point in domain
:rtype: array of float64 with shape (dim)
"""
return numpy.array([numpy.random.uniform(interval.min, interval.max) for interval in self._domain_bounds])
def generate_uniform_random_points_in_domain(self, num_points, random_source=None):
r"""Generate ``num_points`` on a latin-hypercube (i.e., like a checkerboard).
See python.geometry_utils.generate_latin_hypercube_points for more details.
:param num_points: max number of points to generate
:type num_points: integer >= 0
:param random_source: random source producing uniform random numbers (e.g., numpy.random.uniform) (UNUSED)
:type random_source: callable yielding uniform random numbers in [0,1]
:return: uniform random sampling of points from the domain
:rtype: array of float64 with shape (num_points, dim)
"""
# TODO(GH-56): Allow users to pass in a random source.
return generate_latin_hypercube_points(num_points, self._domain_bounds)
def generate_grid_points_in_domain(self, points_per_dimension, random_source=None):
"""Generate a grid of ``N_0 by N_1 by ... by N_{dim-1}`` points, with each dimension uniformly spaced along the domain boundary.
See python.geometry_utils.generate_grid_points for more details.
:param points_per_dimension: (n_1, n_2, ... n_{dim}) number of stencil points per spatial dimension.
If len(points_per_dimension) == 1, then n_i = len(points_per_dimension)
:type points_per_dimension: tuple or scalar
:param random_source: random source producing uniform random numbers (e.g., numpy.random.uniform) (UNUSED)
:type random_source: callable yielding uniform random numbers in [0,1]
:return: uniform random sampling of points from the domain
"""
# TODO(GH-56): Allow users to pass in a random source.
return generate_grid_points(points_per_dimension, self._domain_bounds)
def compute_update_restricted_to_domain(self, max_relative_change, current_point, update_vector):
r"""Compute a new update so that CheckPointInside(``current_point`` + ``new_update``) is true.
Changes new_update_vector so that:
``point_new = point + new_update_vector``
has coordinates such that ``CheckPointInside(point_new)`` returns true. We select ``point_new``
by projecting ``point + update_vector`` to the nearest point on the domain.
``new_update_vector`` is a function of ``update_vector``.
``new_update_vector`` is just a copy of ``update_vector`` if ``current_point`` is already inside the domain.
.. NOTE::
We modify update_vector (instead of returning point_new) so that further update
limiting/testing may be performed.
:param max_relative_change: max change allowed per update (as a relative fraction of current distance to boundary)
:type max_relative_change: float64 in (0, 1]
:param current_point: starting point
:type current_point: array of float64 with shape (dim)
:param update_vector: proposed update
:type update_vector: array of float64 with shape (dim)
:return: new update so that the final point remains inside the domain
:rtype: array of float64 with shape (dim)
"""
# TODO(GH-58): Vectorize the loop over j, step.
output_update = numpy.empty(self.dim)
# Note: since all boundary planes are axis-aligned, projecting becomes very simple.
for j, step in enumerate(update_vector):
# Distance to the nearest boundary in the j-th dimension
distance_to_boundary = numpy.fmin(
current_point[j] - self._domain_bounds[j].min,
self._domain_bounds[j].max - current_point[j])
desired_step = step
# If we are close to a boundary, optionally (via max_relative_change) limit the step size
# 0 < max_relative_change <= 1 so at worst we reach the boundary.
if numpy.fabs(step) > max_relative_change * distance_to_boundary:
# Move the max allowed distance, in the original direction of travel (obtained via copy-sign)
desired_step = numpy.copysign(max_relative_change * distance_to_boundary, step)
output_update[j] = desired_step
return output_update
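# Illustrative usage sketch (not part of the original module); it assumes the
# ClosedInterval(min, max) constructor from geometry_utils referenced in the
# docstrings above.
#
#   from moe.optimal_learning.python.geometry_utils import ClosedInterval
#
#   domain = TensorProductDomain([ClosedInterval(0.0, 1.0), ClosedInterval(-2.0, 2.0)])
#   domain.check_point_inside(numpy.array([0.5, 0.0]))              # True
#   samples = domain.generate_uniform_random_points_in_domain(10)   # shape (10, 2)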
| 44.849162
| 136
| 0.692576
|
2401261db541681371f9647577ed258f4458a6d1
| 1,805
|
py
|
Python
|
zk_shell/tree.py
|
sellers/zk_shell
|
5f5972c4362212f97de91a75e44d2a551c7bcd51
|
[
"Apache-2.0"
] | 163
|
2015-01-24T06:17:34.000Z
|
2021-12-17T22:58:46.000Z
|
zk_shell/tree.py
|
sellers/zk_shell
|
5f5972c4362212f97de91a75e44d2a551c7bcd51
|
[
"Apache-2.0"
] | 86
|
2015-01-01T00:22:57.000Z
|
2022-03-02T14:50:59.000Z
|
zk_shell/tree.py
|
sellers/zk_shell
|
5f5972c4362212f97de91a75e44d2a551c7bcd51
|
[
"Apache-2.0"
] | 32
|
2015-02-18T17:33:16.000Z
|
2021-12-28T03:43:45.000Z
|
"""
Async tree builder
Example usage:
>>> from kazoo.client import KazooClient
>>> from zk_shell.tree import Tree
>>> zk = KazooClient(hosts)
>>> zk.start()
>>> gen = PathMap(zk, "/configs").get()
>>> str([path for path in gen])
[
'servers',
'ports',
]
>>> zk.stop()
"""
import os
try:
from Queue import Queue
except ImportError: # py3k
from queue import Queue
from kazoo.exceptions import NoAuthError, NoNodeError
class Request(object):
__slots__ = ('path', 'result')
def __init__(self, path, result):
self.path, self.result = path, result
@property
def value(self):
return self.result.get()
class Tree(object):
__slots__ = ("zk", "path")
def __init__(self, zk, path):
self.zk, self.path = zk, path
def get(self, exclude_recurse=None):
"""
Paths matching exclude_recurse will not be recursed.
"""
reqs = Queue()
pending = 1
path = self.path
zk = self.zk
def child_of(path):
return zk.get_children_async(path)
def dispatch(path):
return Request(path, child_of(path))
stat = zk.exists(path)
if stat is None or stat.numChildren == 0:
return
reqs.put(dispatch(path))
while pending:
req = reqs.get()
try:
children = req.value
for child in children:
cpath = os.path.join(req.path, child)
if exclude_recurse is None or exclude_recurse not in child:
pending += 1
reqs.put(dispatch(cpath))
yield cpath
except (NoNodeError, NoAuthError): pass
pending -= 1
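# Illustrative note (not part of the original module): get() walks the subtree
# breadth-first using async get_children requests queued in `reqs`; `pending`
# counts outstanding requests so the generator keeps yielding child paths until
# every request has been consumed, and nodes that disappear or deny access are
# silently skipped.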
| 22.283951
| 79
| 0.539612
|
6332a649ca12944902f405f9042c9685c6593101
| 7,376
|
py
|
Python
|
src/ptb/ptb_enas_controller.py
|
ankdesh/enas
|
95fa07744440c0ae79f7f7a108ef08ee5099fdca
|
[
"Apache-2.0"
] | 7
|
2018-05-30T06:44:03.000Z
|
2021-03-11T03:59:43.000Z
|
src/ptb/ptb_enas_controller.py
|
ankdesh/enas
|
95fa07744440c0ae79f7f7a108ef08ee5099fdca
|
[
"Apache-2.0"
] | 2
|
2018-11-15T06:31:05.000Z
|
2020-06-27T03:07:08.000Z
|
src/ptb/ptb_enas_controller.py
|
wangshy31/enas
|
18cfbbf4dbb82b859ee81a40fbc3357da650d6da
|
[
"Apache-2.0"
] | 3
|
2018-06-28T16:40:12.000Z
|
2019-11-18T11:42:16.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import time
import numpy as np
import tensorflow as tf
from src.utils import get_train_ops
from src.common_ops import stack_lstm
from tensorflow.python.training import moving_averages
class PTBEnasController(object):
def __init__(self,
rhn_depth=5,
lstm_size=32,
lstm_num_layers=2,
lstm_keep_prob=1.0,
tanh_constant=None,
temperature=None,
lr_init=1e-3,
lr_dec_start=0,
lr_dec_every=100,
lr_dec_rate=0.9,
l2_reg=0,
entropy_weight=None,
clip_mode=None,
grad_bound=None,
bl_dec=0.999,
optim_algo="adam",
sync_replicas=False,
num_aggregate=None,
num_replicas=None,
name="controller"):
print("-" * 80)
print("Building PTBEnasController")
self.rhn_depth = rhn_depth
self.lstm_size = lstm_size
self.lstm_num_layers = lstm_num_layers
self.lstm_keep_prob = lstm_keep_prob
self.tanh_constant = tanh_constant
self.temperature = temperature
self.lr_init = lr_init
self.lr_dec_start = lr_dec_start
self.lr_dec_every = lr_dec_every
self.lr_dec_rate = lr_dec_rate
self.l2_reg = l2_reg
self.entropy_weight = entropy_weight
self.clip_mode = clip_mode
self.grad_bound = grad_bound
self.bl_dec = bl_dec
self.optim_algo = optim_algo
self.sync_replicas = sync_replicas
self.num_aggregate = num_aggregate
self.num_replicas = num_replicas
self.name = name
self._create_params()
self._build_sampler()
def _create_params(self):
initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
with tf.variable_scope(self.name, initializer=initializer):
with tf.variable_scope("lstm"):
self.w_lstm = []
for layer_id in xrange(self.lstm_num_layers):
with tf.variable_scope("layer_{}".format(layer_id)):
w = tf.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size])
self.w_lstm.append(w)
num_funcs = 4
with tf.variable_scope("embedding"):
self.g_emb = tf.get_variable("g_emb", [1, self.lstm_size])
self.w_emb = tf.get_variable("w", [num_funcs, self.lstm_size])
with tf.variable_scope("softmax"):
self.w_soft = tf.get_variable("w", [self.lstm_size, num_funcs])
with tf.variable_scope("attention"):
self.attn_w_1 = tf.get_variable("w_1", [self.lstm_size, self.lstm_size])
self.attn_w_2 = tf.get_variable("w_2", [self.lstm_size, self.lstm_size])
self.attn_v = tf.get_variable("v", [self.lstm_size, 1])
def _build_sampler(self):
"""Build the sampler ops and the log_prob ops."""
arc_seq = []
sample_log_probs = []
sample_entropy = []
all_h = []
all_h_w = []
# sampler ops
inputs = self.g_emb
prev_c, prev_h = [], []
for _ in xrange(self.lstm_num_layers):
prev_c.append(tf.zeros([1, self.lstm_size], dtype=tf.float32))
prev_h.append(tf.zeros([1, self.lstm_size], dtype=tf.float32))
# used = tf.zeros([self.rhn_depth, 2], dtype=tf.int32)
for layer_id in xrange(self.rhn_depth):
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
all_h.append(next_h[-1])
all_h_w.append(tf.matmul(next_h[-1], self.attn_w_1))
if layer_id > 0:
query = tf.matmul(next_h[-1], self.attn_w_2)
query = query + tf.concat(all_h_w[:-1], axis=0)
query = tf.tanh(query)
logits = tf.matmul(query, self.attn_v)
logits = tf.reshape(logits, [1, layer_id])
if self.temperature is not None:
logits /= self.temperature
if self.tanh_constant is not None:
logits = self.tanh_constant * tf.tanh(logits)
diff = tf.to_float(layer_id - tf.range(0, layer_id)) ** 2
logits -= tf.reshape(diff, [1, layer_id]) / 12.0
skip_index = tf.multinomial(logits, 1)
skip_index = tf.to_int32(skip_index)
skip_index = tf.reshape(skip_index, [1])
arc_seq.append(skip_index)
log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=skip_index)
sample_log_probs.append(log_prob)
entropy = log_prob * tf.exp(-log_prob)
sample_entropy.append(tf.stop_gradient(entropy))
inputs = tf.nn.embedding_lookup(
tf.concat(all_h[:-1], axis=0), skip_index)
inputs /= (0.1 + tf.to_float(layer_id - skip_index))
else:
inputs = self.g_emb
next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
logits = tf.matmul(next_h[-1], self.w_soft)
if self.temperature is not None:
logits /= self.temperature
if self.tanh_constant is not None:
logits = self.tanh_constant * tf.tanh(logits)
func = tf.multinomial(logits, 1)
func = tf.to_int32(func)
func = tf.reshape(func, [1])
arc_seq.append(func)
log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=func)
sample_log_probs.append(log_prob)
entropy = log_prob * tf.exp(-log_prob)
sample_entropy.append(tf.stop_gradient(entropy))
inputs = tf.nn.embedding_lookup(self.w_emb, func)
arc_seq = tf.concat(arc_seq, axis=0)
self.sample_arc = arc_seq
self.sample_log_probs = tf.concat(sample_log_probs, axis=0)
self.ppl = tf.exp(tf.reduce_mean(self.sample_log_probs))
sample_entropy = tf.concat(sample_entropy, axis=0)
self.sample_entropy = tf.reduce_sum(sample_entropy)
self.all_h = all_h
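# Illustrative note (not part of the original file): for each of the rhn_depth
# steps, the sampler above emits two decisions into arc_seq -- an attention-based
# index choosing which earlier node to attach to (skipped for layer 0), and one
# of num_funcs=4 activation functions drawn from the softmax head -- while
# accumulating the corresponding log-probs and entropies for the policy-gradient
# update assembled in build_trainer().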
def build_trainer(self, child_model):
# actor
self.valid_loss = tf.to_float(child_model.rl_loss)
self.valid_loss = tf.stop_gradient(self.valid_loss)
self.valid_loss = tf.minimum(self.valid_loss, 10.0)
self.valid_ppl = tf.exp(self.valid_loss)
self.reward = 80.0 / self.valid_ppl
if self.entropy_weight is not None:
self.reward += self.entropy_weight * self.sample_entropy
# or baseline
self.sample_log_probs = tf.reduce_sum(self.sample_log_probs)
self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)
baseline_update = tf.assign_sub(
self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))
with tf.control_dependencies([baseline_update]):
self.reward = tf.identity(self.reward)
self.loss = self.sample_log_probs * (self.reward - self.baseline)
self.train_step = tf.Variable(
0, dtype=tf.int32, trainable=False, name="train_step")
tf_variables = [var
for var in tf.trainable_variables() if var.name.startswith(self.name)]
self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(
self.loss,
tf_variables,
self.train_step,
clip_mode=self.clip_mode,
grad_bound=self.grad_bound,
l2_reg=self.l2_reg,
lr_init=self.lr_init,
lr_dec_start=self.lr_dec_start,
lr_dec_every=self.lr_dec_every,
lr_dec_rate=self.lr_dec_rate,
optim_algo=self.optim_algo,
sync_replicas=self.sync_replicas,
num_aggregate=self.num_aggregate,
num_replicas=self.num_replicas)
| 34.46729
| 80
| 0.656589
|
2dc9962a101f881a2d46f7c305617a4543c36550
| 2,350
|
py
|
Python
|
tests/test_html_cleaner.py
|
theseus-automl/gorgona
|
a7366d54430caa5a038488432fb93702e1cb83b8
|
[
"Apache-2.0"
] | 1
|
2021-12-12T10:47:00.000Z
|
2021-12-12T10:47:00.000Z
|
tests/test_html_cleaner.py
|
theseus-automl/gorgona
|
a7366d54430caa5a038488432fb93702e1cb83b8
|
[
"Apache-2.0"
] | 5
|
2021-12-12T10:45:04.000Z
|
2022-01-17T07:51:14.000Z
|
tests/test_html_cleaner.py
|
theseus-automl/gorgona
|
a7366d54430caa5a038488432fb93702e1cb83b8
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from gorgona.stages.cleaners import *
@pytest.fixture()
def setup_html_cleaner() -> HtmlCleaner:
html_cleaner = HtmlCleaner(
'html',
'',
)
return html_cleaner
def test_html_cleaner_no_tags(setup_html_cleaner):
assert setup_html_cleaner('hello world') == 'hello world'
# single tags
def test_html_cleaner_single_open_tag(setup_html_cleaner):
assert setup_html_cleaner('<br>') == ''
def test_html_cleaner_single_open_tag_with_attr(setup_html_cleaner):
assert setup_html_cleaner('<p color="green">') == ''
def test_html_cleaner_single_closing_tag(setup_html_cleaner):
assert setup_html_cleaner('</br>') == ''
def test_html_cleaner_single_paired_tag_without_content(setup_html_cleaner):
assert setup_html_cleaner('<p></p>') == ''
def test_html_cleaner_single_paired_tag_without_content_with_attr(setup_html_cleaner):
assert setup_html_cleaner('<p style="bold"></p>') == ''
def test_html_cleaner_single_paired_tag_with_content(setup_html_cleaner):
assert setup_html_cleaner('<p>hello, world!</p>') == 'hello, world!'
def test_html_cleaner_single_paired_tag_with_content_with_attr(setup_html_cleaner):
assert setup_html_cleaner('<p class="news">hello, world!</p>') == 'hello, world!'
# multiple tags
def test_html_cleaner_multiple_open_tags(setup_html_cleaner):
assert setup_html_cleaner('<br><a><p>') == ''
def test_html_cleaner_multiple_open_tags_with_attrs(setup_html_cleaner):
assert setup_html_cleaner('<br attr=""><a href="hello.world"><p color="red">') == ''
def test_html_cleaner_multiple_closing_tags(setup_html_cleaner):
assert setup_html_cleaner('</br></a></p>') == ''
def test_html_cleaner_multiple_paired_tags_without_content(setup_html_cleaner):
assert setup_html_cleaner('<a></a><p></p>') == ''
def test_html_cleaner_multiple_paired_tags_without_content_with_attrs(setup_html_cleaner):
assert setup_html_cleaner('<a href="hello.world"></a><p color="blue"></p>') == ''
def test_html_cleaner_multiple_paired_tags_with_content(setup_html_cleaner):
assert setup_html_cleaner('<a>link</a> <p>text</p>') == 'link text'
def test_html_cleaner_multiple_paired_tags_with_content_with_attrs(setup_html_cleaner):
assert setup_html_cleaner('<a href="hello.world">link</a> <p font="helvetica">text</p>') == 'link text'
| 30.921053
| 107
| 0.755745
|
9d328e81362dc90d7245dd8821cae1b316c87583
| 20,039
|
py
|
Python
|
tests/text/helpers.py
|
bibinwils/metrics
|
e1c3fda24f90367803c2b04315ad7c8bced719db
|
[
"Apache-2.0"
] | 4
|
2021-03-22T09:02:31.000Z
|
2021-03-23T07:35:39.000Z
|
tests/text/helpers.py
|
bibinwils/metrics
|
e1c3fda24f90367803c2b04315ad7c8bced719db
|
[
"Apache-2.0"
] | 4
|
2021-06-14T08:40:18.000Z
|
2021-07-27T20:01:08.000Z
|
tests/text/helpers.py
|
bibinwils/metrics
|
e1c3fda24f90367803c2b04315ad7c8bced719db
|
[
"Apache-2.0"
] | 2
|
2021-10-16T05:02:43.000Z
|
2022-02-10T16:01:52.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import sys
from enum import Enum, unique
from functools import partial
from typing import Any, Callable, Sequence, Union
import pytest
import torch
from torch import Tensor
from torch.multiprocessing import set_start_method
from tests.helpers.testers import MetricTester, _assert_allclose, _assert_requires_grad, _assert_tensor
from torchmetrics import Metric
try:
set_start_method("spawn")
except RuntimeError:
pass
@unique
class INPUT_ORDER(Enum):
PREDS_FIRST = 1
TARGETS_FIRST = 2
TEXT_METRIC_INPUT = Union[Sequence[str], Sequence[Sequence[str]], Sequence[Sequence[Sequence[str]]]]
NUM_BATCHES = 2
def _class_test(
rank: int,
worldsize: int,
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_class: Metric,
sk_metric: Callable,
dist_sync_on_step: bool,
metric_args: dict = None,
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
atol: float = 1e-8,
device: str = "cpu",
fragment_kwargs: bool = False,
check_scriptable: bool = True,
input_order: INPUT_ORDER = INPUT_ORDER.PREDS_FIRST,
key: str = None,
**kwargs_update: Any,
):
"""Utility function doing the actual comparison between lightning class metric and reference metric.
Args:
rank: rank of current process
worldsize: number of processes
preds: Sequence of predicted tokens or predicted sentences
targets: Sequence of target tokens or target sentences
metric_class: lightning metric class that should be tested
sk_metric: callable function that is used for comparison
dist_sync_on_step: bool, if true will synchronize metric state across
processes at each ``forward()``
metric_args: dict with additional arguments used for class initialization
check_dist_sync_on_step: bool, if true will check if the metric is also correctly
calculated per batch per device (and not just at the end)
check_batch: bool, if true will check if the metric is also correctly
calculated across devices for each batch (and not just at the end)
device: determine which device to run on, either 'cuda' or 'cpu'
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes
input_order: Define the ordering for the preds and targets positional arguments.
key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output against
the sk_metric.
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
if not metric_args:
metric_args = {}
# Instantiate lightning metric
metric = metric_class(
compute_on_step=check_dist_sync_on_step or check_batch, dist_sync_on_step=dist_sync_on_step, **metric_args
)
# check that the metric is scriptable
if check_scriptable:
torch.jit.script(metric)
# move to device
metric = metric.to(device)
kwargs_update = {k: v.to(device) if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
# verify metrics work after being loaded from pickled state
pickled_metric = pickle.dumps(metric)
metric = pickle.loads(pickled_metric)
for i in range(rank, NUM_BATCHES, worldsize):
batch_kwargs_update = {k: v[i] if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
if input_order == INPUT_ORDER.PREDS_FIRST:
batch_result = metric(preds[i], targets[i], **batch_kwargs_update)
elif input_order == INPUT_ORDER.TARGETS_FIRST:
batch_result = metric(targets[i], preds[i], **batch_kwargs_update)
if metric.dist_sync_on_step and check_dist_sync_on_step and rank == 0:
# Concatenation of Sequence of strings
ddp_preds = type(preds)()
ddp_targets = type(targets)()
for r in range(worldsize):
ddp_preds = ddp_preds + preds[i + r]
ddp_targets = ddp_targets + targets[i + r]
ddp_kwargs_upd = {
k: torch.cat([v[i + r] for r in range(worldsize)]).cpu() if isinstance(v, Tensor) else v
for k, v in (kwargs_update if fragment_kwargs else batch_kwargs_update).items()
}
if input_order == INPUT_ORDER.PREDS_FIRST:
sk_batch_result = sk_metric(ddp_preds, ddp_targets, **ddp_kwargs_upd)
elif input_order == INPUT_ORDER.TARGETS_FIRST:
sk_batch_result = sk_metric(ddp_targets, ddp_preds, **ddp_kwargs_upd)
_assert_allclose(batch_result, sk_batch_result, atol=atol, key=key)
elif check_batch and not metric.dist_sync_on_step:
batch_kwargs_update = {
k: v.cpu() if isinstance(v, Tensor) else v
for k, v in (batch_kwargs_update if fragment_kwargs else kwargs_update).items()
}
if input_order == INPUT_ORDER.PREDS_FIRST:
sk_batch_result = sk_metric(preds[i], targets[i], **batch_kwargs_update)
elif input_order == INPUT_ORDER.TARGETS_FIRST:
sk_batch_result = sk_metric(targets[i], preds[i], **batch_kwargs_update)
_assert_allclose(batch_result, sk_batch_result, atol=atol, key=key)
# check that metrics are hashable
assert hash(metric)
# check on all batches on all ranks
result = metric.compute()
_assert_tensor(result, key=key)
# Concatenation of Sequence of strings
total_preds = type(preds)()
total_targets = type(targets)()
for i in range(NUM_BATCHES):
total_preds = total_preds + preds[i]
total_targets = total_targets + targets[i]
total_kwargs_update = {
k: torch.cat([v[i] for i in range(NUM_BATCHES)]).cpu() if isinstance(v, Tensor) else v
for k, v in kwargs_update.items()
}
if input_order == INPUT_ORDER.PREDS_FIRST:
sk_result = sk_metric(total_preds, total_targets, **total_kwargs_update)
elif input_order == INPUT_ORDER.TARGETS_FIRST:
sk_result = sk_metric(total_targets, total_preds, **total_kwargs_update)
# assert after aggregation
_assert_allclose(result, sk_result, atol=atol, key=key)
def _functional_test(
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_functional: Callable,
sk_metric: Callable,
metric_args: dict = None,
atol: float = 1e-8,
device: str = "cpu",
fragment_kwargs: bool = False,
input_order: INPUT_ORDER = INPUT_ORDER.PREDS_FIRST,
key: str = None,
**kwargs_update,
):
"""Utility function doing the actual comparison between lightning functional metric and reference metric.
Args:
preds: torch tensor with predictions
targets: torch tensor with targets
metric_functional: lightning metric functional that should be tested
sk_metric: callable function that is used for comparison
metric_args: dict with additional arguments used for class initialization
device: determine which device to run on, either 'cuda' or 'cpu'
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes
input_order: Define the ordering for the preds and targets positional arguments.
key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output against
the sk_metric.
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
if not metric_args:
metric_args = {}
metric = partial(metric_functional, **metric_args)
# Move to device
kwargs_update = {k: v.to(device) if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
for i in range(NUM_BATCHES):
extra_kwargs = {k: v[i] if isinstance(v, Tensor) else v for k, v in kwargs_update.items()}
if input_order == INPUT_ORDER.PREDS_FIRST:
lightning_result = metric(preds[i], targets[i], **extra_kwargs)
elif input_order == INPUT_ORDER.TARGETS_FIRST:
lightning_result = metric(targets[i], preds[i], **extra_kwargs)
extra_kwargs = {
k: v.cpu() if isinstance(v, Tensor) else v
for k, v in (extra_kwargs if fragment_kwargs else kwargs_update).items()
}
if input_order == INPUT_ORDER.PREDS_FIRST:
sk_result = sk_metric(preds[i], targets[i], **extra_kwargs)
elif input_order == INPUT_ORDER.TARGETS_FIRST:
sk_result = sk_metric(targets[i], preds[i], **extra_kwargs)
# assert its the same
_assert_allclose(lightning_result, sk_result, atol=atol, key=key)
def _assert_half_support(
metric_module: Metric,
metric_functional: Callable,
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
device: str = "cpu",
**kwargs_update,
):
"""Test if an metric can be used with half precision tensors.
Args:
metric_module: the metric module to test
metric_functional: the metric functional to test
preds: torch tensor with predictions
targets: torch tensor with targets
device: determine device, either "cpu" or "cuda"
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
y_hat = preds[0]
y = targets[0]
kwargs_update = {
k: (v[0].half() if v.is_floating_point() else v[0]).to(device) if isinstance(v, Tensor) else v
for k, v in kwargs_update.items()
}
metric_module = metric_module.to(device)
_assert_tensor(metric_module(y_hat, y, **kwargs_update))
_assert_tensor(metric_functional(y_hat, y, **kwargs_update))
class TextTester(MetricTester):
"""Class used for efficiently run alot of parametrized tests in ddp mode. Makes sure that ddp is only setup
once and that pool of processes are used for all tests.
All tests for text metrics should subclass from this and implement a new method called `test_metric_name` where the
method `self.run_metric_test` is called inside.
"""
def run_functional_metric_test(
self,
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_functional: Callable,
sk_metric: Callable,
metric_args: dict = None,
fragment_kwargs: bool = False,
input_order: INPUT_ORDER = INPUT_ORDER.PREDS_FIRST,
key: str = None,
**kwargs_update,
):
"""Main method that should be used for testing functions. Call this inside testing method.
Args:
preds: torch tensor with predictions
targets: torch tensor with targets
metric_functional: lightning metric class that should be tested
sk_metric: callable function that is used for comparison
metric_args: dict with additional arguments used for class initialization
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes
input_order: Define the ordering for the preds and targets positional arguments.
key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output
against the sk_metric.
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
device = "cuda" if (torch.cuda.is_available() and torch.cuda.device_count() > 0) else "cpu"
_functional_test(
preds=preds,
targets=targets,
metric_functional=metric_functional,
sk_metric=sk_metric,
metric_args=metric_args,
atol=self.atol,
device=device,
fragment_kwargs=fragment_kwargs,
input_order=input_order,
key=key,
**kwargs_update,
)
def run_class_metric_test(
self,
ddp: bool,
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_class: Metric,
sk_metric: Callable,
dist_sync_on_step: bool,
metric_args: dict = None,
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
fragment_kwargs: bool = False,
check_scriptable: bool = True,
input_order: INPUT_ORDER = INPUT_ORDER.PREDS_FIRST,
key: str = None,
**kwargs_update,
):
"""Main method that should be used for testing class. Call this inside testing methods.
Args:
ddp: bool, if running in ddp mode or not
preds: torch tensor with predictions
targets: torch tensor with targets
metric_class: lightning metric class that should be tested
sk_metric: callable function that is used for comparison
dist_sync_on_step: bool, if true will synchronize metric state across
processes at each ``forward()``
metric_args: dict with additional arguments used for class initialization
check_dist_sync_on_step: bool, if true will check if the metric is also correctly
calculated per batch per device (and not just at the end)
check_batch: bool, if true will check if the metric is also correctly
calculated across devices for each batch (and not just at the end)
fragment_kwargs: whether tensors in kwargs should be divided as `preds` and `targets` among processes
input_order: Define the ordering for the preds and targets positional arguments.
key: The key passed onto the `_assert_allclose` to compare the respective metric from the Dict output
against the sk_metric.
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
if not metric_args:
metric_args = {}
if ddp:
if sys.platform == "win32":
pytest.skip("DDP not supported on windows")
self.pool.starmap(
partial(
_class_test,
preds=preds,
targets=targets,
metric_class=metric_class,
sk_metric=sk_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=self.atol,
fragment_kwargs=fragment_kwargs,
check_scriptable=check_scriptable,
input_order=input_order,
key=key,
**kwargs_update,
),
[(rank, self.poolSize) for rank in range(self.poolSize)],
)
else:
device = "cuda" if (torch.cuda.is_available() and torch.cuda.device_count() > 0) else "cpu"
_class_test(
rank=0,
worldsize=1,
preds=preds,
targets=targets,
metric_class=metric_class,
sk_metric=sk_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=self.atol,
device=device,
fragment_kwargs=fragment_kwargs,
check_scriptable=check_scriptable,
input_order=input_order,
key=key,
**kwargs_update,
)
@staticmethod
def run_precision_test_cpu(
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_module: Metric,
metric_functional: Callable,
metric_args: dict = None,
**kwargs_update,
):
"""Test if a metric can be used with half precision tensors on cpu
Args:
preds: torch tensor with predictions
targets: torch tensor with targets
metric_module: the metric module to test
metric_functional: the metric functional to test
metric_args: dict with additional arguments used for class initialization
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
metric_args = metric_args or {}
_assert_half_support(
metric_module(**metric_args), metric_functional, preds, targets, device="cpu", **kwargs_update
)
@staticmethod
def run_precision_test_gpu(
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_module: Metric,
metric_functional: Callable,
metric_args: dict = None,
**kwargs_update,
):
"""Test if a metric can be used with half precision tensors on gpu
Args:
preds: torch tensor with predictions
targets: torch tensor with targets
metric_module: the metric module to test
metric_functional: the metric functional to test
metric_args: dict with additional arguments used for class initialization
kwargs_update: Additional keyword arguments that will be passed with preds and
targets when running update on the metric.
"""
metric_args = metric_args or {}
_assert_half_support(
metric_module(**metric_args), metric_functional, preds, targets, device="cuda", **kwargs_update
)
@staticmethod
def run_differentiability_test(
preds: TEXT_METRIC_INPUT,
targets: TEXT_METRIC_INPUT,
metric_module: Metric,
metric_functional: Callable,
metric_args: dict = None,
input_order: INPUT_ORDER = INPUT_ORDER.PREDS_FIRST,
key: str = None,
):
"""Test if a metric is differentiable or not.
Args:
preds: torch tensor with predictions
targets: torch tensor with targets
metric_module: the metric module to test
metric_args: dict with additional arguments used for class initialization
input_order: Define the ordering for the preds and targets positional arguments.
            key: The key passed on to `_assert_allclose` to compare the respective metric from the Dict output
                against the sk_metric.
"""
metric_args = metric_args or {}
# only floating point tensors can require grad
metric = metric_module(**metric_args)
if input_order == INPUT_ORDER.PREDS_FIRST:
out = metric(preds[0], targets[0])
elif input_order == INPUT_ORDER.TARGETS_FIRST:
out = metric(targets[0], preds[0])
# Check if requires_grad matches is_differentiable attribute
_assert_requires_grad(metric, out, key=key)
if metric.is_differentiable:
# check for numerical correctness
assert torch.autograd.gradcheck(partial(metric_functional, **metric_args), (preds[0], targets[0]))
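# Standalone illustration (not part of the tester class above) of the gradcheck
# pattern that run_differentiability_test relies on: torch.autograd.gradcheck
# compares analytic gradients against numerical estimates and expects
# double-precision inputs with requires_grad set.
import torch

def _toy_metric(preds, targets):
    # A simple differentiable "metric": mean squared error.
    return torch.mean((preds - targets) ** 2)

_p = torch.randn(4, dtype=torch.double, requires_grad=True)
_t = torch.randn(4, dtype=torch.double)
assert torch.autograd.gradcheck(_toy_metric, (_p, _t))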
| 41.747917
| 119
| 0.653975
|
edb037153683fc5e77e8700b4b9145a7be6758dd
| 1,467
|
py
|
Python
|
communication/migrations/0023_auto_20161026_1503.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | null | null | null |
communication/migrations/0023_auto_20161026_1503.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | 11
|
2020-03-24T15:29:46.000Z
|
2022-03-11T23:14:48.000Z
|
communication/migrations/0023_auto_20161026_1503.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
'''
Migrations 0023, 0024, and 0025 form three parts of a custom migration
to remove asset inheritance from the MessageAttachment model.
A new ID primary-key field was needed, but it had to be added as null=True (0023),
backfilled (0024), and then set to the primary key with null=False (0025).
'''
# def nullify_fax_document_fks(apps, schema_editor):
# from communication.models import Fax
# Fax.objects.all().update(document=None)
class Migration(migrations.Migration):
dependencies = [
('communication', '0022_auto_20161021_2200'),
]
operations = [
migrations.RemoveField(
model_name='messageattachment',
name='communicationasset_ptr',
),
        # Setting to null makes this reversible; the field gets dropped in a later migration anyway.
migrations.AddField(
model_name='messageattachment',
name='file',
field=models.ForeignKey(to='communication.CommunicationFileRelation', null=True),
preserve_default=False,
),
migrations.AddField(
model_name='messageattachment',
name='id',
# field=models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', null=True, serialize=False),
field=models.IntegerField(verbose_name='ID', null=True, serialize=False),
preserve_default=False,
),
]
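# Hedged sketch (an assumption, not code from this repo) of the final step the
# module docstring describes: after 0024 has backfilled the nullable `id`
# column, a follow-up migration such as 0025 can promote it to the primary key.
#
#     class Migration(migrations.Migration):
#         dependencies = [('communication', '0024_backfill_messageattachment_ids')]
#         operations = [
#             migrations.AlterField(
#                 model_name='messageattachment',
#                 name='id',
#                 field=models.AutoField(auto_created=True, primary_key=True,
#                                        serialize=False, verbose_name='ID'),
#             ),
#         ]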
| 32.6
| 121
| 0.662577
|
d7558814c02dcba8aa17a948bc90e92df8c45977
| 1,468
|
py
|
Python
|
discourse_sso_oidc_bridge/group_processor.py
|
ThoreKr/discourse-sso-oidc-bridge
|
bade9fd83142bca335ea1dbb7f7cae2700ffbacb
|
[
"Apache-2.0"
] | null | null | null |
discourse_sso_oidc_bridge/group_processor.py
|
ThoreKr/discourse-sso-oidc-bridge
|
bade9fd83142bca335ea1dbb7f7cae2700ffbacb
|
[
"Apache-2.0"
] | null | null | null |
discourse_sso_oidc_bridge/group_processor.py
|
ThoreKr/discourse-sso-oidc-bridge
|
bade9fd83142bca335ea1dbb7f7cae2700ffbacb
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, List
def process_group_mappings(
group_mapping_config: Dict[str, dict], sso_attributes: dict, groups: List[str]
):
"""Processes the groups from the mapping rule config into discourse compatible formats
Args:
group_mapping_config (Dict[str, dict]): Predefined mapping rules for the keycloak group tree
{
"<group_name>": {
"name": "<group name in discourse>",
"isMod": false,
"isAdmin": false
},
...
}
sso_attributes (dict): SSO Attributes as they will be processed by the bridge
groups (List[str]): List of groups from the userinfo endpoint
"""
grps = list()
for group in groups:
mapping_rule = group_mapping_config.get(group)
if mapping_rule is None:
continue
if mapping_rule.get("isAdmin", False):
sso_attributes["admin"] = "true"
elif mapping_rule.get("isMod", False):
sso_attributes["moderator"] = "true"
else:
grps.append(mapping_rule["name"])
# Make sure the mod and admin privileges are pinned to false to trigger permission reset on group change.
for priv in ["admin", "moderator"]:
if priv not in sso_attributes:
sso_attributes[priv] = "false"
sso_attributes["groups"] = ",".join(grps)
return sso_attributes
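if __name__ == "__main__":
    # Minimal usage sketch (group names and attributes are made up, not part of
    # any real bridge config): map raw IdP groups onto Discourse SSO attributes.
    _config = {
        "/discourse-admins": {"name": "admins", "isMod": False, "isAdmin": True},
        "/writers": {"name": "writers", "isMod": False, "isAdmin": False},
    }
    _attrs = process_group_mappings(_config, {"external_id": "42"}, ["/writers", "/unmapped"])
    # -> admin/moderator are pinned to "false" and groups becomes "writers".
    print(_attrs)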
| 34.139535
| 113
| 0.588556
|
82148527ea88e8bef6426be71c8573d8e46af4c8
| 708
|
py
|
Python
|
waterspout_api/migrations/0040_auto_20210613_1728.py
|
Water-Systems-Management-UCM/Waterspout
|
78965f1e53b09f442e278dff72c290ceac22ed60
|
[
"MIT"
] | 1
|
2020-09-10T20:43:24.000Z
|
2020-09-10T20:43:24.000Z
|
waterspout_api/migrations/0040_auto_20210613_1728.py
|
Water-Systems-Management-UCM/Waterspout
|
78965f1e53b09f442e278dff72c290ceac22ed60
|
[
"MIT"
] | 72
|
2020-05-28T17:20:12.000Z
|
2022-03-28T14:11:40.000Z
|
waterspout_api/migrations/0040_auto_20210613_1728.py
|
Water-Systems-Management-UCM/Waterspout
|
78965f1e53b09f442e278dff72c290ceac22ed60
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-06-14 00:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('waterspout_api', '0039_userprofile_dense_tables'),
]
operations = [
migrations.AlterField(
model_name='cropmodification',
name='region',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='crop_modifications', to='waterspout_api.region'),
),
migrations.AlterUniqueTogether(
name='cropmodification',
unique_together={('model_run', 'crop', 'region')},
),
]
| 29.5
| 167
| 0.649718
|
514c6fc5e609d67fe70862c3146b02fb9f1e8c9a
| 7,992
|
py
|
Python
|
build/lib.macosx-10.9-x86_64-3.9/gators/feature_generation/tests/test_is_equal.py
|
Aditya-Kapadiya/gators
|
d7c9967e3a8e304a601b6a92ad834d03d3e36338
|
[
"Apache-2.0"
] | 4
|
2021-10-29T18:20:52.000Z
|
2022-03-31T22:53:03.000Z
|
build/lib.macosx-10.9-x86_64-3.9/gators/feature_generation/tests/test_is_equal.py
|
Aditya-Kapadiya/gators
|
d7c9967e3a8e304a601b6a92ad834d03d3e36338
|
[
"Apache-2.0"
] | 1
|
2022-02-21T20:02:16.000Z
|
2022-02-21T20:02:16.000Z
|
build/lib.macosx-10.9-x86_64-3.9/gators/feature_generation/tests/test_is_equal.py
|
Aditya-Kapadiya/gators
|
d7c9967e3a8e304a601b6a92ad834d03d3e36338
|
[
"Apache-2.0"
] | 5
|
2021-11-17T20:16:54.000Z
|
2022-02-21T18:21:02.000Z
|
# License: Apache-2.0
from gators.feature_generation.is_equal import IsEqual
from pandas.testing import assert_frame_equal
import pytest
import numpy as np
import pandas as pd
import databricks.koalas as ks
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture
def data():
X = pd.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.]})
X_expected = pd.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.],
'A__is__B': [1., 0., 0.],
'A__is__C': [1., 0., 0.]})
obj = IsEqual(columns_a=list('AA'), columns_b=list('BC')).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_int16():
X = pd.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.]}).astype(np.int16)
X_expected = pd.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.],
'A__is__B': [1., 0., 0.],
'A__is__C': [1., 0., 0.]}).astype(np.int16)
obj = IsEqual(columns_a=list('AA'), columns_b=list('BC')).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_obj():
X = pd.DataFrame(
{'A': ['a', 'b', 'c'],
'B': ['a', 'f', 'e'],
'C': ['a', 'p', 'd'],
'D': [1, 2, 3]})
X_expected = pd.DataFrame(
{'A': ['a', 'b', 'c'],
'B': ['a', 'f', 'e'],
'C': ['a', 'p', 'd'],
'D': [1, 2, 3],
'A__is__B': [1., 0., 0.],
'A__is__C': [1., 0., 0.]})
obj = IsEqual(columns_a=list('AA'), columns_b=list('BC')).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_names():
X = pd.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.]})
X_expected = pd.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.],
'A==B': [1., 0., 0.],
'A==C': [1., 0., 0.]})
obj = IsEqual(columns_a=list('AA'), columns_b=list('BC'),
column_names=['A==B', 'A==C']).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_ks():
X = ks.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.]})
X_expected = pd.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.],
'A__is__B': [1., 0., 0.],
'A__is__C': [1., 0., 0.]})
obj = IsEqual(columns_a=list('AA'), columns_b=list('BC')).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_int16_ks():
X = ks.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.]}).astype(np.int16)
X_expected = pd.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.],
'A__is__B': [1., 0., 0.],
'A__is__C': [1., 0., 0.]}).astype(np.int16)
obj = IsEqual(columns_a=list('AA'), columns_b=list('BC')).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_obj_ks():
X = ks.DataFrame(
{'A': ['a', 'b', 'c'],
'B': ['a', 'f', 'e'],
'C': ['a', 'p', 'd'],
'D': [1, 2, 3]})
X_expected = pd.DataFrame(
{'A': ['a', 'b', 'c'],
'B': ['a', 'f', 'e'],
'C': ['a', 'p', 'd'],
'D': [1, 2, 3],
'A__is__B': [1., 0., 0.],
'A__is__C': [1., 0., 0.]})
obj = IsEqual(columns_a=list('AA'), columns_b=list('BC')).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_names_ks():
X = ks.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.]})
X_expected = pd.DataFrame(
{'A': [99., 1., 2.],
'B': [99., 4., 5.],
'C': [99., 7., 8.],
'A==B': [1., 0., 0.],
'A==C': [1., 0., 0.]})
obj = IsEqual(
columns_a=list('AA'), columns_b=list('BC'),
column_names=['A==B', 'A==C']).fit(X)
return obj, X, X_expected
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks(data_ks):
obj, X, X_expected = data_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_pd_np(data):
obj, X, X_expected = data
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values.astype(np.float64))
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks_np(data_ks):
obj, X, X_expected = data_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values.astype(np.float64))
assert_frame_equal(X_new, X_expected)
def test_int16_pd(data_int16):
obj, X, X_expected = data_int16
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_int16_ks(data_int16_ks):
obj, X, X_expected = data_int16_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_int16_pd_np(data_int16):
obj, X, X_expected = data_int16
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_int16_ks_np(data_int16_ks):
obj, X, X_expected = data_int16_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_obj(data_obj):
obj, X, X_expected = data_obj
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_obj_ks(data_obj_ks):
obj, X, X_expected = data_obj_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_obj_np(data_obj):
obj, X, X_expected = data_obj
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_obj_ks_np(data_obj_ks):
obj, X, X_expected = data_obj_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_names_pd(data_names):
obj, X, X_expected = data_names
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_names_ks(data_names_ks):
obj, X, X_expected = data_names_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_names_pd_np(data_names):
obj, X, X_expected = data_names
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values.astype(np.float64))
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_names_ks_np(data_names_ks):
obj, X, X_expected = data_names_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values.astype(np.float64))
assert_frame_equal(X_new, X_expected)
def test_input():
with pytest.raises(TypeError):
_ = IsEqual(columns_a=0, columns_b=['B'])
with pytest.raises(TypeError):
_ = IsEqual(columns_a=['A'], columns_b=0)
with pytest.raises(TypeError):
_ = IsEqual(columns_a=['A'], columns_b=['B'], column_names=0)
with pytest.raises(ValueError):
_ = IsEqual(columns_a=['A'], columns_b=['B', 'C'])
with pytest.raises(ValueError):
_ = IsEqual(columns_a=['A'], columns_b=['B'], column_names=['x', 'y'])
with pytest.raises(ValueError):
_ = IsEqual(columns_a=[], columns_b=[])
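# Standalone sketch (not part of the gators test suite) of the behaviour the
# fixtures above encode: for each (column_a, column_b) pair, IsEqual appends a
# float column flagging element-wise equality, named '<a>__is__<b>' unless
# custom column_names are supplied. A naive pandas equivalent for one pair:
def _naive_is_equal(X, col_a, col_b, name=None):
    X = X.copy()
    X[name or f'{col_a}__is__{col_b}'] = (X[col_a] == X[col_b]).astype(float)
    return X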
| 28.140845
| 78
| 0.567067
|
b74f013921b804f8b9e6548450eaf9285d5c6dde
| 311
|
py
|
Python
|
users/urls.py
|
redhat-beyond/roo.me
|
7d711c1828a8951c7e29be796f9f26ed61e2b683
|
[
"MIT"
] | 7
|
2021-03-08T18:15:44.000Z
|
2021-05-24T21:04:24.000Z
|
users/urls.py
|
AmitAharoni/roo.me
|
eb93ed99ab8635543a5e9489893e6718344ddfd9
|
[
"MIT"
] | 232
|
2021-03-08T15:35:56.000Z
|
2021-06-06T21:33:10.000Z
|
users/urls.py
|
AmitAharoni/roo.me
|
eb93ed99ab8635543a5e9489893e6718344ddfd9
|
[
"MIT"
] | 7
|
2021-03-08T13:11:46.000Z
|
2021-03-22T06:58:39.000Z
|
from django.urls import path
from . import views as user_views
urlpatterns = [
path('update/', user_views.update_user, name='update-user'),
path('password_change/', user_views.password_change, name='change-password'),
path('<int:user_id>/details', user_views.user_details, name='user-details'),
]
| 31.1
| 81
| 0.729904
|
c21db1d26d92bcf337fcfdab4effa89ce267769c
| 185
|
py
|
Python
|
pluggie/exceptions.py
|
local-minimum/pluggie
|
526944f2741f8c04e9444fcbdbf6460aaf0e6438
|
[
"MIT"
] | null | null | null |
pluggie/exceptions.py
|
local-minimum/pluggie
|
526944f2741f8c04e9444fcbdbf6460aaf0e6438
|
[
"MIT"
] | null | null | null |
pluggie/exceptions.py
|
local-minimum/pluggie
|
526944f2741f8c04e9444fcbdbf6460aaf0e6438
|
[
"MIT"
] | null | null | null |
class PluggieError(Exception):
pass
class SignatureError(PluggieError):
pass
class PluginLoadError(PluggieError):
pass
class EventTriggerError(PluggieError):
pass
| 12.333333
| 38
| 0.751351
|
9952bd1e8115efe971c6183b42504f9ddc8270fd
| 979
|
py
|
Python
|
manage.py
|
christopherjmedlin/blog.christophermedlin.me
|
b1d1c10ab304a9f31b90b439e9cddd44b173d6a0
|
[
"MIT"
] | null | null | null |
manage.py
|
christopherjmedlin/blog.christophermedlin.me
|
b1d1c10ab304a9f31b90b439e9cddd44b173d6a0
|
[
"MIT"
] | null | null | null |
manage.py
|
christopherjmedlin/blog.christophermedlin.me
|
b1d1c10ab304a9f31b90b439e9cddd44b173d6a0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from flask_script import Manager
import flask_s3
from werkzeug.security import generate_password_hash
import getpass
import pytest
from blog import app, mongo
manager = Manager(app)
@manager.command
def createuser():
username = input("Username: ")
pswd = getpass.getpass("Password: ")
first_name = input("First name: ")
last_name = input("Last name: ")
password = generate_password_hash(pswd)
document = {
"username": username,
"password": password,
"first_name": first_name,
"last_name": last_name
}
mongo.db.users.insert_one(document)
@manager.command
def rmuser():
username = input("Type the username of the user you want to remove: ")
mongo.db.users.delete_one({"username": username})
@manager.command
def s3upload():
flask_s3.create_all(app)
@manager.command
def test():
pytest.main(["blog/tests.py"])
if __name__ == "__main__":
manager.run()
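# Usage sketch (assuming flask_script's standard behaviour of exposing each
# @manager.command function as a sub-command of this script):
#   python manage.py createuser   # interactively add a user to MongoDB
#   python manage.py rmuser       # remove a user by username
#   python manage.py s3upload     # push static assets to S3 via flask_s3
#   python manage.py test         # run blog/tests.py with pytest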
| 20.829787
| 74
| 0.675179
|
49ccb5147cf6893074c35931d3e70bafbf15d1a7
| 15,424
|
py
|
Python
|
appengine/monorail/services/chart_svc.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/monorail/services/chart_svc.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | 7
|
2022-02-15T01:11:37.000Z
|
2022-03-02T12:46:13.000Z
|
appengine/monorail/services/chart_svc.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""A service for querying data for charts.
Functions for querying the IssueSnapshot table and associated join tables.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import settings
import time
from features import hotlist_helpers
from framework import framework_helpers
from framework import sql
from search import search_helpers
from tracker import tracker_bizobj
from tracker import tracker_helpers
from search import query2ast
from search import ast2select
from search import ast2ast
ISSUESNAPSHOT_TABLE_NAME = 'IssueSnapshot'
ISSUESNAPSHOT2CC_TABLE_NAME = 'IssueSnapshot2Cc'
ISSUESNAPSHOT2COMPONENT_TABLE_NAME = 'IssueSnapshot2Component'
ISSUESNAPSHOT2LABEL_TABLE_NAME = 'IssueSnapshot2Label'
ISSUESNAPSHOT_COLS = ['id', 'issue_id', 'shard', 'project_id', 'local_id',
'reporter_id', 'owner_id', 'status_id', 'period_start', 'period_end',
'is_open']
ISSUESNAPSHOT2CC_COLS = ['issuesnapshot_id', 'cc_id']
ISSUESNAPSHOT2COMPONENT_COLS = ['issuesnapshot_id', 'component_id']
ISSUESNAPSHOT2LABEL_COLS = ['issuesnapshot_id', 'label_id']
class ChartService(object):
"""Class for querying chart data."""
def __init__(self, config_service):
"""Constructor for ChartService.
Args:
config_service (ConfigService): An instance of ConfigService.
"""
self.config_service = config_service
# Set up SQL table objects.
self.issuesnapshot_tbl = sql.SQLTableManager(ISSUESNAPSHOT_TABLE_NAME)
self.issuesnapshot2cc_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2CC_TABLE_NAME)
self.issuesnapshot2component_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2COMPONENT_TABLE_NAME)
self.issuesnapshot2label_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2LABEL_TABLE_NAME)
def QueryIssueSnapshots(self, cnxn, services, unixtime, effective_ids,
project, perms, group_by=None, label_prefix=None,
query=None, canned_query=None, hotlist=None):
"""Queries historical issue counts grouped by label or component.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
unixtime: An integer representing the Unix time in seconds.
effective_ids: The effective User IDs associated with the current user.
project: A project object representing the current project.
perms: A permissions object associated with the current user.
      group_by (str, optional): Which dimension to group by. Values can
        be 'label', 'component', 'open', 'status', 'owner', or None, in which
        case no grouping will be applied.
      label_prefix: Required when group_by is 'label'. Will limit the query to
        only labels with the specified prefix (for example 'Pri').
query (str, optional): A query string from the request to apply to
the snapshot query.
canned_query (str, optional): Parsed canned query applied to the query
scope.
hotlist (Hotlist, optional): Hotlist to search under (in lieu of project).
Returns:
      1. A dict of {'2nd dimension or "total"': number of occurrences}.
2. A list of any unsupported query conditions in query.
3. A boolean that is true if any results were capped.
"""
if hotlist:
# TODO(jeffcarp): Get project_ids in a more efficient manner. We can
# query for "SELECT DISTINCT(project_id)" for all issues in hotlist.
issues_list = services.issue.GetIssues(cnxn,
[hotlist_issue.issue_id for hotlist_issue in hotlist.items])
hotlist_issues_project_ids = hotlist_helpers.GetAllProjectsOfIssues(
[issue for issue in issues_list])
config_list = hotlist_helpers.GetAllConfigsOfProjects(
cnxn, hotlist_issues_project_ids, services)
project_config = tracker_bizobj.HarmonizeConfigs(config_list)
else:
project_config = services.config.GetProjectConfig(cnxn,
project.project_id)
if project:
project_ids = [project.project_id]
else:
project_ids = hotlist_issues_project_ids
try:
query_left_joins, query_where, unsupported_conds = self._QueryToWhere(
cnxn, services, project_config, query, canned_query, project_ids)
except ast2select.NoPossibleResults:
return {}, ['Invalid query.'], False
restricted_label_ids = search_helpers.GetPersonalAtRiskLabelIDs(
cnxn, None, self.config_service, effective_ids, project, perms)
left_joins = [
('Issue ON IssueSnapshot.issue_id = Issue.id', []),
]
if restricted_label_ids:
left_joins.append(
(('Issue2Label AS Forbidden_label'
' ON Issue.id = Forbidden_label.issue_id'
' AND Forbidden_label.label_id IN (%s)' % (
sql.PlaceHolders(restricted_label_ids)
)), restricted_label_ids))
if effective_ids:
left_joins.append(
('Issue2Cc AS I2cc'
' ON Issue.id = I2cc.issue_id'
' AND I2cc.cc_id IN (%s)' % sql.PlaceHolders(effective_ids),
effective_ids))
# TODO(jeffcarp): Handle case where there are issues with no labels.
where = [
('IssueSnapshot.period_start <= %s', [unixtime]),
('IssueSnapshot.period_end > %s', [unixtime]),
('Issue.is_spam = %s', [False]),
('Issue.deleted = %s', [False]),
]
if project_ids:
where.append(
('IssueSnapshot.project_id IN (%s)' % sql.PlaceHolders(project_ids),
project_ids))
forbidden_label_clause = 'Forbidden_label.label_id IS NULL'
if effective_ids:
if restricted_label_ids:
forbidden_label_clause = ' OR %s' % forbidden_label_clause
else:
forbidden_label_clause = ''
where.append(
((
'(Issue.reporter_id IN (%s)'
' OR Issue.owner_id IN (%s)'
' OR I2cc.cc_id IS NOT NULL'
'%s)'
) % (
sql.PlaceHolders(effective_ids), sql.PlaceHolders(effective_ids),
forbidden_label_clause
),
list(effective_ids) + list(effective_ids)
))
else:
where.append((forbidden_label_clause, []))
if group_by == 'component':
cols = ['Comp.path', 'COUNT(IssueSnapshot.issue_id)']
left_joins.extend([
(('IssueSnapshot2Component AS Is2c ON'
' Is2c.issuesnapshot_id = IssueSnapshot.id'), []),
('ComponentDef AS Comp ON Comp.id = Is2c.component_id', []),
])
group_by = ['Comp.path']
elif group_by == 'label':
cols = ['Lab.label', 'COUNT(IssueSnapshot.issue_id)']
left_joins.extend([
(('IssueSnapshot2Label AS Is2l'
' ON Is2l.issuesnapshot_id = IssueSnapshot.id'), []),
('LabelDef AS Lab ON Lab.id = Is2l.label_id', []),
])
if not label_prefix:
raise ValueError('`label_prefix` required when grouping by label.')
# TODO(jeffcarp): If LookupIDsOfLabelsMatching() is called on output,
# ensure regex is case-insensitive.
where.append(('LOWER(Lab.label) LIKE %s', [label_prefix.lower() + '-%']))
group_by = ['Lab.label']
elif group_by == 'open':
cols = ['IssueSnapshot.is_open',
'COUNT(IssueSnapshot.issue_id) AS issue_count']
group_by = ['IssueSnapshot.is_open']
elif group_by == 'status':
left_joins.append(('StatusDef AS Stats ON ' \
'Stats.id = IssueSnapshot.status_id', []))
cols = ['Stats.status', 'COUNT(IssueSnapshot.issue_id)']
group_by = ['Stats.status']
elif group_by == 'owner':
cols = ['IssueSnapshot.owner_id', 'COUNT(IssueSnapshot.issue_id)']
group_by = ['IssueSnapshot.owner_id']
elif not group_by:
cols = ['IssueSnapshot.issue_id']
else:
raise ValueError('`group_by` must be label, component, ' \
'open, status, owner or None.')
if query_left_joins:
left_joins.extend(query_left_joins)
if query_where:
where.extend(query_where)
if hotlist:
left_joins.extend([
(('IssueSnapshot2Hotlist AS Is2h'
' ON Is2h.issuesnapshot_id = IssueSnapshot.id'
' AND Is2h.hotlist_id = %s'), [hotlist.hotlist_id]),
])
where.append(
('Is2h.hotlist_id = %s', [hotlist.hotlist_id]))
promises = []
for shard_id in range(settings.num_logical_shards):
count_stmt, stmt_args = self._BuildSnapshotQuery(cols=cols,
where=where, joins=left_joins, group_by=group_by,
shard_id=shard_id)
promises.append(framework_helpers.Promise(cnxn.Execute,
count_stmt, stmt_args, shard_id=shard_id))
shard_values_dict = {}
search_limit_reached = False
for promise in promises:
# Wait for each query to complete and add it to the dict.
shard_values = list(promise.WaitAndGetValue())
if not shard_values:
continue
if group_by:
for name, count in shard_values:
if count >= settings.chart_query_max_rows:
search_limit_reached = True
shard_values_dict.setdefault(name, 0)
shard_values_dict[name] += count
else:
if shard_values[0][0] >= settings.chart_query_max_rows:
search_limit_reached = True
shard_values_dict.setdefault('total', 0)
shard_values_dict['total'] += shard_values[0][0]
unsupported_field_names = list(set([
field.field_name
for cond in unsupported_conds
for field in cond.field_defs
]))
return shard_values_dict, unsupported_field_names, search_limit_reached
def StoreIssueSnapshots(self, cnxn, issues, commit=True):
"""Adds an IssueSnapshot and updates the previous one for each issue."""
for issue in issues:
right_now = self._currentTime()
# Update previous snapshot of current issue's end time to right now.
self.issuesnapshot_tbl.Update(cnxn,
delta={'period_end': right_now},
where=[('IssueSnapshot.issue_id = %s', [issue.issue_id]),
('IssueSnapshot.period_end = %s',
[settings.maximum_snapshot_period_end])],
commit=commit)
config = self.config_service.GetProjectConfig(cnxn, issue.project_id)
period_end = settings.maximum_snapshot_period_end
is_open = tracker_helpers.MeansOpenInProject(
tracker_bizobj.GetStatus(issue), config)
shard = issue.issue_id % settings.num_logical_shards
status = tracker_bizobj.GetStatus(issue)
status_id = self.config_service.LookupStatusID(
cnxn, issue.project_id, status) or None
owner_id = tracker_bizobj.GetOwnerId(issue) or None
issuesnapshot_rows = [(issue.issue_id, shard, issue.project_id,
issue.local_id, issue.reporter_id, owner_id, status_id, right_now,
period_end, is_open)]
ids = self.issuesnapshot_tbl.InsertRows(
cnxn, ISSUESNAPSHOT_COLS[1:],
issuesnapshot_rows,
replace=True, commit=commit,
return_generated_ids=True)
issuesnapshot_id = ids[0]
# Add all labels to IssueSnapshot2Label.
label_rows = [
(issuesnapshot_id,
self.config_service.LookupLabelID(cnxn, issue.project_id, label))
for label in tracker_bizobj.GetLabels(issue)
]
self.issuesnapshot2label_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2LABEL_COLS,
label_rows, replace=True, commit=commit)
# Add all CCs to IssueSnapshot2Cc.
cc_rows = [
(issuesnapshot_id, cc_id)
for cc_id in tracker_bizobj.GetCcIds(issue)
]
self.issuesnapshot2cc_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2CC_COLS,
cc_rows,
replace=True, commit=commit)
# Add all components to IssueSnapshot2Component.
component_rows = [
(issuesnapshot_id, component_id)
for component_id in issue.component_ids
]
self.issuesnapshot2component_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2COMPONENT_COLS,
component_rows,
replace=True, commit=commit)
      # Add all hotlists to IssueSnapshot2Hotlist.
# This is raw SQL to obviate passing FeaturesService down through
# the call stack wherever this function is called.
# TODO(jrobbins): sort out dependencies between service classes.
cnxn.Execute('''
INSERT INTO IssueSnapshot2Hotlist (issuesnapshot_id, hotlist_id)
SELECT %s, hotlist_id FROM Hotlist2Issue WHERE issue_id = %s
''', [issuesnapshot_id, issue.issue_id])
def ExpungeHotlistsFromIssueSnapshots(self, cnxn, hotlist_ids, commit=True):
"""Expunge the existence of hotlists from issue snapshots.
Args:
cnxn: connection to SQL database.
hotlist_ids: list of hotlist_ids for hotlists we want to delete.
commit: set to False to skip the DB commit and do it in a caller.
"""
vals_ph = sql.PlaceHolders(hotlist_ids)
cnxn.Execute(
'DELETE FROM IssueSnapshot2Hotlist '
'WHERE hotlist_id IN ({vals_ph})'.format(vals_ph=vals_ph),
hotlist_ids,
commit=commit)
def _currentTime(self):
"""This is a separate method so it can be mocked by tests."""
return time.time()
def _QueryToWhere(self, cnxn, services, project_config, query, canned_query,
project_ids):
"""Parses a query string into LEFT JOIN and WHERE conditions.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
project_config: The configuration for the given project.
query (string): The query to parse.
canned_query (string): The supplied canned query.
project_ids: The current project ID(s).
Returns:
1. A list of LEFT JOIN clauses for the SQL query.
      2. A list of WHERE clauses for the SQL query.
3. A list of query conditions that are unsupported with snapshots.
"""
if not (query or canned_query):
return [], [], []
query = query or ''
scope = canned_query or ''
query_ast = query2ast.ParseUserQuery(query, scope,
query2ast.BUILTIN_ISSUE_FIELDS, project_config)
query_ast = ast2ast.PreprocessAST(cnxn, query_ast, project_ids,
services, project_config)
left_joins, where, unsupported = ast2select.BuildSQLQuery(query_ast,
snapshot_mode=True)
return left_joins, where, unsupported
def _BuildSnapshotQuery(self, cols, where, joins, group_by, shard_id):
"""Given SQL arguments, executes a snapshot COUNT query."""
stmt = sql.Statement.MakeSelect('IssueSnapshot', cols, distinct=True)
stmt.AddJoinClauses(joins, left=True)
stmt.AddWhereTerms(where + [('IssueSnapshot.shard = %s', [shard_id])])
if group_by:
stmt.AddGroupByTerms(group_by)
stmt.SetLimitAndOffset(limit=settings.chart_query_max_rows, offset=0)
stmt_str, stmt_args = stmt.Generate()
if group_by:
if group_by[0] == 'IssueSnapshot.is_open':
count_stmt = ('SELECT IF(results.is_open = 1, "Opened", "Closed") ' \
'AS bool_open, results.issue_count ' \
'FROM (%s) AS results' % stmt_str)
else:
count_stmt = stmt_str
else:
count_stmt = 'SELECT COUNT(results.issue_id) FROM (%s) AS results' % (
stmt_str)
return count_stmt, stmt_args
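# Standalone illustration (not Monorail code) of the shard-merging pattern used
# in QueryIssueSnapshots above: per-shard (name, count) rows are folded into a
# single dict, and a flag records whether any shard hit the row cap.
def _merge_shard_rows(shard_rows, max_rows=10000):
  totals, capped = {}, False
  for rows in shard_rows:
    for name, count in rows:
      if count >= max_rows:
        capped = True
      totals.setdefault(name, 0)
      totals[name] += count
  return totals, capped
# _merge_shard_rows([[('Pri-1', 3)], [('Pri-1', 2), ('Pri-2', 5)]])
# returns ({'Pri-1': 5, 'Pri-2': 5}, False)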
| 37.436893
| 80
| 0.676024
|
9462aea5b80ab2fdc325f56d03b30de3b1e9d0db
| 66,830
|
py
|
Python
|
features/bert_similarity_between_tweet_and_tweets_user_vectors_feature.py
|
wantedly/recsys2020-challenge
|
d9967860cc4767380d28d2ed7af00d467cc6941a
|
[
"Apache-2.0"
] | 35
|
2020-06-23T05:33:50.000Z
|
2021-11-22T08:22:42.000Z
|
features/bert_similarity_between_tweet_and_tweets_user_vectors_feature.py
|
wantedly/recsys2020-challenge
|
d9967860cc4767380d28d2ed7af00d467cc6941a
|
[
"Apache-2.0"
] | 15
|
2020-12-28T05:31:06.000Z
|
2021-01-22T06:49:28.000Z
|
features/bert_similarity_between_tweet_and_tweets_user_vectors_feature.py
|
wantedly/recsys2020-challenge
|
d9967860cc4767380d28d2ed7af00d467cc6941a
|
[
"Apache-2.0"
] | 2
|
2020-06-30T10:02:05.000Z
|
2021-05-22T09:57:19.000Z
|
from typing import List, Tuple
from google.cloud import bigquery, bigquery_storage_v1beta1
import pandas as pd
from base import BaseFeature, reduce_mem_usage
class BertSimilarityBetweenTweetAndTweetsUserVectorsFeature(BaseFeature):
    # Not used.
def import_columns(self) -> List[str]:
...
def make_features(
self, df_train_input: pd.DataFrame, df_test_input: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
...
def read_and_save_features(
self,
train_table_name: str,
test_table_name: str,
train_output_path: str,
test_output_path: str,
) -> None:
df_train_features = self._read_from_bigquery(train_table_name)
df_test_features = self._read_from_bigquery(test_table_name)
df_train_features.columns = f"{self.name}_" + df_train_features.columns
df_test_features.columns = f"{self.name}_" + df_test_features.columns
if self.save_memory:
self._logger.info("Reduce memory size - train data")
df_train_features = reduce_mem_usage(df_train_features)
self._logger.info("Reduce memory size - test data")
df_test_features = reduce_mem_usage(df_test_features)
self._logger.info(f"Saving features to {train_output_path}")
df_train_features.to_feather(train_output_path)
self._logger.info(f"Saving features to {test_output_path}")
df_test_features.to_feather(test_output_path)
def _read_from_bigquery(self, table_name: str) -> pd.DataFrame:
self._logger.info(f"Reading from {table_name}")
query = _QUERY.format(table_name=table_name)
if self.debugging:
query += " limit 10000"
bqclient = bigquery.Client(project=self.PROJECT_ID)
bqstorageclient = bigquery_storage_v1beta1.BigQueryStorageClient()
df = (
bqclient.query(query)
.result()
.to_dataframe(bqstorage_client=bqstorageclient)
)
return df
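# Standalone sketch (not executed by this class) of the quantity the SQL below
# computes for each training example: the dot product between a tweet's 768-d
# BERT GAP vector and the engaged user's average tweet vector, scaled by 1/768.
def _scaled_dot_similarity(tweet_vec, user_avg_vec):
    assert len(tweet_vec) == len(user_avg_vec) == 768
    return sum(t * u for t, u in zip(tweet_vec, user_avg_vec)) / 768.0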
_QUERY = r"""
with unique_tweets as (
select tweet_id, engaged_user_id
from `recsys2020.training` t
group by tweet_id, engaged_user_id
),
user_tweet_vectors as (
select
engaged_user_id as user_id,
avg(gap_0) as gap_0,
avg(gap_1) as gap_1,
avg(gap_2) as gap_2,
avg(gap_3) as gap_3,
avg(gap_4) as gap_4,
avg(gap_5) as gap_5,
avg(gap_6) as gap_6,
avg(gap_7) as gap_7,
avg(gap_8) as gap_8,
avg(gap_9) as gap_9,
avg(gap_10) as gap_10,
avg(gap_11) as gap_11,
avg(gap_12) as gap_12,
avg(gap_13) as gap_13,
avg(gap_14) as gap_14,
avg(gap_15) as gap_15,
avg(gap_16) as gap_16,
avg(gap_17) as gap_17,
avg(gap_18) as gap_18,
avg(gap_19) as gap_19,
avg(gap_20) as gap_20,
avg(gap_21) as gap_21,
avg(gap_22) as gap_22,
avg(gap_23) as gap_23,
avg(gap_24) as gap_24,
avg(gap_25) as gap_25,
avg(gap_26) as gap_26,
avg(gap_27) as gap_27,
avg(gap_28) as gap_28,
avg(gap_29) as gap_29,
avg(gap_30) as gap_30,
avg(gap_31) as gap_31,
avg(gap_32) as gap_32,
avg(gap_33) as gap_33,
avg(gap_34) as gap_34,
avg(gap_35) as gap_35,
avg(gap_36) as gap_36,
avg(gap_37) as gap_37,
avg(gap_38) as gap_38,
avg(gap_39) as gap_39,
avg(gap_40) as gap_40,
avg(gap_41) as gap_41,
avg(gap_42) as gap_42,
avg(gap_43) as gap_43,
avg(gap_44) as gap_44,
avg(gap_45) as gap_45,
avg(gap_46) as gap_46,
avg(gap_47) as gap_47,
avg(gap_48) as gap_48,
avg(gap_49) as gap_49,
avg(gap_50) as gap_50,
avg(gap_51) as gap_51,
avg(gap_52) as gap_52,
avg(gap_53) as gap_53,
avg(gap_54) as gap_54,
avg(gap_55) as gap_55,
avg(gap_56) as gap_56,
avg(gap_57) as gap_57,
avg(gap_58) as gap_58,
avg(gap_59) as gap_59,
avg(gap_60) as gap_60,
avg(gap_61) as gap_61,
avg(gap_62) as gap_62,
avg(gap_63) as gap_63,
avg(gap_64) as gap_64,
avg(gap_65) as gap_65,
avg(gap_66) as gap_66,
avg(gap_67) as gap_67,
avg(gap_68) as gap_68,
avg(gap_69) as gap_69,
avg(gap_70) as gap_70,
avg(gap_71) as gap_71,
avg(gap_72) as gap_72,
avg(gap_73) as gap_73,
avg(gap_74) as gap_74,
avg(gap_75) as gap_75,
avg(gap_76) as gap_76,
avg(gap_77) as gap_77,
avg(gap_78) as gap_78,
avg(gap_79) as gap_79,
avg(gap_80) as gap_80,
avg(gap_81) as gap_81,
avg(gap_82) as gap_82,
avg(gap_83) as gap_83,
avg(gap_84) as gap_84,
avg(gap_85) as gap_85,
avg(gap_86) as gap_86,
avg(gap_87) as gap_87,
avg(gap_88) as gap_88,
avg(gap_89) as gap_89,
avg(gap_90) as gap_90,
avg(gap_91) as gap_91,
avg(gap_92) as gap_92,
avg(gap_93) as gap_93,
avg(gap_94) as gap_94,
avg(gap_95) as gap_95,
avg(gap_96) as gap_96,
avg(gap_97) as gap_97,
avg(gap_98) as gap_98,
avg(gap_99) as gap_99,
avg(gap_100) as gap_100,
avg(gap_101) as gap_101,
avg(gap_102) as gap_102,
avg(gap_103) as gap_103,
avg(gap_104) as gap_104,
avg(gap_105) as gap_105,
avg(gap_106) as gap_106,
avg(gap_107) as gap_107,
avg(gap_108) as gap_108,
avg(gap_109) as gap_109,
avg(gap_110) as gap_110,
avg(gap_111) as gap_111,
avg(gap_112) as gap_112,
avg(gap_113) as gap_113,
avg(gap_114) as gap_114,
avg(gap_115) as gap_115,
avg(gap_116) as gap_116,
avg(gap_117) as gap_117,
avg(gap_118) as gap_118,
avg(gap_119) as gap_119,
avg(gap_120) as gap_120,
avg(gap_121) as gap_121,
avg(gap_122) as gap_122,
avg(gap_123) as gap_123,
avg(gap_124) as gap_124,
avg(gap_125) as gap_125,
avg(gap_126) as gap_126,
avg(gap_127) as gap_127,
avg(gap_128) as gap_128,
avg(gap_129) as gap_129,
avg(gap_130) as gap_130,
avg(gap_131) as gap_131,
avg(gap_132) as gap_132,
avg(gap_133) as gap_133,
avg(gap_134) as gap_134,
avg(gap_135) as gap_135,
avg(gap_136) as gap_136,
avg(gap_137) as gap_137,
avg(gap_138) as gap_138,
avg(gap_139) as gap_139,
avg(gap_140) as gap_140,
avg(gap_141) as gap_141,
avg(gap_142) as gap_142,
avg(gap_143) as gap_143,
avg(gap_144) as gap_144,
avg(gap_145) as gap_145,
avg(gap_146) as gap_146,
avg(gap_147) as gap_147,
avg(gap_148) as gap_148,
avg(gap_149) as gap_149,
avg(gap_150) as gap_150,
avg(gap_151) as gap_151,
avg(gap_152) as gap_152,
avg(gap_153) as gap_153,
avg(gap_154) as gap_154,
avg(gap_155) as gap_155,
avg(gap_156) as gap_156,
avg(gap_157) as gap_157,
avg(gap_158) as gap_158,
avg(gap_159) as gap_159,
avg(gap_160) as gap_160,
avg(gap_161) as gap_161,
avg(gap_162) as gap_162,
avg(gap_163) as gap_163,
avg(gap_164) as gap_164,
avg(gap_165) as gap_165,
avg(gap_166) as gap_166,
avg(gap_167) as gap_167,
avg(gap_168) as gap_168,
avg(gap_169) as gap_169,
avg(gap_170) as gap_170,
avg(gap_171) as gap_171,
avg(gap_172) as gap_172,
avg(gap_173) as gap_173,
avg(gap_174) as gap_174,
avg(gap_175) as gap_175,
avg(gap_176) as gap_176,
avg(gap_177) as gap_177,
avg(gap_178) as gap_178,
avg(gap_179) as gap_179,
avg(gap_180) as gap_180,
avg(gap_181) as gap_181,
avg(gap_182) as gap_182,
avg(gap_183) as gap_183,
avg(gap_184) as gap_184,
avg(gap_185) as gap_185,
avg(gap_186) as gap_186,
avg(gap_187) as gap_187,
avg(gap_188) as gap_188,
avg(gap_189) as gap_189,
avg(gap_190) as gap_190,
avg(gap_191) as gap_191,
avg(gap_192) as gap_192,
avg(gap_193) as gap_193,
avg(gap_194) as gap_194,
avg(gap_195) as gap_195,
avg(gap_196) as gap_196,
avg(gap_197) as gap_197,
avg(gap_198) as gap_198,
avg(gap_199) as gap_199,
avg(gap_200) as gap_200,
avg(gap_201) as gap_201,
avg(gap_202) as gap_202,
avg(gap_203) as gap_203,
avg(gap_204) as gap_204,
avg(gap_205) as gap_205,
avg(gap_206) as gap_206,
avg(gap_207) as gap_207,
avg(gap_208) as gap_208,
avg(gap_209) as gap_209,
avg(gap_210) as gap_210,
avg(gap_211) as gap_211,
avg(gap_212) as gap_212,
avg(gap_213) as gap_213,
avg(gap_214) as gap_214,
avg(gap_215) as gap_215,
avg(gap_216) as gap_216,
avg(gap_217) as gap_217,
avg(gap_218) as gap_218,
avg(gap_219) as gap_219,
avg(gap_220) as gap_220,
avg(gap_221) as gap_221,
avg(gap_222) as gap_222,
avg(gap_223) as gap_223,
avg(gap_224) as gap_224,
avg(gap_225) as gap_225,
avg(gap_226) as gap_226,
avg(gap_227) as gap_227,
avg(gap_228) as gap_228,
avg(gap_229) as gap_229,
avg(gap_230) as gap_230,
avg(gap_231) as gap_231,
avg(gap_232) as gap_232,
avg(gap_233) as gap_233,
avg(gap_234) as gap_234,
avg(gap_235) as gap_235,
avg(gap_236) as gap_236,
avg(gap_237) as gap_237,
avg(gap_238) as gap_238,
avg(gap_239) as gap_239,
avg(gap_240) as gap_240,
avg(gap_241) as gap_241,
avg(gap_242) as gap_242,
avg(gap_243) as gap_243,
avg(gap_244) as gap_244,
avg(gap_245) as gap_245,
avg(gap_246) as gap_246,
avg(gap_247) as gap_247,
avg(gap_248) as gap_248,
avg(gap_249) as gap_249,
avg(gap_250) as gap_250,
avg(gap_251) as gap_251,
avg(gap_252) as gap_252,
avg(gap_253) as gap_253,
avg(gap_254) as gap_254,
avg(gap_255) as gap_255,
avg(gap_256) as gap_256,
avg(gap_257) as gap_257,
avg(gap_258) as gap_258,
avg(gap_259) as gap_259,
avg(gap_260) as gap_260,
avg(gap_261) as gap_261,
avg(gap_262) as gap_262,
avg(gap_263) as gap_263,
avg(gap_264) as gap_264,
avg(gap_265) as gap_265,
avg(gap_266) as gap_266,
avg(gap_267) as gap_267,
avg(gap_268) as gap_268,
avg(gap_269) as gap_269,
avg(gap_270) as gap_270,
avg(gap_271) as gap_271,
avg(gap_272) as gap_272,
avg(gap_273) as gap_273,
avg(gap_274) as gap_274,
avg(gap_275) as gap_275,
avg(gap_276) as gap_276,
avg(gap_277) as gap_277,
avg(gap_278) as gap_278,
avg(gap_279) as gap_279,
avg(gap_280) as gap_280,
avg(gap_281) as gap_281,
avg(gap_282) as gap_282,
avg(gap_283) as gap_283,
avg(gap_284) as gap_284,
avg(gap_285) as gap_285,
avg(gap_286) as gap_286,
avg(gap_287) as gap_287,
avg(gap_288) as gap_288,
avg(gap_289) as gap_289,
avg(gap_290) as gap_290,
avg(gap_291) as gap_291,
avg(gap_292) as gap_292,
avg(gap_293) as gap_293,
avg(gap_294) as gap_294,
avg(gap_295) as gap_295,
avg(gap_296) as gap_296,
avg(gap_297) as gap_297,
avg(gap_298) as gap_298,
avg(gap_299) as gap_299,
avg(gap_300) as gap_300,
avg(gap_301) as gap_301,
avg(gap_302) as gap_302,
avg(gap_303) as gap_303,
avg(gap_304) as gap_304,
avg(gap_305) as gap_305,
avg(gap_306) as gap_306,
avg(gap_307) as gap_307,
avg(gap_308) as gap_308,
avg(gap_309) as gap_309,
avg(gap_310) as gap_310,
avg(gap_311) as gap_311,
avg(gap_312) as gap_312,
avg(gap_313) as gap_313,
avg(gap_314) as gap_314,
avg(gap_315) as gap_315,
avg(gap_316) as gap_316,
avg(gap_317) as gap_317,
avg(gap_318) as gap_318,
avg(gap_319) as gap_319,
avg(gap_320) as gap_320,
avg(gap_321) as gap_321,
avg(gap_322) as gap_322,
avg(gap_323) as gap_323,
avg(gap_324) as gap_324,
avg(gap_325) as gap_325,
avg(gap_326) as gap_326,
avg(gap_327) as gap_327,
avg(gap_328) as gap_328,
avg(gap_329) as gap_329,
avg(gap_330) as gap_330,
avg(gap_331) as gap_331,
avg(gap_332) as gap_332,
avg(gap_333) as gap_333,
avg(gap_334) as gap_334,
avg(gap_335) as gap_335,
avg(gap_336) as gap_336,
avg(gap_337) as gap_337,
avg(gap_338) as gap_338,
avg(gap_339) as gap_339,
avg(gap_340) as gap_340,
avg(gap_341) as gap_341,
avg(gap_342) as gap_342,
avg(gap_343) as gap_343,
avg(gap_344) as gap_344,
avg(gap_345) as gap_345,
avg(gap_346) as gap_346,
avg(gap_347) as gap_347,
avg(gap_348) as gap_348,
avg(gap_349) as gap_349,
avg(gap_350) as gap_350,
avg(gap_351) as gap_351,
avg(gap_352) as gap_352,
avg(gap_353) as gap_353,
avg(gap_354) as gap_354,
avg(gap_355) as gap_355,
avg(gap_356) as gap_356,
avg(gap_357) as gap_357,
avg(gap_358) as gap_358,
avg(gap_359) as gap_359,
avg(gap_360) as gap_360,
avg(gap_361) as gap_361,
avg(gap_362) as gap_362,
avg(gap_363) as gap_363,
avg(gap_364) as gap_364,
avg(gap_365) as gap_365,
avg(gap_366) as gap_366,
avg(gap_367) as gap_367,
avg(gap_368) as gap_368,
avg(gap_369) as gap_369,
avg(gap_370) as gap_370,
avg(gap_371) as gap_371,
avg(gap_372) as gap_372,
avg(gap_373) as gap_373,
avg(gap_374) as gap_374,
avg(gap_375) as gap_375,
avg(gap_376) as gap_376,
avg(gap_377) as gap_377,
avg(gap_378) as gap_378,
avg(gap_379) as gap_379,
avg(gap_380) as gap_380,
avg(gap_381) as gap_381,
avg(gap_382) as gap_382,
avg(gap_383) as gap_383,
avg(gap_384) as gap_384,
avg(gap_385) as gap_385,
avg(gap_386) as gap_386,
avg(gap_387) as gap_387,
avg(gap_388) as gap_388,
avg(gap_389) as gap_389,
avg(gap_390) as gap_390,
avg(gap_391) as gap_391,
avg(gap_392) as gap_392,
avg(gap_393) as gap_393,
avg(gap_394) as gap_394,
avg(gap_395) as gap_395,
avg(gap_396) as gap_396,
avg(gap_397) as gap_397,
avg(gap_398) as gap_398,
avg(gap_399) as gap_399,
avg(gap_400) as gap_400,
avg(gap_401) as gap_401,
avg(gap_402) as gap_402,
avg(gap_403) as gap_403,
avg(gap_404) as gap_404,
avg(gap_405) as gap_405,
avg(gap_406) as gap_406,
avg(gap_407) as gap_407,
avg(gap_408) as gap_408,
avg(gap_409) as gap_409,
avg(gap_410) as gap_410,
avg(gap_411) as gap_411,
avg(gap_412) as gap_412,
avg(gap_413) as gap_413,
avg(gap_414) as gap_414,
avg(gap_415) as gap_415,
avg(gap_416) as gap_416,
avg(gap_417) as gap_417,
avg(gap_418) as gap_418,
avg(gap_419) as gap_419,
avg(gap_420) as gap_420,
avg(gap_421) as gap_421,
avg(gap_422) as gap_422,
avg(gap_423) as gap_423,
avg(gap_424) as gap_424,
avg(gap_425) as gap_425,
avg(gap_426) as gap_426,
avg(gap_427) as gap_427,
avg(gap_428) as gap_428,
avg(gap_429) as gap_429,
avg(gap_430) as gap_430,
avg(gap_431) as gap_431,
avg(gap_432) as gap_432,
avg(gap_433) as gap_433,
avg(gap_434) as gap_434,
avg(gap_435) as gap_435,
avg(gap_436) as gap_436,
avg(gap_437) as gap_437,
avg(gap_438) as gap_438,
avg(gap_439) as gap_439,
avg(gap_440) as gap_440,
avg(gap_441) as gap_441,
avg(gap_442) as gap_442,
avg(gap_443) as gap_443,
avg(gap_444) as gap_444,
avg(gap_445) as gap_445,
avg(gap_446) as gap_446,
avg(gap_447) as gap_447,
avg(gap_448) as gap_448,
avg(gap_449) as gap_449,
avg(gap_450) as gap_450,
avg(gap_451) as gap_451,
avg(gap_452) as gap_452,
avg(gap_453) as gap_453,
avg(gap_454) as gap_454,
avg(gap_455) as gap_455,
avg(gap_456) as gap_456,
avg(gap_457) as gap_457,
avg(gap_458) as gap_458,
avg(gap_459) as gap_459,
avg(gap_460) as gap_460,
avg(gap_461) as gap_461,
avg(gap_462) as gap_462,
avg(gap_463) as gap_463,
avg(gap_464) as gap_464,
avg(gap_465) as gap_465,
avg(gap_466) as gap_466,
avg(gap_467) as gap_467,
avg(gap_468) as gap_468,
avg(gap_469) as gap_469,
avg(gap_470) as gap_470,
avg(gap_471) as gap_471,
avg(gap_472) as gap_472,
avg(gap_473) as gap_473,
avg(gap_474) as gap_474,
avg(gap_475) as gap_475,
avg(gap_476) as gap_476,
avg(gap_477) as gap_477,
avg(gap_478) as gap_478,
avg(gap_479) as gap_479,
avg(gap_480) as gap_480,
avg(gap_481) as gap_481,
avg(gap_482) as gap_482,
avg(gap_483) as gap_483,
avg(gap_484) as gap_484,
avg(gap_485) as gap_485,
avg(gap_486) as gap_486,
avg(gap_487) as gap_487,
avg(gap_488) as gap_488,
avg(gap_489) as gap_489,
avg(gap_490) as gap_490,
avg(gap_491) as gap_491,
avg(gap_492) as gap_492,
avg(gap_493) as gap_493,
avg(gap_494) as gap_494,
avg(gap_495) as gap_495,
avg(gap_496) as gap_496,
avg(gap_497) as gap_497,
avg(gap_498) as gap_498,
avg(gap_499) as gap_499,
avg(gap_500) as gap_500,
avg(gap_501) as gap_501,
avg(gap_502) as gap_502,
avg(gap_503) as gap_503,
avg(gap_504) as gap_504,
avg(gap_505) as gap_505,
avg(gap_506) as gap_506,
avg(gap_507) as gap_507,
avg(gap_508) as gap_508,
avg(gap_509) as gap_509,
avg(gap_510) as gap_510,
avg(gap_511) as gap_511,
avg(gap_512) as gap_512,
avg(gap_513) as gap_513,
avg(gap_514) as gap_514,
avg(gap_515) as gap_515,
avg(gap_516) as gap_516,
avg(gap_517) as gap_517,
avg(gap_518) as gap_518,
avg(gap_519) as gap_519,
avg(gap_520) as gap_520,
avg(gap_521) as gap_521,
avg(gap_522) as gap_522,
avg(gap_523) as gap_523,
avg(gap_524) as gap_524,
avg(gap_525) as gap_525,
avg(gap_526) as gap_526,
avg(gap_527) as gap_527,
avg(gap_528) as gap_528,
avg(gap_529) as gap_529,
avg(gap_530) as gap_530,
avg(gap_531) as gap_531,
avg(gap_532) as gap_532,
avg(gap_533) as gap_533,
avg(gap_534) as gap_534,
avg(gap_535) as gap_535,
avg(gap_536) as gap_536,
avg(gap_537) as gap_537,
avg(gap_538) as gap_538,
avg(gap_539) as gap_539,
avg(gap_540) as gap_540,
avg(gap_541) as gap_541,
avg(gap_542) as gap_542,
avg(gap_543) as gap_543,
avg(gap_544) as gap_544,
avg(gap_545) as gap_545,
avg(gap_546) as gap_546,
avg(gap_547) as gap_547,
avg(gap_548) as gap_548,
avg(gap_549) as gap_549,
avg(gap_550) as gap_550,
avg(gap_551) as gap_551,
avg(gap_552) as gap_552,
avg(gap_553) as gap_553,
avg(gap_554) as gap_554,
avg(gap_555) as gap_555,
avg(gap_556) as gap_556,
avg(gap_557) as gap_557,
avg(gap_558) as gap_558,
avg(gap_559) as gap_559,
avg(gap_560) as gap_560,
avg(gap_561) as gap_561,
avg(gap_562) as gap_562,
avg(gap_563) as gap_563,
avg(gap_564) as gap_564,
avg(gap_565) as gap_565,
avg(gap_566) as gap_566,
avg(gap_567) as gap_567,
avg(gap_568) as gap_568,
avg(gap_569) as gap_569,
avg(gap_570) as gap_570,
avg(gap_571) as gap_571,
avg(gap_572) as gap_572,
avg(gap_573) as gap_573,
avg(gap_574) as gap_574,
avg(gap_575) as gap_575,
avg(gap_576) as gap_576,
avg(gap_577) as gap_577,
avg(gap_578) as gap_578,
avg(gap_579) as gap_579,
avg(gap_580) as gap_580,
avg(gap_581) as gap_581,
avg(gap_582) as gap_582,
avg(gap_583) as gap_583,
avg(gap_584) as gap_584,
avg(gap_585) as gap_585,
avg(gap_586) as gap_586,
avg(gap_587) as gap_587,
avg(gap_588) as gap_588,
avg(gap_589) as gap_589,
avg(gap_590) as gap_590,
avg(gap_591) as gap_591,
avg(gap_592) as gap_592,
avg(gap_593) as gap_593,
avg(gap_594) as gap_594,
avg(gap_595) as gap_595,
avg(gap_596) as gap_596,
avg(gap_597) as gap_597,
avg(gap_598) as gap_598,
avg(gap_599) as gap_599,
avg(gap_600) as gap_600,
avg(gap_601) as gap_601,
avg(gap_602) as gap_602,
avg(gap_603) as gap_603,
avg(gap_604) as gap_604,
avg(gap_605) as gap_605,
avg(gap_606) as gap_606,
avg(gap_607) as gap_607,
avg(gap_608) as gap_608,
avg(gap_609) as gap_609,
avg(gap_610) as gap_610,
avg(gap_611) as gap_611,
avg(gap_612) as gap_612,
avg(gap_613) as gap_613,
avg(gap_614) as gap_614,
avg(gap_615) as gap_615,
avg(gap_616) as gap_616,
avg(gap_617) as gap_617,
avg(gap_618) as gap_618,
avg(gap_619) as gap_619,
avg(gap_620) as gap_620,
avg(gap_621) as gap_621,
avg(gap_622) as gap_622,
avg(gap_623) as gap_623,
avg(gap_624) as gap_624,
avg(gap_625) as gap_625,
avg(gap_626) as gap_626,
avg(gap_627) as gap_627,
avg(gap_628) as gap_628,
avg(gap_629) as gap_629,
avg(gap_630) as gap_630,
avg(gap_631) as gap_631,
avg(gap_632) as gap_632,
avg(gap_633) as gap_633,
avg(gap_634) as gap_634,
avg(gap_635) as gap_635,
avg(gap_636) as gap_636,
avg(gap_637) as gap_637,
avg(gap_638) as gap_638,
avg(gap_639) as gap_639,
avg(gap_640) as gap_640,
avg(gap_641) as gap_641,
avg(gap_642) as gap_642,
avg(gap_643) as gap_643,
avg(gap_644) as gap_644,
avg(gap_645) as gap_645,
avg(gap_646) as gap_646,
avg(gap_647) as gap_647,
avg(gap_648) as gap_648,
avg(gap_649) as gap_649,
avg(gap_650) as gap_650,
avg(gap_651) as gap_651,
avg(gap_652) as gap_652,
avg(gap_653) as gap_653,
avg(gap_654) as gap_654,
avg(gap_655) as gap_655,
avg(gap_656) as gap_656,
avg(gap_657) as gap_657,
avg(gap_658) as gap_658,
avg(gap_659) as gap_659,
avg(gap_660) as gap_660,
avg(gap_661) as gap_661,
avg(gap_662) as gap_662,
avg(gap_663) as gap_663,
avg(gap_664) as gap_664,
avg(gap_665) as gap_665,
avg(gap_666) as gap_666,
avg(gap_667) as gap_667,
avg(gap_668) as gap_668,
avg(gap_669) as gap_669,
avg(gap_670) as gap_670,
avg(gap_671) as gap_671,
avg(gap_672) as gap_672,
avg(gap_673) as gap_673,
avg(gap_674) as gap_674,
avg(gap_675) as gap_675,
avg(gap_676) as gap_676,
avg(gap_677) as gap_677,
avg(gap_678) as gap_678,
avg(gap_679) as gap_679,
avg(gap_680) as gap_680,
avg(gap_681) as gap_681,
avg(gap_682) as gap_682,
avg(gap_683) as gap_683,
avg(gap_684) as gap_684,
avg(gap_685) as gap_685,
avg(gap_686) as gap_686,
avg(gap_687) as gap_687,
avg(gap_688) as gap_688,
avg(gap_689) as gap_689,
avg(gap_690) as gap_690,
avg(gap_691) as gap_691,
avg(gap_692) as gap_692,
avg(gap_693) as gap_693,
avg(gap_694) as gap_694,
avg(gap_695) as gap_695,
avg(gap_696) as gap_696,
avg(gap_697) as gap_697,
avg(gap_698) as gap_698,
avg(gap_699) as gap_699,
avg(gap_700) as gap_700,
avg(gap_701) as gap_701,
avg(gap_702) as gap_702,
avg(gap_703) as gap_703,
avg(gap_704) as gap_704,
avg(gap_705) as gap_705,
avg(gap_706) as gap_706,
avg(gap_707) as gap_707,
avg(gap_708) as gap_708,
avg(gap_709) as gap_709,
avg(gap_710) as gap_710,
avg(gap_711) as gap_711,
avg(gap_712) as gap_712,
avg(gap_713) as gap_713,
avg(gap_714) as gap_714,
avg(gap_715) as gap_715,
avg(gap_716) as gap_716,
avg(gap_717) as gap_717,
avg(gap_718) as gap_718,
avg(gap_719) as gap_719,
avg(gap_720) as gap_720,
avg(gap_721) as gap_721,
avg(gap_722) as gap_722,
avg(gap_723) as gap_723,
avg(gap_724) as gap_724,
avg(gap_725) as gap_725,
avg(gap_726) as gap_726,
avg(gap_727) as gap_727,
avg(gap_728) as gap_728,
avg(gap_729) as gap_729,
avg(gap_730) as gap_730,
avg(gap_731) as gap_731,
avg(gap_732) as gap_732,
avg(gap_733) as gap_733,
avg(gap_734) as gap_734,
avg(gap_735) as gap_735,
avg(gap_736) as gap_736,
avg(gap_737) as gap_737,
avg(gap_738) as gap_738,
avg(gap_739) as gap_739,
avg(gap_740) as gap_740,
avg(gap_741) as gap_741,
avg(gap_742) as gap_742,
avg(gap_743) as gap_743,
avg(gap_744) as gap_744,
avg(gap_745) as gap_745,
avg(gap_746) as gap_746,
avg(gap_747) as gap_747,
avg(gap_748) as gap_748,
avg(gap_749) as gap_749,
avg(gap_750) as gap_750,
avg(gap_751) as gap_751,
avg(gap_752) as gap_752,
avg(gap_753) as gap_753,
avg(gap_754) as gap_754,
avg(gap_755) as gap_755,
avg(gap_756) as gap_756,
avg(gap_757) as gap_757,
avg(gap_758) as gap_758,
avg(gap_759) as gap_759,
avg(gap_760) as gap_760,
avg(gap_761) as gap_761,
avg(gap_762) as gap_762,
avg(gap_763) as gap_763,
avg(gap_764) as gap_764,
avg(gap_765) as gap_765,
avg(gap_766) as gap_766,
avg(gap_767) as gap_767
from unique_tweets
inner join `recsys2020.pretrained_bert_gap` gap on unique_tweets.tweet_id = gap.tweet_id
group by user_id
)
select
1.0 / 768 * (
(tweet_gap.gap_0 * user_tweet_vectors.gap_0) +
(tweet_gap.gap_1 * user_tweet_vectors.gap_1) +
(tweet_gap.gap_2 * user_tweet_vectors.gap_2) +
(tweet_gap.gap_3 * user_tweet_vectors.gap_3) +
(tweet_gap.gap_4 * user_tweet_vectors.gap_4) +
(tweet_gap.gap_5 * user_tweet_vectors.gap_5) +
(tweet_gap.gap_6 * user_tweet_vectors.gap_6) +
(tweet_gap.gap_7 * user_tweet_vectors.gap_7) +
(tweet_gap.gap_8 * user_tweet_vectors.gap_8) +
(tweet_gap.gap_9 * user_tweet_vectors.gap_9) +
(tweet_gap.gap_10 * user_tweet_vectors.gap_10) +
(tweet_gap.gap_11 * user_tweet_vectors.gap_11) +
(tweet_gap.gap_12 * user_tweet_vectors.gap_12) +
(tweet_gap.gap_13 * user_tweet_vectors.gap_13) +
(tweet_gap.gap_14 * user_tweet_vectors.gap_14) +
(tweet_gap.gap_15 * user_tweet_vectors.gap_15) +
(tweet_gap.gap_16 * user_tweet_vectors.gap_16) +
(tweet_gap.gap_17 * user_tweet_vectors.gap_17) +
(tweet_gap.gap_18 * user_tweet_vectors.gap_18) +
(tweet_gap.gap_19 * user_tweet_vectors.gap_19) +
(tweet_gap.gap_20 * user_tweet_vectors.gap_20) +
(tweet_gap.gap_21 * user_tweet_vectors.gap_21) +
(tweet_gap.gap_22 * user_tweet_vectors.gap_22) +
(tweet_gap.gap_23 * user_tweet_vectors.gap_23) +
(tweet_gap.gap_24 * user_tweet_vectors.gap_24) +
(tweet_gap.gap_25 * user_tweet_vectors.gap_25) +
(tweet_gap.gap_26 * user_tweet_vectors.gap_26) +
(tweet_gap.gap_27 * user_tweet_vectors.gap_27) +
(tweet_gap.gap_28 * user_tweet_vectors.gap_28) +
(tweet_gap.gap_29 * user_tweet_vectors.gap_29) +
(tweet_gap.gap_30 * user_tweet_vectors.gap_30) +
(tweet_gap.gap_31 * user_tweet_vectors.gap_31) +
(tweet_gap.gap_32 * user_tweet_vectors.gap_32) +
(tweet_gap.gap_33 * user_tweet_vectors.gap_33) +
(tweet_gap.gap_34 * user_tweet_vectors.gap_34) +
(tweet_gap.gap_35 * user_tweet_vectors.gap_35) +
(tweet_gap.gap_36 * user_tweet_vectors.gap_36) +
(tweet_gap.gap_37 * user_tweet_vectors.gap_37) +
(tweet_gap.gap_38 * user_tweet_vectors.gap_38) +
(tweet_gap.gap_39 * user_tweet_vectors.gap_39) +
(tweet_gap.gap_40 * user_tweet_vectors.gap_40) +
(tweet_gap.gap_41 * user_tweet_vectors.gap_41) +
(tweet_gap.gap_42 * user_tweet_vectors.gap_42) +
(tweet_gap.gap_43 * user_tweet_vectors.gap_43) +
(tweet_gap.gap_44 * user_tweet_vectors.gap_44) +
(tweet_gap.gap_45 * user_tweet_vectors.gap_45) +
(tweet_gap.gap_46 * user_tweet_vectors.gap_46) +
(tweet_gap.gap_47 * user_tweet_vectors.gap_47) +
(tweet_gap.gap_48 * user_tweet_vectors.gap_48) +
(tweet_gap.gap_49 * user_tweet_vectors.gap_49) +
(tweet_gap.gap_50 * user_tweet_vectors.gap_50) +
(tweet_gap.gap_51 * user_tweet_vectors.gap_51) +
(tweet_gap.gap_52 * user_tweet_vectors.gap_52) +
(tweet_gap.gap_53 * user_tweet_vectors.gap_53) +
(tweet_gap.gap_54 * user_tweet_vectors.gap_54) +
(tweet_gap.gap_55 * user_tweet_vectors.gap_55) +
(tweet_gap.gap_56 * user_tweet_vectors.gap_56) +
(tweet_gap.gap_57 * user_tweet_vectors.gap_57) +
(tweet_gap.gap_58 * user_tweet_vectors.gap_58) +
(tweet_gap.gap_59 * user_tweet_vectors.gap_59) +
(tweet_gap.gap_60 * user_tweet_vectors.gap_60) +
(tweet_gap.gap_61 * user_tweet_vectors.gap_61) +
(tweet_gap.gap_62 * user_tweet_vectors.gap_62) +
(tweet_gap.gap_63 * user_tweet_vectors.gap_63) +
(tweet_gap.gap_64 * user_tweet_vectors.gap_64) +
(tweet_gap.gap_65 * user_tweet_vectors.gap_65) +
(tweet_gap.gap_66 * user_tweet_vectors.gap_66) +
(tweet_gap.gap_67 * user_tweet_vectors.gap_67) +
(tweet_gap.gap_68 * user_tweet_vectors.gap_68) +
(tweet_gap.gap_69 * user_tweet_vectors.gap_69) +
(tweet_gap.gap_70 * user_tweet_vectors.gap_70) +
(tweet_gap.gap_71 * user_tweet_vectors.gap_71) +
(tweet_gap.gap_72 * user_tweet_vectors.gap_72) +
(tweet_gap.gap_73 * user_tweet_vectors.gap_73) +
(tweet_gap.gap_74 * user_tweet_vectors.gap_74) +
(tweet_gap.gap_75 * user_tweet_vectors.gap_75) +
(tweet_gap.gap_76 * user_tweet_vectors.gap_76) +
(tweet_gap.gap_77 * user_tweet_vectors.gap_77) +
(tweet_gap.gap_78 * user_tweet_vectors.gap_78) +
(tweet_gap.gap_79 * user_tweet_vectors.gap_79) +
(tweet_gap.gap_80 * user_tweet_vectors.gap_80) +
(tweet_gap.gap_81 * user_tweet_vectors.gap_81) +
(tweet_gap.gap_82 * user_tweet_vectors.gap_82) +
(tweet_gap.gap_83 * user_tweet_vectors.gap_83) +
(tweet_gap.gap_84 * user_tweet_vectors.gap_84) +
(tweet_gap.gap_85 * user_tweet_vectors.gap_85) +
(tweet_gap.gap_86 * user_tweet_vectors.gap_86) +
(tweet_gap.gap_87 * user_tweet_vectors.gap_87) +
(tweet_gap.gap_88 * user_tweet_vectors.gap_88) +
(tweet_gap.gap_89 * user_tweet_vectors.gap_89) +
(tweet_gap.gap_90 * user_tweet_vectors.gap_90) +
(tweet_gap.gap_91 * user_tweet_vectors.gap_91) +
(tweet_gap.gap_92 * user_tweet_vectors.gap_92) +
(tweet_gap.gap_93 * user_tweet_vectors.gap_93) +
(tweet_gap.gap_94 * user_tweet_vectors.gap_94) +
(tweet_gap.gap_95 * user_tweet_vectors.gap_95) +
(tweet_gap.gap_96 * user_tweet_vectors.gap_96) +
(tweet_gap.gap_97 * user_tweet_vectors.gap_97) +
(tweet_gap.gap_98 * user_tweet_vectors.gap_98) +
(tweet_gap.gap_99 * user_tweet_vectors.gap_99) +
(tweet_gap.gap_100 * user_tweet_vectors.gap_100) +
(tweet_gap.gap_101 * user_tweet_vectors.gap_101) +
(tweet_gap.gap_102 * user_tweet_vectors.gap_102) +
(tweet_gap.gap_103 * user_tweet_vectors.gap_103) +
(tweet_gap.gap_104 * user_tweet_vectors.gap_104) +
(tweet_gap.gap_105 * user_tweet_vectors.gap_105) +
(tweet_gap.gap_106 * user_tweet_vectors.gap_106) +
(tweet_gap.gap_107 * user_tweet_vectors.gap_107) +
(tweet_gap.gap_108 * user_tweet_vectors.gap_108) +
(tweet_gap.gap_109 * user_tweet_vectors.gap_109) +
(tweet_gap.gap_110 * user_tweet_vectors.gap_110) +
(tweet_gap.gap_111 * user_tweet_vectors.gap_111) +
(tweet_gap.gap_112 * user_tweet_vectors.gap_112) +
(tweet_gap.gap_113 * user_tweet_vectors.gap_113) +
(tweet_gap.gap_114 * user_tweet_vectors.gap_114) +
(tweet_gap.gap_115 * user_tweet_vectors.gap_115) +
(tweet_gap.gap_116 * user_tweet_vectors.gap_116) +
(tweet_gap.gap_117 * user_tweet_vectors.gap_117) +
(tweet_gap.gap_118 * user_tweet_vectors.gap_118) +
(tweet_gap.gap_119 * user_tweet_vectors.gap_119) +
(tweet_gap.gap_120 * user_tweet_vectors.gap_120) +
(tweet_gap.gap_121 * user_tweet_vectors.gap_121) +
(tweet_gap.gap_122 * user_tweet_vectors.gap_122) +
(tweet_gap.gap_123 * user_tweet_vectors.gap_123) +
(tweet_gap.gap_124 * user_tweet_vectors.gap_124) +
(tweet_gap.gap_125 * user_tweet_vectors.gap_125) +
(tweet_gap.gap_126 * user_tweet_vectors.gap_126) +
(tweet_gap.gap_127 * user_tweet_vectors.gap_127) +
(tweet_gap.gap_128 * user_tweet_vectors.gap_128) +
(tweet_gap.gap_129 * user_tweet_vectors.gap_129) +
(tweet_gap.gap_130 * user_tweet_vectors.gap_130) +
(tweet_gap.gap_131 * user_tweet_vectors.gap_131) +
(tweet_gap.gap_132 * user_tweet_vectors.gap_132) +
(tweet_gap.gap_133 * user_tweet_vectors.gap_133) +
(tweet_gap.gap_134 * user_tweet_vectors.gap_134) +
(tweet_gap.gap_135 * user_tweet_vectors.gap_135) +
(tweet_gap.gap_136 * user_tweet_vectors.gap_136) +
(tweet_gap.gap_137 * user_tweet_vectors.gap_137) +
(tweet_gap.gap_138 * user_tweet_vectors.gap_138) +
(tweet_gap.gap_139 * user_tweet_vectors.gap_139) +
(tweet_gap.gap_140 * user_tweet_vectors.gap_140) +
(tweet_gap.gap_141 * user_tweet_vectors.gap_141) +
(tweet_gap.gap_142 * user_tweet_vectors.gap_142) +
(tweet_gap.gap_143 * user_tweet_vectors.gap_143) +
(tweet_gap.gap_144 * user_tweet_vectors.gap_144) +
(tweet_gap.gap_145 * user_tweet_vectors.gap_145) +
(tweet_gap.gap_146 * user_tweet_vectors.gap_146) +
(tweet_gap.gap_147 * user_tweet_vectors.gap_147) +
(tweet_gap.gap_148 * user_tweet_vectors.gap_148) +
(tweet_gap.gap_149 * user_tweet_vectors.gap_149) +
(tweet_gap.gap_150 * user_tweet_vectors.gap_150) +
(tweet_gap.gap_151 * user_tweet_vectors.gap_151) +
(tweet_gap.gap_152 * user_tweet_vectors.gap_152) +
(tweet_gap.gap_153 * user_tweet_vectors.gap_153) +
(tweet_gap.gap_154 * user_tweet_vectors.gap_154) +
(tweet_gap.gap_155 * user_tweet_vectors.gap_155) +
(tweet_gap.gap_156 * user_tweet_vectors.gap_156) +
(tweet_gap.gap_157 * user_tweet_vectors.gap_157) +
(tweet_gap.gap_158 * user_tweet_vectors.gap_158) +
(tweet_gap.gap_159 * user_tweet_vectors.gap_159) +
(tweet_gap.gap_160 * user_tweet_vectors.gap_160) +
(tweet_gap.gap_161 * user_tweet_vectors.gap_161) +
(tweet_gap.gap_162 * user_tweet_vectors.gap_162) +
(tweet_gap.gap_163 * user_tweet_vectors.gap_163) +
(tweet_gap.gap_164 * user_tweet_vectors.gap_164) +
(tweet_gap.gap_165 * user_tweet_vectors.gap_165) +
(tweet_gap.gap_166 * user_tweet_vectors.gap_166) +
(tweet_gap.gap_167 * user_tweet_vectors.gap_167) +
(tweet_gap.gap_168 * user_tweet_vectors.gap_168) +
(tweet_gap.gap_169 * user_tweet_vectors.gap_169) +
(tweet_gap.gap_170 * user_tweet_vectors.gap_170) +
(tweet_gap.gap_171 * user_tweet_vectors.gap_171) +
(tweet_gap.gap_172 * user_tweet_vectors.gap_172) +
(tweet_gap.gap_173 * user_tweet_vectors.gap_173) +
(tweet_gap.gap_174 * user_tweet_vectors.gap_174) +
(tweet_gap.gap_175 * user_tweet_vectors.gap_175) +
(tweet_gap.gap_176 * user_tweet_vectors.gap_176) +
(tweet_gap.gap_177 * user_tweet_vectors.gap_177) +
(tweet_gap.gap_178 * user_tweet_vectors.gap_178) +
(tweet_gap.gap_179 * user_tweet_vectors.gap_179) +
(tweet_gap.gap_180 * user_tweet_vectors.gap_180) +
(tweet_gap.gap_181 * user_tweet_vectors.gap_181) +
(tweet_gap.gap_182 * user_tweet_vectors.gap_182) +
(tweet_gap.gap_183 * user_tweet_vectors.gap_183) +
(tweet_gap.gap_184 * user_tweet_vectors.gap_184) +
(tweet_gap.gap_185 * user_tweet_vectors.gap_185) +
(tweet_gap.gap_186 * user_tweet_vectors.gap_186) +
(tweet_gap.gap_187 * user_tweet_vectors.gap_187) +
(tweet_gap.gap_188 * user_tweet_vectors.gap_188) +
(tweet_gap.gap_189 * user_tweet_vectors.gap_189) +
(tweet_gap.gap_190 * user_tweet_vectors.gap_190) +
(tweet_gap.gap_191 * user_tweet_vectors.gap_191) +
(tweet_gap.gap_192 * user_tweet_vectors.gap_192) +
(tweet_gap.gap_193 * user_tweet_vectors.gap_193) +
(tweet_gap.gap_194 * user_tweet_vectors.gap_194) +
(tweet_gap.gap_195 * user_tweet_vectors.gap_195) +
(tweet_gap.gap_196 * user_tweet_vectors.gap_196) +
(tweet_gap.gap_197 * user_tweet_vectors.gap_197) +
(tweet_gap.gap_198 * user_tweet_vectors.gap_198) +
(tweet_gap.gap_199 * user_tweet_vectors.gap_199) +
(tweet_gap.gap_200 * user_tweet_vectors.gap_200) +
(tweet_gap.gap_201 * user_tweet_vectors.gap_201) +
(tweet_gap.gap_202 * user_tweet_vectors.gap_202) +
(tweet_gap.gap_203 * user_tweet_vectors.gap_203) +
(tweet_gap.gap_204 * user_tweet_vectors.gap_204) +
(tweet_gap.gap_205 * user_tweet_vectors.gap_205) +
(tweet_gap.gap_206 * user_tweet_vectors.gap_206) +
(tweet_gap.gap_207 * user_tweet_vectors.gap_207) +
(tweet_gap.gap_208 * user_tweet_vectors.gap_208) +
(tweet_gap.gap_209 * user_tweet_vectors.gap_209) +
(tweet_gap.gap_210 * user_tweet_vectors.gap_210) +
(tweet_gap.gap_211 * user_tweet_vectors.gap_211) +
(tweet_gap.gap_212 * user_tweet_vectors.gap_212) +
(tweet_gap.gap_213 * user_tweet_vectors.gap_213) +
(tweet_gap.gap_214 * user_tweet_vectors.gap_214) +
(tweet_gap.gap_215 * user_tweet_vectors.gap_215) +
(tweet_gap.gap_216 * user_tweet_vectors.gap_216) +
(tweet_gap.gap_217 * user_tweet_vectors.gap_217) +
(tweet_gap.gap_218 * user_tweet_vectors.gap_218) +
(tweet_gap.gap_219 * user_tweet_vectors.gap_219) +
(tweet_gap.gap_220 * user_tweet_vectors.gap_220) +
(tweet_gap.gap_221 * user_tweet_vectors.gap_221) +
(tweet_gap.gap_222 * user_tweet_vectors.gap_222) +
(tweet_gap.gap_223 * user_tweet_vectors.gap_223) +
(tweet_gap.gap_224 * user_tweet_vectors.gap_224) +
(tweet_gap.gap_225 * user_tweet_vectors.gap_225) +
(tweet_gap.gap_226 * user_tweet_vectors.gap_226) +
(tweet_gap.gap_227 * user_tweet_vectors.gap_227) +
(tweet_gap.gap_228 * user_tweet_vectors.gap_228) +
(tweet_gap.gap_229 * user_tweet_vectors.gap_229) +
(tweet_gap.gap_230 * user_tweet_vectors.gap_230) +
(tweet_gap.gap_231 * user_tweet_vectors.gap_231) +
(tweet_gap.gap_232 * user_tweet_vectors.gap_232) +
(tweet_gap.gap_233 * user_tweet_vectors.gap_233) +
(tweet_gap.gap_234 * user_tweet_vectors.gap_234) +
(tweet_gap.gap_235 * user_tweet_vectors.gap_235) +
(tweet_gap.gap_236 * user_tweet_vectors.gap_236) +
(tweet_gap.gap_237 * user_tweet_vectors.gap_237) +
(tweet_gap.gap_238 * user_tweet_vectors.gap_238) +
(tweet_gap.gap_239 * user_tweet_vectors.gap_239) +
(tweet_gap.gap_240 * user_tweet_vectors.gap_240) +
(tweet_gap.gap_241 * user_tweet_vectors.gap_241) +
(tweet_gap.gap_242 * user_tweet_vectors.gap_242) +
(tweet_gap.gap_243 * user_tweet_vectors.gap_243) +
(tweet_gap.gap_244 * user_tweet_vectors.gap_244) +
(tweet_gap.gap_245 * user_tweet_vectors.gap_245) +
(tweet_gap.gap_246 * user_tweet_vectors.gap_246) +
(tweet_gap.gap_247 * user_tweet_vectors.gap_247) +
(tweet_gap.gap_248 * user_tweet_vectors.gap_248) +
(tweet_gap.gap_249 * user_tweet_vectors.gap_249) +
(tweet_gap.gap_250 * user_tweet_vectors.gap_250) +
(tweet_gap.gap_251 * user_tweet_vectors.gap_251) +
(tweet_gap.gap_252 * user_tweet_vectors.gap_252) +
(tweet_gap.gap_253 * user_tweet_vectors.gap_253) +
(tweet_gap.gap_254 * user_tweet_vectors.gap_254) +
(tweet_gap.gap_255 * user_tweet_vectors.gap_255) +
(tweet_gap.gap_256 * user_tweet_vectors.gap_256) +
(tweet_gap.gap_257 * user_tweet_vectors.gap_257) +
(tweet_gap.gap_258 * user_tweet_vectors.gap_258) +
(tweet_gap.gap_259 * user_tweet_vectors.gap_259) +
(tweet_gap.gap_260 * user_tweet_vectors.gap_260) +
(tweet_gap.gap_261 * user_tweet_vectors.gap_261) +
(tweet_gap.gap_262 * user_tweet_vectors.gap_262) +
(tweet_gap.gap_263 * user_tweet_vectors.gap_263) +
(tweet_gap.gap_264 * user_tweet_vectors.gap_264) +
(tweet_gap.gap_265 * user_tweet_vectors.gap_265) +
(tweet_gap.gap_266 * user_tweet_vectors.gap_266) +
(tweet_gap.gap_267 * user_tweet_vectors.gap_267) +
(tweet_gap.gap_268 * user_tweet_vectors.gap_268) +
(tweet_gap.gap_269 * user_tweet_vectors.gap_269) +
(tweet_gap.gap_270 * user_tweet_vectors.gap_270) +
(tweet_gap.gap_271 * user_tweet_vectors.gap_271) +
(tweet_gap.gap_272 * user_tweet_vectors.gap_272) +
(tweet_gap.gap_273 * user_tweet_vectors.gap_273) +
(tweet_gap.gap_274 * user_tweet_vectors.gap_274) +
(tweet_gap.gap_275 * user_tweet_vectors.gap_275) +
(tweet_gap.gap_276 * user_tweet_vectors.gap_276) +
(tweet_gap.gap_277 * user_tweet_vectors.gap_277) +
(tweet_gap.gap_278 * user_tweet_vectors.gap_278) +
(tweet_gap.gap_279 * user_tweet_vectors.gap_279) +
(tweet_gap.gap_280 * user_tweet_vectors.gap_280) +
(tweet_gap.gap_281 * user_tweet_vectors.gap_281) +
(tweet_gap.gap_282 * user_tweet_vectors.gap_282) +
(tweet_gap.gap_283 * user_tweet_vectors.gap_283) +
(tweet_gap.gap_284 * user_tweet_vectors.gap_284) +
(tweet_gap.gap_285 * user_tweet_vectors.gap_285) +
(tweet_gap.gap_286 * user_tweet_vectors.gap_286) +
(tweet_gap.gap_287 * user_tweet_vectors.gap_287) +
(tweet_gap.gap_288 * user_tweet_vectors.gap_288) +
(tweet_gap.gap_289 * user_tweet_vectors.gap_289) +
(tweet_gap.gap_290 * user_tweet_vectors.gap_290) +
(tweet_gap.gap_291 * user_tweet_vectors.gap_291) +
(tweet_gap.gap_292 * user_tweet_vectors.gap_292) +
(tweet_gap.gap_293 * user_tweet_vectors.gap_293) +
(tweet_gap.gap_294 * user_tweet_vectors.gap_294) +
(tweet_gap.gap_295 * user_tweet_vectors.gap_295) +
(tweet_gap.gap_296 * user_tweet_vectors.gap_296) +
(tweet_gap.gap_297 * user_tweet_vectors.gap_297) +
(tweet_gap.gap_298 * user_tweet_vectors.gap_298) +
(tweet_gap.gap_299 * user_tweet_vectors.gap_299) +
(tweet_gap.gap_300 * user_tweet_vectors.gap_300) +
(tweet_gap.gap_301 * user_tweet_vectors.gap_301) +
(tweet_gap.gap_302 * user_tweet_vectors.gap_302) +
(tweet_gap.gap_303 * user_tweet_vectors.gap_303) +
(tweet_gap.gap_304 * user_tweet_vectors.gap_304) +
(tweet_gap.gap_305 * user_tweet_vectors.gap_305) +
(tweet_gap.gap_306 * user_tweet_vectors.gap_306) +
(tweet_gap.gap_307 * user_tweet_vectors.gap_307) +
(tweet_gap.gap_308 * user_tweet_vectors.gap_308) +
(tweet_gap.gap_309 * user_tweet_vectors.gap_309) +
(tweet_gap.gap_310 * user_tweet_vectors.gap_310) +
(tweet_gap.gap_311 * user_tweet_vectors.gap_311) +
(tweet_gap.gap_312 * user_tweet_vectors.gap_312) +
(tweet_gap.gap_313 * user_tweet_vectors.gap_313) +
(tweet_gap.gap_314 * user_tweet_vectors.gap_314) +
(tweet_gap.gap_315 * user_tweet_vectors.gap_315) +
(tweet_gap.gap_316 * user_tweet_vectors.gap_316) +
(tweet_gap.gap_317 * user_tweet_vectors.gap_317) +
(tweet_gap.gap_318 * user_tweet_vectors.gap_318) +
(tweet_gap.gap_319 * user_tweet_vectors.gap_319) +
(tweet_gap.gap_320 * user_tweet_vectors.gap_320) +
(tweet_gap.gap_321 * user_tweet_vectors.gap_321) +
(tweet_gap.gap_322 * user_tweet_vectors.gap_322) +
(tweet_gap.gap_323 * user_tweet_vectors.gap_323) +
(tweet_gap.gap_324 * user_tweet_vectors.gap_324) +
(tweet_gap.gap_325 * user_tweet_vectors.gap_325) +
(tweet_gap.gap_326 * user_tweet_vectors.gap_326) +
(tweet_gap.gap_327 * user_tweet_vectors.gap_327) +
(tweet_gap.gap_328 * user_tweet_vectors.gap_328) +
(tweet_gap.gap_329 * user_tweet_vectors.gap_329) +
(tweet_gap.gap_330 * user_tweet_vectors.gap_330) +
(tweet_gap.gap_331 * user_tweet_vectors.gap_331) +
(tweet_gap.gap_332 * user_tweet_vectors.gap_332) +
(tweet_gap.gap_333 * user_tweet_vectors.gap_333) +
(tweet_gap.gap_334 * user_tweet_vectors.gap_334) +
(tweet_gap.gap_335 * user_tweet_vectors.gap_335) +
(tweet_gap.gap_336 * user_tweet_vectors.gap_336) +
(tweet_gap.gap_337 * user_tweet_vectors.gap_337) +
(tweet_gap.gap_338 * user_tweet_vectors.gap_338) +
(tweet_gap.gap_339 * user_tweet_vectors.gap_339) +
(tweet_gap.gap_340 * user_tweet_vectors.gap_340) +
(tweet_gap.gap_341 * user_tweet_vectors.gap_341) +
(tweet_gap.gap_342 * user_tweet_vectors.gap_342) +
(tweet_gap.gap_343 * user_tweet_vectors.gap_343) +
(tweet_gap.gap_344 * user_tweet_vectors.gap_344) +
(tweet_gap.gap_345 * user_tweet_vectors.gap_345) +
(tweet_gap.gap_346 * user_tweet_vectors.gap_346) +
(tweet_gap.gap_347 * user_tweet_vectors.gap_347) +
(tweet_gap.gap_348 * user_tweet_vectors.gap_348) +
(tweet_gap.gap_349 * user_tweet_vectors.gap_349) +
(tweet_gap.gap_350 * user_tweet_vectors.gap_350) +
(tweet_gap.gap_351 * user_tweet_vectors.gap_351) +
(tweet_gap.gap_352 * user_tweet_vectors.gap_352) +
(tweet_gap.gap_353 * user_tweet_vectors.gap_353) +
(tweet_gap.gap_354 * user_tweet_vectors.gap_354) +
(tweet_gap.gap_355 * user_tweet_vectors.gap_355) +
(tweet_gap.gap_356 * user_tweet_vectors.gap_356) +
(tweet_gap.gap_357 * user_tweet_vectors.gap_357) +
(tweet_gap.gap_358 * user_tweet_vectors.gap_358) +
(tweet_gap.gap_359 * user_tweet_vectors.gap_359) +
(tweet_gap.gap_360 * user_tweet_vectors.gap_360) +
(tweet_gap.gap_361 * user_tweet_vectors.gap_361) +
(tweet_gap.gap_362 * user_tweet_vectors.gap_362) +
(tweet_gap.gap_363 * user_tweet_vectors.gap_363) +
(tweet_gap.gap_364 * user_tweet_vectors.gap_364) +
(tweet_gap.gap_365 * user_tweet_vectors.gap_365) +
(tweet_gap.gap_366 * user_tweet_vectors.gap_366) +
(tweet_gap.gap_367 * user_tweet_vectors.gap_367) +
(tweet_gap.gap_368 * user_tweet_vectors.gap_368) +
(tweet_gap.gap_369 * user_tweet_vectors.gap_369) +
(tweet_gap.gap_370 * user_tweet_vectors.gap_370) +
(tweet_gap.gap_371 * user_tweet_vectors.gap_371) +
(tweet_gap.gap_372 * user_tweet_vectors.gap_372) +
(tweet_gap.gap_373 * user_tweet_vectors.gap_373) +
(tweet_gap.gap_374 * user_tweet_vectors.gap_374) +
(tweet_gap.gap_375 * user_tweet_vectors.gap_375) +
(tweet_gap.gap_376 * user_tweet_vectors.gap_376) +
(tweet_gap.gap_377 * user_tweet_vectors.gap_377) +
(tweet_gap.gap_378 * user_tweet_vectors.gap_378) +
(tweet_gap.gap_379 * user_tweet_vectors.gap_379) +
(tweet_gap.gap_380 * user_tweet_vectors.gap_380) +
(tweet_gap.gap_381 * user_tweet_vectors.gap_381) +
(tweet_gap.gap_382 * user_tweet_vectors.gap_382) +
(tweet_gap.gap_383 * user_tweet_vectors.gap_383) +
(tweet_gap.gap_384 * user_tweet_vectors.gap_384) +
(tweet_gap.gap_385 * user_tweet_vectors.gap_385) +
(tweet_gap.gap_386 * user_tweet_vectors.gap_386) +
(tweet_gap.gap_387 * user_tweet_vectors.gap_387) +
(tweet_gap.gap_388 * user_tweet_vectors.gap_388) +
(tweet_gap.gap_389 * user_tweet_vectors.gap_389) +
(tweet_gap.gap_390 * user_tweet_vectors.gap_390) +
(tweet_gap.gap_391 * user_tweet_vectors.gap_391) +
(tweet_gap.gap_392 * user_tweet_vectors.gap_392) +
(tweet_gap.gap_393 * user_tweet_vectors.gap_393) +
(tweet_gap.gap_394 * user_tweet_vectors.gap_394) +
(tweet_gap.gap_395 * user_tweet_vectors.gap_395) +
(tweet_gap.gap_396 * user_tweet_vectors.gap_396) +
(tweet_gap.gap_397 * user_tweet_vectors.gap_397) +
(tweet_gap.gap_398 * user_tweet_vectors.gap_398) +
(tweet_gap.gap_399 * user_tweet_vectors.gap_399) +
(tweet_gap.gap_400 * user_tweet_vectors.gap_400) +
(tweet_gap.gap_401 * user_tweet_vectors.gap_401) +
(tweet_gap.gap_402 * user_tweet_vectors.gap_402) +
(tweet_gap.gap_403 * user_tweet_vectors.gap_403) +
(tweet_gap.gap_404 * user_tweet_vectors.gap_404) +
(tweet_gap.gap_405 * user_tweet_vectors.gap_405) +
(tweet_gap.gap_406 * user_tweet_vectors.gap_406) +
(tweet_gap.gap_407 * user_tweet_vectors.gap_407) +
(tweet_gap.gap_408 * user_tweet_vectors.gap_408) +
(tweet_gap.gap_409 * user_tweet_vectors.gap_409) +
(tweet_gap.gap_410 * user_tweet_vectors.gap_410) +
(tweet_gap.gap_411 * user_tweet_vectors.gap_411) +
(tweet_gap.gap_412 * user_tweet_vectors.gap_412) +
(tweet_gap.gap_413 * user_tweet_vectors.gap_413) +
(tweet_gap.gap_414 * user_tweet_vectors.gap_414) +
(tweet_gap.gap_415 * user_tweet_vectors.gap_415) +
(tweet_gap.gap_416 * user_tweet_vectors.gap_416) +
(tweet_gap.gap_417 * user_tweet_vectors.gap_417) +
(tweet_gap.gap_418 * user_tweet_vectors.gap_418) +
(tweet_gap.gap_419 * user_tweet_vectors.gap_419) +
(tweet_gap.gap_420 * user_tweet_vectors.gap_420) +
(tweet_gap.gap_421 * user_tweet_vectors.gap_421) +
(tweet_gap.gap_422 * user_tweet_vectors.gap_422) +
(tweet_gap.gap_423 * user_tweet_vectors.gap_423) +
(tweet_gap.gap_424 * user_tweet_vectors.gap_424) +
(tweet_gap.gap_425 * user_tweet_vectors.gap_425) +
(tweet_gap.gap_426 * user_tweet_vectors.gap_426) +
(tweet_gap.gap_427 * user_tweet_vectors.gap_427) +
(tweet_gap.gap_428 * user_tweet_vectors.gap_428) +
(tweet_gap.gap_429 * user_tweet_vectors.gap_429) +
(tweet_gap.gap_430 * user_tweet_vectors.gap_430) +
(tweet_gap.gap_431 * user_tweet_vectors.gap_431) +
(tweet_gap.gap_432 * user_tweet_vectors.gap_432) +
(tweet_gap.gap_433 * user_tweet_vectors.gap_433) +
(tweet_gap.gap_434 * user_tweet_vectors.gap_434) +
(tweet_gap.gap_435 * user_tweet_vectors.gap_435) +
(tweet_gap.gap_436 * user_tweet_vectors.gap_436) +
(tweet_gap.gap_437 * user_tweet_vectors.gap_437) +
(tweet_gap.gap_438 * user_tweet_vectors.gap_438) +
(tweet_gap.gap_439 * user_tweet_vectors.gap_439) +
(tweet_gap.gap_440 * user_tweet_vectors.gap_440) +
(tweet_gap.gap_441 * user_tweet_vectors.gap_441) +
(tweet_gap.gap_442 * user_tweet_vectors.gap_442) +
(tweet_gap.gap_443 * user_tweet_vectors.gap_443) +
(tweet_gap.gap_444 * user_tweet_vectors.gap_444) +
(tweet_gap.gap_445 * user_tweet_vectors.gap_445) +
(tweet_gap.gap_446 * user_tweet_vectors.gap_446) +
(tweet_gap.gap_447 * user_tweet_vectors.gap_447) +
(tweet_gap.gap_448 * user_tweet_vectors.gap_448) +
(tweet_gap.gap_449 * user_tweet_vectors.gap_449) +
(tweet_gap.gap_450 * user_tweet_vectors.gap_450) +
(tweet_gap.gap_451 * user_tweet_vectors.gap_451) +
(tweet_gap.gap_452 * user_tweet_vectors.gap_452) +
(tweet_gap.gap_453 * user_tweet_vectors.gap_453) +
(tweet_gap.gap_454 * user_tweet_vectors.gap_454) +
(tweet_gap.gap_455 * user_tweet_vectors.gap_455) +
(tweet_gap.gap_456 * user_tweet_vectors.gap_456) +
(tweet_gap.gap_457 * user_tweet_vectors.gap_457) +
(tweet_gap.gap_458 * user_tweet_vectors.gap_458) +
(tweet_gap.gap_459 * user_tweet_vectors.gap_459) +
(tweet_gap.gap_460 * user_tweet_vectors.gap_460) +
(tweet_gap.gap_461 * user_tweet_vectors.gap_461) +
(tweet_gap.gap_462 * user_tweet_vectors.gap_462) +
(tweet_gap.gap_463 * user_tweet_vectors.gap_463) +
(tweet_gap.gap_464 * user_tweet_vectors.gap_464) +
(tweet_gap.gap_465 * user_tweet_vectors.gap_465) +
(tweet_gap.gap_466 * user_tweet_vectors.gap_466) +
(tweet_gap.gap_467 * user_tweet_vectors.gap_467) +
(tweet_gap.gap_468 * user_tweet_vectors.gap_468) +
(tweet_gap.gap_469 * user_tweet_vectors.gap_469) +
(tweet_gap.gap_470 * user_tweet_vectors.gap_470) +
(tweet_gap.gap_471 * user_tweet_vectors.gap_471) +
(tweet_gap.gap_472 * user_tweet_vectors.gap_472) +
(tweet_gap.gap_473 * user_tweet_vectors.gap_473) +
(tweet_gap.gap_474 * user_tweet_vectors.gap_474) +
(tweet_gap.gap_475 * user_tweet_vectors.gap_475) +
(tweet_gap.gap_476 * user_tweet_vectors.gap_476) +
(tweet_gap.gap_477 * user_tweet_vectors.gap_477) +
(tweet_gap.gap_478 * user_tweet_vectors.gap_478) +
(tweet_gap.gap_479 * user_tweet_vectors.gap_479) +
(tweet_gap.gap_480 * user_tweet_vectors.gap_480) +
(tweet_gap.gap_481 * user_tweet_vectors.gap_481) +
(tweet_gap.gap_482 * user_tweet_vectors.gap_482) +
(tweet_gap.gap_483 * user_tweet_vectors.gap_483) +
(tweet_gap.gap_484 * user_tweet_vectors.gap_484) +
(tweet_gap.gap_485 * user_tweet_vectors.gap_485) +
(tweet_gap.gap_486 * user_tweet_vectors.gap_486) +
(tweet_gap.gap_487 * user_tweet_vectors.gap_487) +
(tweet_gap.gap_488 * user_tweet_vectors.gap_488) +
(tweet_gap.gap_489 * user_tweet_vectors.gap_489) +
(tweet_gap.gap_490 * user_tweet_vectors.gap_490) +
(tweet_gap.gap_491 * user_tweet_vectors.gap_491) +
(tweet_gap.gap_492 * user_tweet_vectors.gap_492) +
(tweet_gap.gap_493 * user_tweet_vectors.gap_493) +
(tweet_gap.gap_494 * user_tweet_vectors.gap_494) +
(tweet_gap.gap_495 * user_tweet_vectors.gap_495) +
(tweet_gap.gap_496 * user_tweet_vectors.gap_496) +
(tweet_gap.gap_497 * user_tweet_vectors.gap_497) +
(tweet_gap.gap_498 * user_tweet_vectors.gap_498) +
(tweet_gap.gap_499 * user_tweet_vectors.gap_499) +
(tweet_gap.gap_500 * user_tweet_vectors.gap_500) +
(tweet_gap.gap_501 * user_tweet_vectors.gap_501) +
(tweet_gap.gap_502 * user_tweet_vectors.gap_502) +
(tweet_gap.gap_503 * user_tweet_vectors.gap_503) +
(tweet_gap.gap_504 * user_tweet_vectors.gap_504) +
(tweet_gap.gap_505 * user_tweet_vectors.gap_505) +
(tweet_gap.gap_506 * user_tweet_vectors.gap_506) +
(tweet_gap.gap_507 * user_tweet_vectors.gap_507) +
(tweet_gap.gap_508 * user_tweet_vectors.gap_508) +
(tweet_gap.gap_509 * user_tweet_vectors.gap_509) +
(tweet_gap.gap_510 * user_tweet_vectors.gap_510) +
(tweet_gap.gap_511 * user_tweet_vectors.gap_511) +
(tweet_gap.gap_512 * user_tweet_vectors.gap_512) +
(tweet_gap.gap_513 * user_tweet_vectors.gap_513) +
(tweet_gap.gap_514 * user_tweet_vectors.gap_514) +
(tweet_gap.gap_515 * user_tweet_vectors.gap_515) +
(tweet_gap.gap_516 * user_tweet_vectors.gap_516) +
(tweet_gap.gap_517 * user_tweet_vectors.gap_517) +
(tweet_gap.gap_518 * user_tweet_vectors.gap_518) +
(tweet_gap.gap_519 * user_tweet_vectors.gap_519) +
(tweet_gap.gap_520 * user_tweet_vectors.gap_520) +
(tweet_gap.gap_521 * user_tweet_vectors.gap_521) +
(tweet_gap.gap_522 * user_tweet_vectors.gap_522) +
(tweet_gap.gap_523 * user_tweet_vectors.gap_523) +
(tweet_gap.gap_524 * user_tweet_vectors.gap_524) +
(tweet_gap.gap_525 * user_tweet_vectors.gap_525) +
(tweet_gap.gap_526 * user_tweet_vectors.gap_526) +
(tweet_gap.gap_527 * user_tweet_vectors.gap_527) +
(tweet_gap.gap_528 * user_tweet_vectors.gap_528) +
(tweet_gap.gap_529 * user_tweet_vectors.gap_529) +
(tweet_gap.gap_530 * user_tweet_vectors.gap_530) +
(tweet_gap.gap_531 * user_tweet_vectors.gap_531) +
(tweet_gap.gap_532 * user_tweet_vectors.gap_532) +
(tweet_gap.gap_533 * user_tweet_vectors.gap_533) +
(tweet_gap.gap_534 * user_tweet_vectors.gap_534) +
(tweet_gap.gap_535 * user_tweet_vectors.gap_535) +
(tweet_gap.gap_536 * user_tweet_vectors.gap_536) +
(tweet_gap.gap_537 * user_tweet_vectors.gap_537) +
(tweet_gap.gap_538 * user_tweet_vectors.gap_538) +
(tweet_gap.gap_539 * user_tweet_vectors.gap_539) +
(tweet_gap.gap_540 * user_tweet_vectors.gap_540) +
(tweet_gap.gap_541 * user_tweet_vectors.gap_541) +
(tweet_gap.gap_542 * user_tweet_vectors.gap_542) +
(tweet_gap.gap_543 * user_tweet_vectors.gap_543) +
(tweet_gap.gap_544 * user_tweet_vectors.gap_544) +
(tweet_gap.gap_545 * user_tweet_vectors.gap_545) +
(tweet_gap.gap_546 * user_tweet_vectors.gap_546) +
(tweet_gap.gap_547 * user_tweet_vectors.gap_547) +
(tweet_gap.gap_548 * user_tweet_vectors.gap_548) +
(tweet_gap.gap_549 * user_tweet_vectors.gap_549) +
(tweet_gap.gap_550 * user_tweet_vectors.gap_550) +
(tweet_gap.gap_551 * user_tweet_vectors.gap_551) +
(tweet_gap.gap_552 * user_tweet_vectors.gap_552) +
(tweet_gap.gap_553 * user_tweet_vectors.gap_553) +
(tweet_gap.gap_554 * user_tweet_vectors.gap_554) +
(tweet_gap.gap_555 * user_tweet_vectors.gap_555) +
(tweet_gap.gap_556 * user_tweet_vectors.gap_556) +
(tweet_gap.gap_557 * user_tweet_vectors.gap_557) +
(tweet_gap.gap_558 * user_tweet_vectors.gap_558) +
(tweet_gap.gap_559 * user_tweet_vectors.gap_559) +
(tweet_gap.gap_560 * user_tweet_vectors.gap_560) +
(tweet_gap.gap_561 * user_tweet_vectors.gap_561) +
(tweet_gap.gap_562 * user_tweet_vectors.gap_562) +
(tweet_gap.gap_563 * user_tweet_vectors.gap_563) +
(tweet_gap.gap_564 * user_tweet_vectors.gap_564) +
(tweet_gap.gap_565 * user_tweet_vectors.gap_565) +
(tweet_gap.gap_566 * user_tweet_vectors.gap_566) +
(tweet_gap.gap_567 * user_tweet_vectors.gap_567) +
(tweet_gap.gap_568 * user_tweet_vectors.gap_568) +
(tweet_gap.gap_569 * user_tweet_vectors.gap_569) +
(tweet_gap.gap_570 * user_tweet_vectors.gap_570) +
(tweet_gap.gap_571 * user_tweet_vectors.gap_571) +
(tweet_gap.gap_572 * user_tweet_vectors.gap_572) +
(tweet_gap.gap_573 * user_tweet_vectors.gap_573) +
(tweet_gap.gap_574 * user_tweet_vectors.gap_574) +
(tweet_gap.gap_575 * user_tweet_vectors.gap_575) +
(tweet_gap.gap_576 * user_tweet_vectors.gap_576) +
(tweet_gap.gap_577 * user_tweet_vectors.gap_577) +
(tweet_gap.gap_578 * user_tweet_vectors.gap_578) +
(tweet_gap.gap_579 * user_tweet_vectors.gap_579) +
(tweet_gap.gap_580 * user_tweet_vectors.gap_580) +
(tweet_gap.gap_581 * user_tweet_vectors.gap_581) +
(tweet_gap.gap_582 * user_tweet_vectors.gap_582) +
(tweet_gap.gap_583 * user_tweet_vectors.gap_583) +
(tweet_gap.gap_584 * user_tweet_vectors.gap_584) +
(tweet_gap.gap_585 * user_tweet_vectors.gap_585) +
(tweet_gap.gap_586 * user_tweet_vectors.gap_586) +
(tweet_gap.gap_587 * user_tweet_vectors.gap_587) +
(tweet_gap.gap_588 * user_tweet_vectors.gap_588) +
(tweet_gap.gap_589 * user_tweet_vectors.gap_589) +
(tweet_gap.gap_590 * user_tweet_vectors.gap_590) +
(tweet_gap.gap_591 * user_tweet_vectors.gap_591) +
(tweet_gap.gap_592 * user_tweet_vectors.gap_592) +
(tweet_gap.gap_593 * user_tweet_vectors.gap_593) +
(tweet_gap.gap_594 * user_tweet_vectors.gap_594) +
(tweet_gap.gap_595 * user_tweet_vectors.gap_595) +
(tweet_gap.gap_596 * user_tweet_vectors.gap_596) +
(tweet_gap.gap_597 * user_tweet_vectors.gap_597) +
(tweet_gap.gap_598 * user_tweet_vectors.gap_598) +
(tweet_gap.gap_599 * user_tweet_vectors.gap_599) +
(tweet_gap.gap_600 * user_tweet_vectors.gap_600) +
(tweet_gap.gap_601 * user_tweet_vectors.gap_601) +
(tweet_gap.gap_602 * user_tweet_vectors.gap_602) +
(tweet_gap.gap_603 * user_tweet_vectors.gap_603) +
(tweet_gap.gap_604 * user_tweet_vectors.gap_604) +
(tweet_gap.gap_605 * user_tweet_vectors.gap_605) +
(tweet_gap.gap_606 * user_tweet_vectors.gap_606) +
(tweet_gap.gap_607 * user_tweet_vectors.gap_607) +
(tweet_gap.gap_608 * user_tweet_vectors.gap_608) +
(tweet_gap.gap_609 * user_tweet_vectors.gap_609) +
(tweet_gap.gap_610 * user_tweet_vectors.gap_610) +
(tweet_gap.gap_611 * user_tweet_vectors.gap_611) +
(tweet_gap.gap_612 * user_tweet_vectors.gap_612) +
(tweet_gap.gap_613 * user_tweet_vectors.gap_613) +
(tweet_gap.gap_614 * user_tweet_vectors.gap_614) +
(tweet_gap.gap_615 * user_tweet_vectors.gap_615) +
(tweet_gap.gap_616 * user_tweet_vectors.gap_616) +
(tweet_gap.gap_617 * user_tweet_vectors.gap_617) +
(tweet_gap.gap_618 * user_tweet_vectors.gap_618) +
(tweet_gap.gap_619 * user_tweet_vectors.gap_619) +
(tweet_gap.gap_620 * user_tweet_vectors.gap_620) +
(tweet_gap.gap_621 * user_tweet_vectors.gap_621) +
(tweet_gap.gap_622 * user_tweet_vectors.gap_622) +
(tweet_gap.gap_623 * user_tweet_vectors.gap_623) +
(tweet_gap.gap_624 * user_tweet_vectors.gap_624) +
(tweet_gap.gap_625 * user_tweet_vectors.gap_625) +
(tweet_gap.gap_626 * user_tweet_vectors.gap_626) +
(tweet_gap.gap_627 * user_tweet_vectors.gap_627) +
(tweet_gap.gap_628 * user_tweet_vectors.gap_628) +
(tweet_gap.gap_629 * user_tweet_vectors.gap_629) +
(tweet_gap.gap_630 * user_tweet_vectors.gap_630) +
(tweet_gap.gap_631 * user_tweet_vectors.gap_631) +
(tweet_gap.gap_632 * user_tweet_vectors.gap_632) +
(tweet_gap.gap_633 * user_tweet_vectors.gap_633) +
(tweet_gap.gap_634 * user_tweet_vectors.gap_634) +
(tweet_gap.gap_635 * user_tweet_vectors.gap_635) +
(tweet_gap.gap_636 * user_tweet_vectors.gap_636) +
(tweet_gap.gap_637 * user_tweet_vectors.gap_637) +
(tweet_gap.gap_638 * user_tweet_vectors.gap_638) +
(tweet_gap.gap_639 * user_tweet_vectors.gap_639) +
(tweet_gap.gap_640 * user_tweet_vectors.gap_640) +
(tweet_gap.gap_641 * user_tweet_vectors.gap_641) +
(tweet_gap.gap_642 * user_tweet_vectors.gap_642) +
(tweet_gap.gap_643 * user_tweet_vectors.gap_643) +
(tweet_gap.gap_644 * user_tweet_vectors.gap_644) +
(tweet_gap.gap_645 * user_tweet_vectors.gap_645) +
(tweet_gap.gap_646 * user_tweet_vectors.gap_646) +
(tweet_gap.gap_647 * user_tweet_vectors.gap_647) +
(tweet_gap.gap_648 * user_tweet_vectors.gap_648) +
(tweet_gap.gap_649 * user_tweet_vectors.gap_649) +
(tweet_gap.gap_650 * user_tweet_vectors.gap_650) +
(tweet_gap.gap_651 * user_tweet_vectors.gap_651) +
(tweet_gap.gap_652 * user_tweet_vectors.gap_652) +
(tweet_gap.gap_653 * user_tweet_vectors.gap_653) +
(tweet_gap.gap_654 * user_tweet_vectors.gap_654) +
(tweet_gap.gap_655 * user_tweet_vectors.gap_655) +
(tweet_gap.gap_656 * user_tweet_vectors.gap_656) +
(tweet_gap.gap_657 * user_tweet_vectors.gap_657) +
(tweet_gap.gap_658 * user_tweet_vectors.gap_658) +
(tweet_gap.gap_659 * user_tweet_vectors.gap_659) +
(tweet_gap.gap_660 * user_tweet_vectors.gap_660) +
(tweet_gap.gap_661 * user_tweet_vectors.gap_661) +
(tweet_gap.gap_662 * user_tweet_vectors.gap_662) +
(tweet_gap.gap_663 * user_tweet_vectors.gap_663) +
(tweet_gap.gap_664 * user_tweet_vectors.gap_664) +
(tweet_gap.gap_665 * user_tweet_vectors.gap_665) +
(tweet_gap.gap_666 * user_tweet_vectors.gap_666) +
(tweet_gap.gap_667 * user_tweet_vectors.gap_667) +
(tweet_gap.gap_668 * user_tweet_vectors.gap_668) +
(tweet_gap.gap_669 * user_tweet_vectors.gap_669) +
(tweet_gap.gap_670 * user_tweet_vectors.gap_670) +
(tweet_gap.gap_671 * user_tweet_vectors.gap_671) +
(tweet_gap.gap_672 * user_tweet_vectors.gap_672) +
(tweet_gap.gap_673 * user_tweet_vectors.gap_673) +
(tweet_gap.gap_674 * user_tweet_vectors.gap_674) +
(tweet_gap.gap_675 * user_tweet_vectors.gap_675) +
(tweet_gap.gap_676 * user_tweet_vectors.gap_676) +
(tweet_gap.gap_677 * user_tweet_vectors.gap_677) +
(tweet_gap.gap_678 * user_tweet_vectors.gap_678) +
(tweet_gap.gap_679 * user_tweet_vectors.gap_679) +
(tweet_gap.gap_680 * user_tweet_vectors.gap_680) +
(tweet_gap.gap_681 * user_tweet_vectors.gap_681) +
(tweet_gap.gap_682 * user_tweet_vectors.gap_682) +
(tweet_gap.gap_683 * user_tweet_vectors.gap_683) +
(tweet_gap.gap_684 * user_tweet_vectors.gap_684) +
(tweet_gap.gap_685 * user_tweet_vectors.gap_685) +
(tweet_gap.gap_686 * user_tweet_vectors.gap_686) +
(tweet_gap.gap_687 * user_tweet_vectors.gap_687) +
(tweet_gap.gap_688 * user_tweet_vectors.gap_688) +
(tweet_gap.gap_689 * user_tweet_vectors.gap_689) +
(tweet_gap.gap_690 * user_tweet_vectors.gap_690) +
(tweet_gap.gap_691 * user_tweet_vectors.gap_691) +
(tweet_gap.gap_692 * user_tweet_vectors.gap_692) +
(tweet_gap.gap_693 * user_tweet_vectors.gap_693) +
(tweet_gap.gap_694 * user_tweet_vectors.gap_694) +
(tweet_gap.gap_695 * user_tweet_vectors.gap_695) +
(tweet_gap.gap_696 * user_tweet_vectors.gap_696) +
(tweet_gap.gap_697 * user_tweet_vectors.gap_697) +
(tweet_gap.gap_698 * user_tweet_vectors.gap_698) +
(tweet_gap.gap_699 * user_tweet_vectors.gap_699) +
(tweet_gap.gap_700 * user_tweet_vectors.gap_700) +
(tweet_gap.gap_701 * user_tweet_vectors.gap_701) +
(tweet_gap.gap_702 * user_tweet_vectors.gap_702) +
(tweet_gap.gap_703 * user_tweet_vectors.gap_703) +
(tweet_gap.gap_704 * user_tweet_vectors.gap_704) +
(tweet_gap.gap_705 * user_tweet_vectors.gap_705) +
(tweet_gap.gap_706 * user_tweet_vectors.gap_706) +
(tweet_gap.gap_707 * user_tweet_vectors.gap_707) +
(tweet_gap.gap_708 * user_tweet_vectors.gap_708) +
(tweet_gap.gap_709 * user_tweet_vectors.gap_709) +
(tweet_gap.gap_710 * user_tweet_vectors.gap_710) +
(tweet_gap.gap_711 * user_tweet_vectors.gap_711) +
(tweet_gap.gap_712 * user_tweet_vectors.gap_712) +
(tweet_gap.gap_713 * user_tweet_vectors.gap_713) +
(tweet_gap.gap_714 * user_tweet_vectors.gap_714) +
(tweet_gap.gap_715 * user_tweet_vectors.gap_715) +
(tweet_gap.gap_716 * user_tweet_vectors.gap_716) +
(tweet_gap.gap_717 * user_tweet_vectors.gap_717) +
(tweet_gap.gap_718 * user_tweet_vectors.gap_718) +
(tweet_gap.gap_719 * user_tweet_vectors.gap_719) +
(tweet_gap.gap_720 * user_tweet_vectors.gap_720) +
(tweet_gap.gap_721 * user_tweet_vectors.gap_721) +
(tweet_gap.gap_722 * user_tweet_vectors.gap_722) +
(tweet_gap.gap_723 * user_tweet_vectors.gap_723) +
(tweet_gap.gap_724 * user_tweet_vectors.gap_724) +
(tweet_gap.gap_725 * user_tweet_vectors.gap_725) +
(tweet_gap.gap_726 * user_tweet_vectors.gap_726) +
(tweet_gap.gap_727 * user_tweet_vectors.gap_727) +
(tweet_gap.gap_728 * user_tweet_vectors.gap_728) +
(tweet_gap.gap_729 * user_tweet_vectors.gap_729) +
(tweet_gap.gap_730 * user_tweet_vectors.gap_730) +
(tweet_gap.gap_731 * user_tweet_vectors.gap_731) +
(tweet_gap.gap_732 * user_tweet_vectors.gap_732) +
(tweet_gap.gap_733 * user_tweet_vectors.gap_733) +
(tweet_gap.gap_734 * user_tweet_vectors.gap_734) +
(tweet_gap.gap_735 * user_tweet_vectors.gap_735) +
(tweet_gap.gap_736 * user_tweet_vectors.gap_736) +
(tweet_gap.gap_737 * user_tweet_vectors.gap_737) +
(tweet_gap.gap_738 * user_tweet_vectors.gap_738) +
(tweet_gap.gap_739 * user_tweet_vectors.gap_739) +
(tweet_gap.gap_740 * user_tweet_vectors.gap_740) +
(tweet_gap.gap_741 * user_tweet_vectors.gap_741) +
(tweet_gap.gap_742 * user_tweet_vectors.gap_742) +
(tweet_gap.gap_743 * user_tweet_vectors.gap_743) +
(tweet_gap.gap_744 * user_tweet_vectors.gap_744) +
(tweet_gap.gap_745 * user_tweet_vectors.gap_745) +
(tweet_gap.gap_746 * user_tweet_vectors.gap_746) +
(tweet_gap.gap_747 * user_tweet_vectors.gap_747) +
(tweet_gap.gap_748 * user_tweet_vectors.gap_748) +
(tweet_gap.gap_749 * user_tweet_vectors.gap_749) +
(tweet_gap.gap_750 * user_tweet_vectors.gap_750) +
(tweet_gap.gap_751 * user_tweet_vectors.gap_751) +
(tweet_gap.gap_752 * user_tweet_vectors.gap_752) +
(tweet_gap.gap_753 * user_tweet_vectors.gap_753) +
(tweet_gap.gap_754 * user_tweet_vectors.gap_754) +
(tweet_gap.gap_755 * user_tweet_vectors.gap_755) +
(tweet_gap.gap_756 * user_tweet_vectors.gap_756) +
(tweet_gap.gap_757 * user_tweet_vectors.gap_757) +
(tweet_gap.gap_758 * user_tweet_vectors.gap_758) +
(tweet_gap.gap_759 * user_tweet_vectors.gap_759) +
(tweet_gap.gap_760 * user_tweet_vectors.gap_760) +
(tweet_gap.gap_761 * user_tweet_vectors.gap_761) +
(tweet_gap.gap_762 * user_tweet_vectors.gap_762) +
(tweet_gap.gap_763 * user_tweet_vectors.gap_763) +
(tweet_gap.gap_764 * user_tweet_vectors.gap_764) +
(tweet_gap.gap_765 * user_tweet_vectors.gap_765) +
(tweet_gap.gap_766 * user_tweet_vectors.gap_766) +
(tweet_gap.gap_767 * user_tweet_vectors.gap_767)
) as dot_product_of_engaged_tweet_and_engaging_user
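-- The sum above is the dot product between the tweet's pretrained BERT "gap"
-- embedding and the engaging user's aggregated tweet vector, taken over every
-- gap_* component shared by the two tables.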
from {table_name} t
left join `recsys2020.pretrained_bert_gap` tweet_gap on t.tweet_id = tweet_gap.tweet_id
left join user_tweet_vectors on t.engaging_user_id = user_tweet_vectors.user_id
order by t.tweet_id, t.engaging_user_id
"""
if __name__ == "__main__":
BertSimilarityBetweenTweetAndTweetsUserVectorsFeature.main()
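# Maintenance note: the long run of repeated
# "(tweet_gap.gap_i * user_tweet_vectors.gap_i)" terms could also be built
# programmatically and interpolated into the query, e.g. (assuming the
# embedding columns run from gap_0 through gap_767):
#
#     dot_product_sql = " +\n".join(
#         f"(tweet_gap.gap_{i} * user_tweet_vectors.gap_{i})" for i in range(768)
#     )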
| 41.304079
| 90
| 0.712345
|
80ec3a501b72a2c357bc9f855c07b80e31abed0a
| 22,072
|
py
|
Python
|
tensorflow_addons/seq2seq/tests/beam_search_decoder_test.py
|
leondgarse/addons
|
6c512e1ec49ae14ca61f1bbf0fd3403204ba21e2
|
[
"Apache-2.0"
] | 1,560
|
2018-11-26T23:57:34.000Z
|
2022-03-27T10:37:34.000Z
|
tensorflow_addons/seq2seq/tests/beam_search_decoder_test.py
|
leondgarse/addons
|
6c512e1ec49ae14ca61f1bbf0fd3403204ba21e2
|
[
"Apache-2.0"
] | 2,067
|
2018-11-28T04:40:23.000Z
|
2022-03-31T11:36:50.000Z
|
tensorflow_addons/seq2seq/tests/beam_search_decoder_test.py
|
leondgarse/addons
|
6c512e1ec49ae14ca61f1bbf0fd3403204ba21e2
|
[
"Apache-2.0"
] | 679
|
2018-11-27T14:39:25.000Z
|
2022-03-31T10:09:22.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfa.seq2seq.seq2seq.beam_search_decoder."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_addons.seq2seq import attention_wrapper
from tensorflow_addons.seq2seq import beam_search_decoder, gather_tree
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree():
# (max_time = 3, batch_size = 2, beam_width = 3)
# create (batch_size, max_time, beam_width) matrix and transpose it
predicted_ids = np.array(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[2, 3, 4], [5, 6, 7], [8, 9, 10]]],
dtype=np.int32,
).transpose([1, 0, 2])
parent_ids = np.array(
[[[0, 0, 0], [0, 1, 1], [2, 1, 2]], [[0, 0, 0], [1, 2, 0], [2, 1, 1]]],
dtype=np.int32,
).transpose([1, 0, 2])
    # max_sequence_lengths is shaped (batch_size = 2)
max_sequence_lengths = [3, 3]
expected_result = np.array(
[[[2, 2, 2], [6, 5, 6], [7, 8, 9]], [[2, 4, 4], [7, 6, 6], [8, 9, 10]]]
).transpose([1, 0, 2])
res = gather_tree(
predicted_ids,
parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=11,
)
np.testing.assert_equal(expected_result, res)
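# gather_tree_from_array reorders a per-step tensor so that each beam's slice
# follows the beam-search back-pointers in parent_ids; the helper below checks
# this for scalar and higher-rank entries, with and without the batch and beam
# dimensions merged.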
def _test_gather_tree_from_array(depth_ndims=0, merged_batch_beam=False):
array = np.array(
[
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 0, 0]],
[[2, 3, 4], [5, 6, 7], [8, 9, 10], [11, 12, 0]],
]
).transpose([1, 0, 2])
parent_ids = np.array(
[
[[0, 0, 0], [0, 1, 1], [2, 1, 2], [-1, -1, -1]],
[[0, 0, 0], [1, 1, 0], [2, 0, 1], [0, 1, 0]],
]
).transpose([1, 0, 2])
expected_array = np.array(
[
[[2, 2, 2], [6, 5, 6], [7, 8, 9], [0, 0, 0]],
[[2, 3, 2], [7, 5, 7], [8, 9, 8], [11, 12, 0]],
]
).transpose([1, 0, 2])
sequence_length = [[3, 3, 3], [4, 4, 3]]
array = tf.convert_to_tensor(array, dtype=tf.float32)
parent_ids = tf.convert_to_tensor(parent_ids, dtype=tf.int32)
expected_array = tf.convert_to_tensor(expected_array, dtype=tf.float32)
max_time = tf.shape(array)[0]
batch_size = tf.shape(array)[1]
beam_width = tf.shape(array)[2]
def _tile_in_depth(tensor):
# Generate higher rank tensors by concatenating tensor and
# tensor + 1.
for _ in range(depth_ndims):
tensor = tf.stack([tensor, tensor + 1], -1)
return tensor
if merged_batch_beam:
array = tf.reshape(array, [max_time, batch_size * beam_width])
expected_array = tf.reshape(expected_array, [max_time, batch_size * beam_width])
if depth_ndims > 0:
array = _tile_in_depth(array)
expected_array = _tile_in_depth(expected_array)
sorted_array = beam_search_decoder.gather_tree_from_array(
array, parent_ids, sequence_length
)
np.testing.assert_equal(expected_array.numpy(), sorted_array.numpy())
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree_from_array_scalar():
_test_gather_tree_from_array()
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree_from_array_1d():
_test_gather_tree_from_array(depth_ndims=1)
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree_from_array_1d_with_merged_batch_beam():
_test_gather_tree_from_array(depth_ndims=1, merged_batch_beam=True)
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree_from_array_2d():
_test_gather_tree_from_array(depth_ndims=2)
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_gather_tree_from_array_complex_trajectory():
# Max. time = 7, batch = 1, beam = 5.
array = np.expand_dims(
np.array(
[
[[25, 12, 114, 89, 97]],
[[9, 91, 64, 11, 162]],
[[34, 34, 34, 34, 34]],
[[2, 4, 2, 2, 4]],
[[2, 3, 6, 2, 2]],
[[2, 2, 2, 3, 2]],
[[2, 2, 2, 2, 2]],
]
),
-1,
)
parent_ids = np.array(
[
[[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0]],
[[0, 1, 2, 3, 4]],
[[0, 0, 1, 2, 1]],
[[0, 1, 1, 2, 3]],
[[0, 1, 3, 1, 2]],
[[0, 1, 2, 3, 4]],
]
)
expected_array = np.expand_dims(
np.array(
[
[[25, 25, 25, 25, 25]],
[[9, 9, 91, 9, 9]],
[[34, 34, 34, 34, 34]],
[[2, 4, 2, 4, 4]],
[[2, 3, 6, 3, 6]],
[[2, 2, 2, 3, 2]],
[[2, 2, 2, 2, 2]],
]
),
-1,
)
sequence_length = [[4, 6, 4, 7, 6]]
array = tf.convert_to_tensor(array, dtype=tf.float32)
parent_ids = tf.convert_to_tensor(parent_ids, dtype=tf.int32)
expected_array = tf.convert_to_tensor(expected_array, dtype=tf.float32)
sorted_array = beam_search_decoder.gather_tree_from_array(
array, parent_ids, sequence_length
)
np.testing.assert_equal(expected_array.numpy(), sorted_array.numpy())
def basic_test_array_shape_dynamic_checks(
static_shape, dynamic_shape, batch_size, beam_width, is_valid=True
):
@tf.function(input_signature=(tf.TensorSpec(dynamic_shape, dtype=tf.float32),))
def _test_body(t):
beam_search_decoder._check_batch_beam(t, batch_size, beam_width)
t = tf.random.uniform(static_shape, dtype=tf.float32)
if is_valid:
_test_body(t)
else:
with pytest.raises(tf.errors.InvalidArgumentError):
_test_body(t)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_array_shape_dynamic_checks():
basic_test_array_shape_dynamic_checks(
(8, 4, 5, 10), (None, None, 5, 10), 4, 5, is_valid=True
)
basic_test_array_shape_dynamic_checks(
(8, 20, 10), (None, None, 10), 4, 5, is_valid=True
)
basic_test_array_shape_dynamic_checks(
(8, 21, 10), (None, None, 10), 4, 5, is_valid=False
)
basic_test_array_shape_dynamic_checks(
(8, 4, 6, 10), (None, None, None, 10), 4, 5, is_valid=False
)
basic_test_array_shape_dynamic_checks((8, 4), (None, None), 4, 5, is_valid=False)
def test_array_shape_static_checks():
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([None, None, None]), 3, 5
)
is True
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([15, None, None]), 3, 5
)
is True
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([16, None, None]), 3, 5
)
is False
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([3, 5, None]), 3, 5
)
is True
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([3, 6, None]), 3, 5
)
is False
)
assert (
beam_search_decoder._check_static_batch_beam_maybe(
tf.TensorShape([5, 3, None]), 3, 5
)
is False
)
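# _mask_probs is expected to leave unfinished beams untouched and, for beams
# that are already finished, concentrate all probability on the EOS token
# (every other token is pushed down to the float32 minimum).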
def test_eos_masking():
probs = tf.constant(
[
[
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.3, -0.3, -0.3, 3, 0],
[5, 6, 0, 0, 0],
],
[[-0.2, -0.2, -0.2, -0.2, 0], [-0.3, -0.3, -0.1, 3, 0], [5, 6, 3, 0, 0]],
]
)
eos_token = 0
previously_finished = np.array([[0, 1, 0], [0, 1, 1]], dtype=bool)
masked = beam_search_decoder._mask_probs(probs, eos_token, previously_finished)
masked = masked.numpy()
np.testing.assert_equal(probs[0][0], masked[0][0])
np.testing.assert_equal(probs[0][2], masked[0][2])
np.testing.assert_equal(probs[1][0], masked[1][0])
np.testing.assert_equal(masked[0][1][0], 0)
np.testing.assert_equal(masked[1][1][0], 0)
np.testing.assert_equal(masked[1][2][0], 0)
for i in range(1, 5):
np.testing.assert_allclose(masked[0][1][i], np.finfo("float32").min)
np.testing.assert_allclose(masked[1][1][i], np.finfo("float32").min)
np.testing.assert_allclose(masked[1][2][i], np.finfo("float32").min)
def test_missing_embedding_fn():
batch_size = 6
beam_width = 4
cell = tf.keras.layers.LSTMCell(5)
decoder = beam_search_decoder.BeamSearchDecoder(cell, beam_width=beam_width)
initial_state = cell.get_initial_state(
batch_size=batch_size * beam_width, dtype=tf.float32
)
start_tokens = tf.ones([batch_size], dtype=tf.int32)
end_token = tf.constant(2, dtype=tf.int32)
with pytest.raises(ValueError):
decoder(None, start_tokens, end_token, initial_state)
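# _beam_search_step expands every live beam by vocab_size candidates, applies
# the length penalty when scoring, and keeps the top beam_width continuations
# per batch entry; the assertions below check the chosen ids, parent beams,
# lengths, and accumulated log probabilities.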
def test_beam_step():
batch_size = 2
beam_width = 3
vocab_size = 5
end_token = 0
length_penalty_weight = 0.6
coverage_penalty_weight = 0.0
output_all_scores = False
dummy_cell_state = tf.zeros([batch_size, beam_width])
beam_state = beam_search_decoder.BeamSearchDecoderState(
cell_state=dummy_cell_state,
log_probs=tf.nn.log_softmax(tf.ones([batch_size, beam_width])),
lengths=tf.constant(2, shape=[batch_size, beam_width], dtype=tf.int64),
finished=tf.zeros([batch_size, beam_width], dtype=tf.bool),
accumulated_attention_probs=(),
)
logits_ = np.full([batch_size, beam_width, vocab_size], 0.0001)
logits_[0, 0, 2] = 1.9
logits_[0, 0, 3] = 2.1
logits_[0, 1, 3] = 3.1
logits_[0, 1, 4] = 0.9
logits_[1, 0, 1] = 0.5
logits_[1, 1, 2] = 2.7
logits_[1, 2, 2] = 10.0
logits_[1, 2, 3] = 0.2
logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits)
outputs, next_beam_state = beam_search_decoder._beam_search_step(
time=2,
logits=logits,
next_cell_state=dummy_cell_state,
beam_state=beam_state,
batch_size=tf.convert_to_tensor(batch_size),
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight,
output_all_scores=output_all_scores,
)
outputs_, next_state_, state_, log_probs_ = [
outputs,
next_beam_state,
beam_state,
log_probs,
]
np.testing.assert_equal(
outputs_.predicted_ids.numpy(), np.asanyarray([[3, 3, 2], [2, 2, 1]])
)
np.testing.assert_equal(
outputs_.parent_ids.numpy(), np.asanyarray([[1, 0, 0], [2, 1, 0]])
)
np.testing.assert_equal(
next_state_.lengths.numpy(), np.asanyarray([[3, 3, 3], [3, 3, 3]])
)
np.testing.assert_equal(
next_state_.finished.numpy(),
np.asanyarray([[False, False, False], [False, False, False]]),
)
expected_log_probs = []
expected_log_probs.append(state_.log_probs[0].numpy())
expected_log_probs.append(state_.log_probs[1].numpy())
expected_log_probs[0][0] += log_probs_[0, 1, 3]
expected_log_probs[0][1] += log_probs_[0, 0, 3]
expected_log_probs[0][2] += log_probs_[0, 0, 2]
expected_log_probs[1][0] += log_probs_[1, 2, 2]
expected_log_probs[1][1] += log_probs_[1, 1, 2]
expected_log_probs[1][2] += log_probs_[1, 0, 1]
np.testing.assert_equal(
next_state_.log_probs.numpy(), np.asanyarray(expected_log_probs)
)
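# Once a beam has emitted the end token it is marked finished: its length stops
# growing and only its EOS continuation is carried forward.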
def test_step_with_eos():
batch_size = 2
beam_width = 3
vocab_size = 5
end_token = 0
length_penalty_weight = 0.6
coverage_penalty_weight = 0.0
output_all_scores = False
dummy_cell_state = tf.zeros([batch_size, beam_width])
beam_state = beam_search_decoder.BeamSearchDecoderState(
cell_state=dummy_cell_state,
log_probs=tf.nn.log_softmax(tf.ones([batch_size, beam_width])),
lengths=tf.convert_to_tensor([[2, 1, 2], [2, 2, 1]], dtype=tf.int64),
finished=tf.convert_to_tensor(
[[False, True, False], [False, False, True]], dtype=tf.bool
),
accumulated_attention_probs=(),
)
logits_ = np.full([batch_size, beam_width, vocab_size], 0.0001)
logits_[0, 0, 2] = 1.9
logits_[0, 0, 3] = 2.1
logits_[0, 1, 3] = 3.1
logits_[0, 1, 4] = 0.9
logits_[1, 0, 1] = 0.5
logits_[1, 1, 2] = 5.7 # why does this not work when it's 2.7?
logits_[1, 2, 2] = 1.0
logits_[1, 2, 3] = 0.2
logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits)
outputs, next_beam_state = beam_search_decoder._beam_search_step(
time=2,
logits=logits,
next_cell_state=dummy_cell_state,
beam_state=beam_state,
batch_size=tf.convert_to_tensor(batch_size),
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight,
output_all_scores=output_all_scores,
)
outputs_, next_state_, state_, log_probs_ = [
outputs,
next_beam_state,
beam_state,
log_probs,
]
np.testing.assert_equal(
outputs_.parent_ids.numpy(), np.asanyarray([[1, 0, 0], [1, 2, 0]])
)
np.testing.assert_equal(
outputs_.predicted_ids.numpy(), np.asanyarray([[0, 3, 2], [2, 0, 1]])
)
np.testing.assert_equal(
next_state_.lengths.numpy(), np.asanyarray([[1, 3, 3], [3, 1, 3]])
)
np.testing.assert_equal(
next_state_.finished.numpy(),
np.asanyarray([[True, False, False], [False, True, False]]),
)
expected_log_probs = []
expected_log_probs.append(state_.log_probs[0].numpy())
expected_log_probs.append(state_.log_probs[1].numpy())
expected_log_probs[0][1] += log_probs_[0, 0, 3]
expected_log_probs[0][2] += log_probs_[0, 0, 2]
expected_log_probs[1][0] += log_probs_[1, 1, 2]
expected_log_probs[1][2] += log_probs_[1, 0, 1]
np.testing.assert_equal(
next_state_.log_probs.numpy(), np.asanyarray(expected_log_probs)
)
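# With only the first beam seeded (the others start at -inf) and a vocabulary
# of five tokens, a beam width of eight cannot be filled, so the surplus beams
# are expected to keep -inf scores and zero lengths.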
def test_large_beam_step():
batch_size = 2
beam_width = 8
vocab_size = 5
end_token = 0
length_penalty_weight = 0.6
coverage_penalty_weight = 0.0
output_all_scores = False
def get_probs():
"""this simulates the initialize method in BeamSearchDecoder."""
log_prob_mask = tf.one_hot(
tf.zeros([batch_size], dtype=tf.int32),
depth=beam_width,
on_value=True,
off_value=False,
dtype=tf.bool,
)
log_prob_zeros = tf.zeros([batch_size, beam_width], dtype=tf.float32)
log_prob_neg_inf = tf.ones([batch_size, beam_width], dtype=tf.float32) * -np.Inf
log_probs = tf.where(log_prob_mask, log_prob_zeros, log_prob_neg_inf)
return log_probs
log_probs = get_probs()
dummy_cell_state = tf.zeros([batch_size, beam_width])
_finished = tf.one_hot(
tf.zeros([batch_size], dtype=tf.int32),
depth=beam_width,
on_value=False,
off_value=True,
dtype=tf.bool,
)
_lengths = np.zeros([batch_size, beam_width], dtype=np.int64)
_lengths[:, 0] = 2
_lengths = tf.constant(_lengths, dtype=tf.int64)
beam_state = beam_search_decoder.BeamSearchDecoderState(
cell_state=dummy_cell_state,
log_probs=log_probs,
lengths=_lengths,
finished=_finished,
accumulated_attention_probs=(),
)
logits_ = np.full([batch_size, beam_width, vocab_size], 0.0001)
logits_[0, 0, 2] = 1.9
logits_[0, 0, 3] = 2.1
logits_[0, 1, 3] = 3.1
logits_[0, 1, 4] = 0.9
logits_[1, 0, 1] = 0.5
logits_[1, 1, 2] = 2.7
logits_[1, 2, 2] = 10.0
logits_[1, 2, 3] = 0.2
logits = tf.constant(logits_, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits)
outputs, next_beam_state = beam_search_decoder._beam_search_step(
time=2,
logits=logits,
next_cell_state=dummy_cell_state,
beam_state=beam_state,
batch_size=tf.convert_to_tensor(batch_size),
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight,
output_all_scores=output_all_scores,
)
outputs_, next_state_ = [outputs, next_beam_state]
assert outputs_.predicted_ids[0, 0] == 3
assert outputs_.predicted_ids[0, 1] == 2
assert outputs_.predicted_ids[1, 0] == 1
neg_inf = -np.Inf
np.testing.assert_equal(
next_state_.log_probs[:, -3:].numpy(),
np.asanyarray([[neg_inf, neg_inf, neg_inf], [neg_inf, neg_inf, neg_inf]]),
)
np.testing.assert_equal(
np.asanyarray(next_state_.log_probs[:, :-3] > neg_inf), True
)
np.testing.assert_equal(np.asanyarray(next_state_.lengths[:, :-3] > 0), True)
np.testing.assert_equal(
next_state_.lengths[:, -3:].numpy(), np.asanyarray([[0, 0, 0], [0, 0, 0]])
)
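# End-to-end check: decode a few steps with BeamSearchDecoder (optionally
# wrapped in Bahdanau attention, with or without alignment history) and verify
# the output types and shapes in both time-major and batch-major layouts.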
@pytest.mark.parametrize("output_all_scores", [True, False])
@pytest.mark.parametrize("with_alignment_history", [True, False])
@pytest.mark.parametrize("has_attention", [True, False])
@pytest.mark.parametrize("time_major", [True, False])
@pytest.mark.parametrize(
"cell_class", [tf.keras.layers.LSTMCell, tf.keras.layers.GRUCell]
)
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.usefixtures("run_custom_and_py_ops")
def test_beam_search_decoder(
cell_class, time_major, has_attention, with_alignment_history, output_all_scores
):
encoder_sequence_length = np.array([3, 2, 3, 1, 1])
batch_size = 5
decoder_max_time = 4
input_depth = 7
cell_depth = 9
attention_depth = 6
vocab_size = 20
end_token = vocab_size - 1
start_token = 0
embedding_dim = 50
maximum_iterations = 3
output_layer = tf.keras.layers.Dense(vocab_size, use_bias=True, activation=None)
beam_width = 3
embedding = tf.random.normal([vocab_size, embedding_dim])
cell = cell_class(cell_depth)
if has_attention:
attention_mechanism = attention_wrapper.BahdanauAttention(
units=attention_depth,
)
cell = attention_wrapper.AttentionWrapper(
cell=cell,
attention_mechanism=attention_mechanism,
attention_layer_size=attention_depth,
alignment_history=with_alignment_history,
)
coverage_penalty_weight = 0.2
else:
coverage_penalty_weight = 0.0
bsd = beam_search_decoder.BeamSearchDecoder(
cell=cell,
beam_width=beam_width,
output_layer=output_layer,
length_penalty_weight=0.0,
coverage_penalty_weight=coverage_penalty_weight,
output_time_major=time_major,
maximum_iterations=maximum_iterations,
output_all_scores=output_all_scores,
)
@tf.function(
input_signature=(
tf.TensorSpec([None, None, input_depth], dtype=tf.float32),
tf.TensorSpec([None], dtype=tf.int32),
)
)
def _beam_decode_from(memory, memory_sequence_length):
batch_size_tensor = tf.shape(memory)[0]
if has_attention:
tiled_memory = beam_search_decoder.tile_batch(memory, multiplier=beam_width)
tiled_memory_sequence_length = beam_search_decoder.tile_batch(
memory_sequence_length, multiplier=beam_width
)
attention_mechanism.setup_memory(
tiled_memory, memory_sequence_length=tiled_memory_sequence_length
)
cell_state = cell.get_initial_state(
batch_size=batch_size_tensor * beam_width, dtype=tf.float32
)
return bsd(
embedding,
start_tokens=tf.fill([batch_size_tensor], start_token),
end_token=end_token,
initial_state=cell_state,
)
memory = tf.random.normal([batch_size, decoder_max_time, input_depth])
memory_sequence_length = tf.constant(encoder_sequence_length, dtype=tf.int32)
final_outputs, final_state, final_sequence_lengths = _beam_decode_from(
memory, memory_sequence_length
)
def _t(shape):
if time_major:
return (shape[1], shape[0]) + shape[2:]
return shape
assert isinstance(final_outputs, beam_search_decoder.FinalBeamSearchDecoderOutput)
assert isinstance(final_state, beam_search_decoder.BeamSearchDecoderState)
beam_search_decoder_output = final_outputs.beam_search_decoder_output
max_sequence_length = np.max(final_sequence_lengths.numpy())
assert _t((batch_size, max_sequence_length, beam_width)) == tuple(
final_outputs.predicted_ids.shape.as_list()
)
if output_all_scores:
assert _t((batch_size, max_sequence_length, beam_width, vocab_size)) == tuple(
beam_search_decoder_output.scores.shape.as_list()
)
# Check that the vocab size corresponds to the dimensions of the output.
assert (beam_width, vocab_size) == tuple(bsd.output_size.scores.as_list())
else:
assert _t((batch_size, max_sequence_length, beam_width)) == tuple(
beam_search_decoder_output.scores.shape.as_list()
)
        # Check that only the beam width corresponds to the dimensions of the output.
assert (beam_width,) == tuple(bsd.output_size.scores.as_list())
| 33.34139
| 88
| 0.62577
|
887885085ba4682c5d3c658760961a3c28dba7b2
| 52,406
|
py
|
Python
|
zerver/lib/message.py
|
Awawdi/zulip
|
f2e7b92b024cfec6a9babace8298de9537925347
|
[
"Apache-2.0"
] | 2
|
2018-09-24T14:12:46.000Z
|
2018-09-24T14:12:59.000Z
|
zerver/lib/message.py
|
adb-web-designs/zulip
|
1b303e7b2f0271f81265123ad1e7125ed3914d68
|
[
"Apache-2.0"
] | 18
|
2021-08-14T01:12:30.000Z
|
2022-03-04T20:28:51.000Z
|
zerver/lib/message.py
|
adb-web-designs/zulip
|
1b303e7b2f0271f81265123ad1e7125ed3914d68
|
[
"Apache-2.0"
] | 1
|
2020-07-09T23:28:34.000Z
|
2020-07-09T23:28:34.000Z
|
import copy
import datetime
import zlib
from dataclasses import dataclass, field
from typing import Any, Dict, List, Mapping, Optional, Sequence, Set, Tuple, Union
import ahocorasick
import orjson
from django.conf import settings
from django.db import connection
from django.db.models import Max, Sum
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS
from analytics.models import RealmCount
from zerver.lib.avatar import get_avatar_field
from zerver.lib.cache import (
cache_with_key,
generic_bulk_cached_fetch,
to_dict_cache_key,
to_dict_cache_key_id,
)
from zerver.lib.display_recipient import bulk_fetch_display_recipients
from zerver.lib.exceptions import JsonableError, MissingAuthenticationError
from zerver.lib.markdown import MessageRenderingResult, markdown_convert, topic_links
from zerver.lib.markdown import version as markdown_version
from zerver.lib.mention import MentionData
from zerver.lib.request import RequestVariableConversionError
from zerver.lib.stream_subscription import (
get_stream_subscriptions_for_user,
get_subscribed_stream_recipient_ids_for_user,
num_subscribers_for_stream_id,
)
from zerver.lib.streams import get_web_public_streams_queryset
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.topic import DB_TOPIC_NAME, MESSAGE__TOPIC, TOPIC_LINKS, TOPIC_NAME
from zerver.lib.topic_mutes import build_topic_mute_checker, topic_is_muted
from zerver.lib.types import DisplayRecipientT, UserDisplayRecipient
from zerver.models import (
MAX_TOPIC_NAME_LENGTH,
Message,
Reaction,
Realm,
Recipient,
Stream,
SubMessage,
Subscription,
UserMessage,
UserProfile,
get_display_recipient_by_id,
get_usermessage_by_message_id,
query_for_ids,
)
class RawReactionRow(TypedDict):
emoji_code: str
emoji_name: str
message_id: int
reaction_type: str
user_profile__email: str
user_profile__full_name: str
user_profile_id: int
class RawUnreadStreamDict(TypedDict):
stream_id: int
topic: str
class RawUnreadPrivateMessageDict(TypedDict):
sender_id: int
class RawUnreadHuddleDict(TypedDict):
user_ids_string: str
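# The Raw* dicts mirror what is pulled straight out of the database;
# RawUnreadMessagesResult aggregates them, and UnreadMessagesResult below is
# the flattened shape that is ultimately handed to clients.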
class RawUnreadMessagesResult(TypedDict):
pm_dict: Dict[int, RawUnreadPrivateMessageDict]
stream_dict: Dict[int, RawUnreadStreamDict]
huddle_dict: Dict[int, RawUnreadHuddleDict]
mentions: Set[int]
muted_stream_ids: List[int]
unmuted_stream_msgs: Set[int]
old_unreads_missing: bool
class UnreadMessagesResult(TypedDict):
pms: List[Dict[str, Any]]
streams: List[Dict[str, Any]]
huddles: List[Dict[str, Any]]
mentions: List[int]
count: int
old_unreads_missing: bool
@dataclass
class SendMessageRequest:
message: Message
rendering_result: MessageRenderingResult
stream: Optional[Stream]
local_id: Optional[str]
sender_queue_id: Optional[str]
realm: Realm
mention_data: MentionData
mentioned_user_groups_map: Dict[int, int]
active_user_ids: Set[int]
online_push_user_ids: Set[int]
pm_mention_push_disabled_user_ids: Set[int]
pm_mention_email_disabled_user_ids: Set[int]
stream_push_user_ids: Set[int]
stream_email_user_ids: Set[int]
muted_sender_user_ids: Set[int]
um_eligible_user_ids: Set[int]
long_term_idle_user_ids: Set[int]
default_bot_user_ids: Set[int]
service_bot_tuples: List[Tuple[int, int]]
all_bot_user_ids: Set[int]
wildcard_mention_user_ids: Set[int]
links_for_embed: Set[str]
widget_content: Optional[Dict[str, Any]]
submessages: List[Dict[str, Any]] = field(default_factory=list)
deliver_at: Optional[datetime.datetime] = None
delivery_type: Optional[str] = None
# We won't try to fetch more unread message IDs from the database than
# this limit. The limit is super high, in large part because it means
# client-side code mostly doesn't need to think about the case that a
# user has older unread messages that were cut off by this limit.
MAX_UNREAD_MESSAGES = 50000
def truncate_content(content: str, max_length: int, truncation_message: str) -> str:
if len(content) > max_length:
content = content[: max_length - len(truncation_message)] + truncation_message
return content
def normalize_body(body: str) -> str:
body = body.rstrip().lstrip("\n")
if len(body) == 0:
raise JsonableError(_("Message must not be empty"))
if "\x00" in body:
raise JsonableError(_("Message must not contain null bytes"))
return truncate_content(body, settings.MAX_MESSAGE_LENGTH, "\n[message truncated]")
def truncate_topic(topic: str) -> str:
return truncate_content(topic, MAX_TOPIC_NAME_LENGTH, "...")
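# Illustrative sketch of the truncation helpers above (hypothetical inputs); the
# helper below is not called anywhere and only documents the suffix-replacement
# behavior: content longer than max_length is cut so that the truncation message
# still fits inside the limit.
def _truncation_example() -> None:
    assert truncate_content("abcdefghij", 8, "...") == "abcde..."
    too_long_topic = "a" * (MAX_TOPIC_NAME_LENGTH + 1)
    assert len(truncate_topic(too_long_topic)) == MAX_TOPIC_NAME_LENGTH
    assert truncate_topic(too_long_topic).endswith("...")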
def messages_for_ids(
message_ids: List[int],
user_message_flags: Dict[int, List[str]],
search_fields: Dict[int, Dict[str, str]],
apply_markdown: bool,
client_gravatar: bool,
allow_edit_history: bool,
) -> List[Dict[str, Any]]:
cache_transformer = MessageDict.build_dict_from_raw_db_row
id_fetcher = lambda row: row["id"]
message_dicts = generic_bulk_cached_fetch(
to_dict_cache_key_id,
MessageDict.get_raw_db_rows,
message_ids,
id_fetcher=id_fetcher,
cache_transformer=cache_transformer,
extractor=extract_message_dict,
setter=stringify_message_dict,
)
message_list: List[Dict[str, Any]] = []
for message_id in message_ids:
msg_dict = message_dicts[message_id]
msg_dict.update(flags=user_message_flags[message_id])
if message_id in search_fields:
msg_dict.update(search_fields[message_id])
# Make sure that we never send message edit history to clients
# in realms with allow_edit_history disabled.
if "edit_history" in msg_dict and not allow_edit_history:
del msg_dict["edit_history"]
message_list.append(msg_dict)
MessageDict.post_process_dicts(message_list, apply_markdown, client_gravatar)
return message_list
def sew_messages_and_reactions(
messages: List[Dict[str, Any]], reactions: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
"""Given a iterable of messages and reactions stitch reactions
into messages.
"""
# Add all messages with empty reaction item
for message in messages:
message["reactions"] = []
# Convert list of messages into dictionary to make reaction stitching easy
converted_messages = {message["id"]: message for message in messages}
for reaction in reactions:
converted_messages[reaction["message_id"]]["reactions"].append(reaction)
return list(converted_messages.values())
def sew_messages_and_submessages(
messages: List[Dict[str, Any]], submessages: List[Dict[str, Any]]
) -> None:
# This is super similar to sew_messages_and_reactions.
for message in messages:
message["submessages"] = []
message_dict = {message["id"]: message for message in messages}
for submessage in submessages:
message_id = submessage["message_id"]
if message_id in message_dict:
message = message_dict[message_id]
message["submessages"].append(submessage)
def extract_message_dict(message_bytes: bytes) -> Dict[str, Any]:
return orjson.loads(zlib.decompress(message_bytes))
def stringify_message_dict(message_dict: Dict[str, Any]) -> bytes:
return zlib.compress(orjson.dumps(message_dict))
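# Round-trip sketch for the cache serialization helpers above: message dicts are
# cached as zlib-compressed orjson payloads, so extract_message_dict is the
# inverse of stringify_message_dict. The helper below is illustrative only and
# uses a hypothetical payload.
def _message_dict_round_trip_example() -> None:
    payload = {"id": 1, "content": "hello", "flags": []}
    assert extract_message_dict(stringify_message_dict(payload)) == payload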
@cache_with_key(to_dict_cache_key, timeout=3600 * 24)
def message_to_dict_json(message: Message, realm_id: Optional[int] = None) -> bytes:
return MessageDict.to_dict_uncached([message], realm_id)[message.id]
def save_message_rendered_content(message: Message, content: str) -> str:
rendering_result = render_markdown(message, content, realm=message.get_realm())
rendered_content = None
if rendering_result is not None:
rendered_content = rendering_result.rendered_content
message.rendered_content = rendered_content
message.rendered_content_version = markdown_version
message.save_rendered_content()
return rendered_content
class MessageDict:
"""MessageDict is the core class responsible for marshalling Message
objects obtained from the database into a format that can be sent
to clients via the Zulip API, whether via `GET /messages`,
outgoing webhooks, or other code paths. There are two core flows through
which this class is used:
* For just-sent messages, we construct a single `wide_dict` object
containing all the data for the message and the related
UserProfile models (sender_info and recipient_info); this object
can be stored in queues, caches, etc., and then later turned
into an API-format JSONable dictionary via finalize_payload.
* When fetching messages from the database, we fetch their data in
bulk using messages_for_ids, which makes use of caching, bulk
fetches that skip the Django ORM, etc., to provide an optimized
interface for fetching hundreds of thousands of messages from
the database and then turning them into API-format JSON
dictionaries.
"""
@staticmethod
def wide_dict(message: Message, realm_id: Optional[int] = None) -> Dict[str, Any]:
"""
The next two lines get the cacheable field related
to our message object, with the side effect of
populating the cache.
"""
json = message_to_dict_json(message, realm_id)
obj = extract_message_dict(json)
"""
The steps below are similar to what we do in
post_process_dicts(), except we don't call finalize_payload(),
since that step happens later in the queue
processor.
"""
MessageDict.bulk_hydrate_sender_info([obj])
MessageDict.bulk_hydrate_recipient_info([obj])
return obj
@staticmethod
def post_process_dicts(
objs: List[Dict[str, Any]], apply_markdown: bool, client_gravatar: bool
) -> None:
"""
NOTE: This function mutates the objects in
the `objs` list, rather than making
shallow copies. It might be safer to
make shallow copies here, but performance
is somewhat important here, as we are
often fetching hundreds of messages.
"""
MessageDict.bulk_hydrate_sender_info(objs)
MessageDict.bulk_hydrate_recipient_info(objs)
for obj in objs:
MessageDict.finalize_payload(obj, apply_markdown, client_gravatar, skip_copy=True)
@staticmethod
def finalize_payload(
obj: Dict[str, Any],
apply_markdown: bool,
client_gravatar: bool,
keep_rendered_content: bool = False,
skip_copy: bool = False,
) -> Dict[str, Any]:
"""
By default, we make a shallow copy of the incoming dict to avoid
mutation-related bugs. Code paths that are passing a unique object
can pass skip_copy=True to avoid this extra work.
"""
if not skip_copy:
obj = copy.copy(obj)
MessageDict.set_sender_avatar(obj, client_gravatar)
if apply_markdown:
obj["content_type"] = "text/html"
obj["content"] = obj["rendered_content"]
else:
obj["content_type"] = "text/x-markdown"
if not keep_rendered_content:
del obj["rendered_content"]
del obj["sender_realm_id"]
del obj["sender_avatar_source"]
del obj["sender_delivery_email"]
del obj["sender_avatar_version"]
del obj["recipient_type"]
del obj["recipient_type_id"]
del obj["sender_is_mirror_dummy"]
return obj
@staticmethod
def sew_submessages_and_reactions_to_msgs(
messages: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
msg_ids = [msg["id"] for msg in messages]
submessages = SubMessage.get_raw_db_rows(msg_ids)
sew_messages_and_submessages(messages, submessages)
reactions = Reaction.get_raw_db_rows(msg_ids)
return sew_messages_and_reactions(messages, reactions)
@staticmethod
def to_dict_uncached(
messages: List[Message], realm_id: Optional[int] = None
) -> Dict[int, bytes]:
messages_dict = MessageDict.to_dict_uncached_helper(messages, realm_id)
encoded_messages = {msg["id"]: stringify_message_dict(msg) for msg in messages_dict}
return encoded_messages
@staticmethod
def to_dict_uncached_helper(
messages: List[Message], realm_id: Optional[int] = None
) -> List[Dict[str, Any]]:
# Near duplicate of the build_message_dict + get_raw_db_rows
# code path that accepts already fetched Message objects
# rather than message IDs.
def get_rendering_realm_id(message: Message) -> int:
# realm_id can differ among users, currently only possible
# with cross realm bots.
if realm_id is not None:
return realm_id
if message.recipient.type == Recipient.STREAM:
return Stream.objects.get(id=message.recipient.type_id).realm_id
return message.sender.realm_id
message_rows = [
{
"id": message.id,
DB_TOPIC_NAME: message.topic_name(),
"date_sent": message.date_sent,
"last_edit_time": message.last_edit_time,
"edit_history": message.edit_history,
"content": message.content,
"rendered_content": message.rendered_content,
"rendered_content_version": message.rendered_content_version,
"recipient_id": message.recipient.id,
"recipient__type": message.recipient.type,
"recipient__type_id": message.recipient.type_id,
"rendering_realm_id": get_rendering_realm_id(message),
"sender_id": message.sender.id,
"sending_client__name": message.sending_client.name,
"sender__realm_id": message.sender.realm_id,
}
for message in messages
]
MessageDict.sew_submessages_and_reactions_to_msgs(message_rows)
return [MessageDict.build_dict_from_raw_db_row(row) for row in message_rows]
@staticmethod
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
# This is a special purpose function optimized for
# callers like get_messages_backend().
fields = [
"id",
DB_TOPIC_NAME,
"date_sent",
"last_edit_time",
"edit_history",
"content",
"rendered_content",
"rendered_content_version",
"recipient_id",
"recipient__type",
"recipient__type_id",
"sender_id",
"sending_client__name",
"sender__realm_id",
]
messages = Message.objects.filter(id__in=needed_ids).values(*fields)
return MessageDict.sew_submessages_and_reactions_to_msgs(messages)
@staticmethod
def build_dict_from_raw_db_row(row: Dict[str, Any]) -> Dict[str, Any]:
"""
row is a row from a .values() call, and it needs to have
all the relevant fields populated
"""
return MessageDict.build_message_dict(
message_id=row["id"],
last_edit_time=row["last_edit_time"],
edit_history=row["edit_history"],
content=row["content"],
topic_name=row[DB_TOPIC_NAME],
date_sent=row["date_sent"],
rendered_content=row["rendered_content"],
rendered_content_version=row["rendered_content_version"],
sender_id=row["sender_id"],
sender_realm_id=row["sender__realm_id"],
sending_client_name=row["sending_client__name"],
rendering_realm_id=row.get("rendering_realm_id", row["sender__realm_id"]),
recipient_id=row["recipient_id"],
recipient_type=row["recipient__type"],
recipient_type_id=row["recipient__type_id"],
reactions=row["reactions"],
submessages=row["submessages"],
)
@staticmethod
def build_message_dict(
message_id: int,
last_edit_time: Optional[datetime.datetime],
edit_history: Optional[str],
content: str,
topic_name: str,
date_sent: datetime.datetime,
rendered_content: Optional[str],
rendered_content_version: Optional[int],
sender_id: int,
sender_realm_id: int,
sending_client_name: str,
rendering_realm_id: int,
recipient_id: int,
recipient_type: int,
recipient_type_id: int,
reactions: List[RawReactionRow],
submessages: List[Dict[str, Any]],
) -> Dict[str, Any]:
obj = dict(
id=message_id,
sender_id=sender_id,
content=content,
recipient_type_id=recipient_type_id,
recipient_type=recipient_type,
recipient_id=recipient_id,
timestamp=datetime_to_timestamp(date_sent),
client=sending_client_name,
)
obj[TOPIC_NAME] = topic_name
obj["sender_realm_id"] = sender_realm_id
# Render topic_links with the stream's realm instead of the
# sender's realm; this is important for messages sent by
# cross-realm bots like NOTIFICATION_BOT.
obj[TOPIC_LINKS] = topic_links(rendering_realm_id, topic_name)
if last_edit_time is not None:
obj["last_edit_timestamp"] = datetime_to_timestamp(last_edit_time)
assert edit_history is not None
obj["edit_history"] = orjson.loads(edit_history)
if Message.need_to_render_content(
rendered_content, rendered_content_version, markdown_version
):
# We really shouldn't be rendering objects in this method, but there is
# a scenario where we upgrade the version of Markdown and fail to run
# management commands to re-render historical messages, and then we
# need to have side effects. This method is optimized to not need full
# blown ORM objects, but the Markdown renderer is unfortunately highly
# coupled to Message, and we also need to persist the new rendered content.
# If we don't have a message object passed in, we get one here. The cost
# of going to the DB here should be overshadowed by the cost of rendering
# and updating the row.
# TODO: see #1379 to eliminate Markdown dependencies
message = Message.objects.select_related().get(id=message_id)
assert message is not None # Hint for mypy.
# It's unfortunate that we need to have side effects on the message
# in some cases.
rendered_content = save_message_rendered_content(message, content)
if rendered_content is not None:
obj["rendered_content"] = rendered_content
else:
obj["rendered_content"] = (
"<p>[Zulip note: Sorry, we could not "
+ "understand the formatting of your message]</p>"
)
if rendered_content is not None:
obj["is_me_message"] = Message.is_status_message(content, rendered_content)
else:
obj["is_me_message"] = False
obj["reactions"] = [
ReactionDict.build_dict_from_raw_db_row(reaction) for reaction in reactions
]
obj["submessages"] = submessages
return obj
@staticmethod
def bulk_hydrate_sender_info(objs: List[Dict[str, Any]]) -> None:
sender_ids = list({obj["sender_id"] for obj in objs})
if not sender_ids:
return
query = UserProfile.objects.values(
"id",
"full_name",
"delivery_email",
"email",
"realm__string_id",
"avatar_source",
"avatar_version",
"is_mirror_dummy",
)
rows = query_for_ids(query, sender_ids, "zerver_userprofile.id")
sender_dict = {row["id"]: row for row in rows}
for obj in objs:
sender_id = obj["sender_id"]
user_row = sender_dict[sender_id]
obj["sender_full_name"] = user_row["full_name"]
obj["sender_email"] = user_row["email"]
obj["sender_delivery_email"] = user_row["delivery_email"]
obj["sender_realm_str"] = user_row["realm__string_id"]
obj["sender_avatar_source"] = user_row["avatar_source"]
obj["sender_avatar_version"] = user_row["avatar_version"]
obj["sender_is_mirror_dummy"] = user_row["is_mirror_dummy"]
@staticmethod
def hydrate_recipient_info(obj: Dict[str, Any], display_recipient: DisplayRecipientT) -> None:
"""
This method hydrates recipient info with things
like full names and emails of senders. Eventually
our clients should be able to hydrate these fields
themselves with info they already have on users.
"""
recipient_type = obj["recipient_type"]
recipient_type_id = obj["recipient_type_id"]
sender_is_mirror_dummy = obj["sender_is_mirror_dummy"]
sender_email = obj["sender_email"]
sender_full_name = obj["sender_full_name"]
sender_id = obj["sender_id"]
if recipient_type == Recipient.STREAM:
display_type = "stream"
elif recipient_type in (Recipient.HUDDLE, Recipient.PERSONAL):
assert not isinstance(display_recipient, str)
display_type = "private"
if len(display_recipient) == 1:
# add the sender in if this isn't a message between
# someone and themself, preserving ordering
recip: UserDisplayRecipient = {
"email": sender_email,
"full_name": sender_full_name,
"id": sender_id,
"is_mirror_dummy": sender_is_mirror_dummy,
}
if recip["email"] < display_recipient[0]["email"]:
display_recipient = [recip, display_recipient[0]]
elif recip["email"] > display_recipient[0]["email"]:
display_recipient = [display_recipient[0], recip]
else:
raise AssertionError(f"Invalid recipient type {recipient_type}")
obj["display_recipient"] = display_recipient
obj["type"] = display_type
if obj["type"] == "stream":
obj["stream_id"] = recipient_type_id
@staticmethod
def bulk_hydrate_recipient_info(objs: List[Dict[str, Any]]) -> None:
recipient_tuples = { # We use set to eliminate duplicate tuples.
(
obj["recipient_id"],
obj["recipient_type"],
obj["recipient_type_id"],
)
for obj in objs
}
display_recipients = bulk_fetch_display_recipients(recipient_tuples)
for obj in objs:
MessageDict.hydrate_recipient_info(obj, display_recipients[obj["recipient_id"]])
@staticmethod
def set_sender_avatar(obj: Dict[str, Any], client_gravatar: bool) -> None:
sender_id = obj["sender_id"]
sender_realm_id = obj["sender_realm_id"]
sender_delivery_email = obj["sender_delivery_email"]
sender_avatar_source = obj["sender_avatar_source"]
sender_avatar_version = obj["sender_avatar_version"]
obj["avatar_url"] = get_avatar_field(
user_id=sender_id,
realm_id=sender_realm_id,
email=sender_delivery_email,
avatar_source=sender_avatar_source,
avatar_version=sender_avatar_version,
medium=False,
client_gravatar=client_gravatar,
)
class ReactionDict:
@staticmethod
def build_dict_from_raw_db_row(row: RawReactionRow) -> Dict[str, Any]:
return {
"emoji_name": row["emoji_name"],
"emoji_code": row["emoji_code"],
"reaction_type": row["reaction_type"],
# TODO: We plan to remove this redundant user dictionary once
# clients are updated to support accessing user_id. See
# https://github.com/zulip/zulip/pull/14711 for details.
#
# When we do that, we can likely update the `.values()` query to
# not fetch the extra user_profile__* fields from the database
# as a small performance optimization.
"user": {
"email": row["user_profile__email"],
"id": row["user_profile_id"],
"full_name": row["user_profile__full_name"],
},
"user_id": row["user_profile_id"],
}
def access_message(
user_profile: UserProfile,
message_id: int,
lock_message: bool = False,
) -> Tuple[Message, Optional[UserMessage]]:
"""You can access a message by ID in our APIs that either:
(1) You received or have previously accessed via starring
(aka have a UserMessage row for).
(2) Was sent to a public stream in your realm.
We produce consistent, boring error messages to avoid leaking any
information from a security perspective.
The lock_message parameter should be passed by callers that are
planning to modify the Message object. This will use the SQL
`SELECT FOR UPDATE` feature to ensure that other processes cannot
delete the message during the current transaction, which is
important to prevent rare race conditions. Callers must only
pass lock_message when inside a @transaction.atomic block.
"""
try:
base_query = Message.objects.select_related()
if lock_message:
# We want to lock only the `Message` row, and not the related fields
# because the `Message` row only has a possibility of races.
base_query = base_query.select_for_update(of=("self",))
message = base_query.get(id=message_id)
except Message.DoesNotExist:
raise JsonableError(_("Invalid message(s)"))
user_message = get_usermessage_by_message_id(user_profile, message_id)
if has_message_access(user_profile, message, has_user_message=user_message is not None):
return (message, user_message)
raise JsonableError(_("Invalid message(s)"))
def access_web_public_message(
realm: Realm,
message_id: int,
) -> Message:
"""Access control method for unauthenticated requests interacting
with a message in web public streams.
"""
# We throw a MissingAuthenticationError for all errors in this
# code path, to avoid potentially leaking information on whether a
# message with the provided ID exists on the server if the client
# shouldn't have access to it.
if not realm.web_public_streams_enabled():
raise MissingAuthenticationError()
try:
message = Message.objects.select_related().get(id=message_id)
except Message.DoesNotExist:
raise MissingAuthenticationError()
if not message.is_stream_message():
raise MissingAuthenticationError()
queryset = get_web_public_streams_queryset(realm)
try:
stream = queryset.get(id=message.recipient.type_id)
except Stream.DoesNotExist:
raise MissingAuthenticationError()
# These should all have been enforced by the code in
# get_web_public_streams_queryset
assert stream.is_web_public
assert not stream.deactivated
assert not stream.invite_only
assert stream.history_public_to_subscribers
# Now that we've confirmed this message was sent to the target
# web-public stream, we can return it as having been successfully
# accessed.
return message
def has_message_access(
user_profile: UserProfile,
message: Message,
*,
has_user_message: bool,
stream: Optional[Stream] = None,
is_subscribed: Optional[bool] = None,
) -> bool:
"""
Returns whether a user has access to a given message.
* The has_user_message parameter should indicate whether the user has a
UserMessage row for the target message.
* The optional stream parameter is validated; is_subscribed is not.
"""
# If you have a user_message object, you have access.
if has_user_message:
return True
if message.recipient.type != Recipient.STREAM:
# You can't access private messages you didn't receive
return False
if stream is None:
stream = Stream.objects.get(id=message.recipient.type_id)
else:
assert stream.recipient_id == message.recipient_id
if stream.realm != user_profile.realm:
# You can't access public stream messages in other realms
return False
if not stream.is_history_public_to_subscribers():
# You can't access messages you didn't directly receive
# unless history is public to subscribers.
return False
if stream.is_public() and user_profile.can_access_public_streams():
return True
# is_history_public_to_subscribers, so check if you're subscribed
if is_subscribed is not None:
return is_subscribed
return Subscription.objects.filter(
user_profile=user_profile, active=True, recipient=message.recipient
).exists()
def bulk_access_messages(
user_profile: UserProfile, messages: Sequence[Message], *, stream: Optional[Stream] = None
) -> List[Message]:
"""This function does the full has_message_access check for each
message. If stream is provided, it is used to avoid unnecessary
database queries, and will use exactly 2 bulk queries instead.
Throws AssertionError if stream is passed and any of the messages
were not sent to that stream.
"""
filtered_messages = []
user_message_set = set(
bulk_access_messages_expect_usermessage(
user_profile.id, [message.id for message in messages]
)
)
# TODO: Ideally, we'd do a similar bulk-stream-fetch if stream is
# None, so that this function is fast with
subscribed_recipient_ids = set(get_subscribed_stream_recipient_ids_for_user(user_profile))
for message in messages:
has_user_message = message.id in user_message_set
is_subscribed = message.recipient_id in subscribed_recipient_ids
if has_message_access(
user_profile,
message,
has_user_message=has_user_message,
stream=stream,
is_subscribed=is_subscribed,
):
filtered_messages.append(message)
return filtered_messages
def bulk_access_messages_expect_usermessage(
user_profile_id: int, message_ids: Sequence[int]
) -> List[int]:
"""
Like bulk_access_messages, but faster and potentially stricter.
Returns a subset of `message_ids` containing only messages the
user can access. Makes O(1) database queries.
Use this function only when the user is expected to have a
UserMessage row for every message in `message_ids`. If a
UserMessage row is missing, the message will be omitted even if
the user has access (e.g. because it went to a public stream.)
See also: `access_message`, `bulk_access_messages`.
"""
return UserMessage.objects.filter(
user_profile_id=user_profile_id,
message_id__in=message_ids,
).values_list("message_id", flat=True)
def render_markdown(
message: Message,
content: str,
realm: Optional[Realm] = None,
realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
mention_data: Optional[MentionData] = None,
email_gateway: bool = False,
) -> MessageRenderingResult:
"""
This is basically just a wrapper for do_render_markdown.
"""
if realm is None:
realm = message.get_realm()
sender = message.sender
sent_by_bot = sender.is_bot
translate_emoticons = sender.translate_emoticons
rendering_result = markdown_convert(
content,
realm_alert_words_automaton=realm_alert_words_automaton,
message=message,
message_realm=realm,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
mention_data=mention_data,
email_gateway=email_gateway,
)
return rendering_result
def huddle_users(recipient_id: int) -> str:
display_recipient: DisplayRecipientT = get_display_recipient_by_id(
recipient_id,
Recipient.HUDDLE,
None,
)
# str is for streams.
assert not isinstance(display_recipient, str)
user_ids: List[int] = [obj["id"] for obj in display_recipient]
user_ids = sorted(user_ids)
return ",".join(str(uid) for uid in user_ids)
def aggregate_message_dict(
input_dict: Dict[int, Any], lookup_fields: List[str]
) -> List[Dict[str, Any]]:
lookup_dict: Dict[Tuple[Any, ...], Dict[str, Any]] = {}
"""
A concrete example might help explain the inputs here:
input_dict = {
1002: dict(stream_id=5, topic='foo'),
1003: dict(stream_id=5, topic='foo'),
1004: dict(stream_id=6, topic='baz'),
}
lookup_fields = ['stream_id', 'topic']
The first time through the loop:
attribute_dict = dict(stream_id=5, topic='foo')
lookup_key = (5, 'foo')
lookup_dict = {
(5, 'foo'): dict(stream_id=5, topic='foo',
unread_message_ids=[1002, 1003],
),
...
}
result = [
dict(stream_id=5, topic='foo',
unread_message_ids=[1002, 1003],
),
...
]
"""
for message_id, attribute_dict in input_dict.items():
lookup_key = tuple(attribute_dict[f] for f in lookup_fields)
if lookup_key not in lookup_dict:
obj = {}
for f in lookup_fields:
obj[f] = attribute_dict[f]
obj["unread_message_ids"] = []
lookup_dict[lookup_key] = obj
bucket = lookup_dict[lookup_key]
bucket["unread_message_ids"].append(message_id)
for dct in lookup_dict.values():
dct["unread_message_ids"].sort()
sorted_keys = sorted(lookup_dict.keys())
return [lookup_dict[k] for k in sorted_keys]
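# Usage sketch mirroring the worked example in the docstring above (the IDs and
# topic names are hypothetical); the helper below is illustrative only.
def _aggregate_message_dict_example() -> None:
    result = aggregate_message_dict(
        input_dict={
            1002: dict(stream_id=5, topic="foo"),
            1003: dict(stream_id=5, topic="foo"),
            1004: dict(stream_id=6, topic="baz"),
        },
        lookup_fields=["stream_id", "topic"],
    )
    assert result == [
        dict(stream_id=5, topic="foo", unread_message_ids=[1002, 1003]),
        dict(stream_id=6, topic="baz", unread_message_ids=[1004]),
    ]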
def get_inactive_recipient_ids(user_profile: UserProfile) -> List[int]:
rows = (
get_stream_subscriptions_for_user(user_profile)
.filter(
active=False,
)
.values(
"recipient_id",
)
)
inactive_recipient_ids = [row["recipient_id"] for row in rows]
return inactive_recipient_ids
def get_muted_stream_ids(user_profile: UserProfile) -> List[int]:
rows = (
get_stream_subscriptions_for_user(user_profile)
.filter(
active=True,
is_muted=True,
)
.values(
"recipient__type_id",
)
)
muted_stream_ids = [row["recipient__type_id"] for row in rows]
return muted_stream_ids
def get_starred_message_ids(user_profile: UserProfile) -> List[int]:
return list(
UserMessage.objects.filter(
user_profile=user_profile,
)
.extra(
where=[UserMessage.where_starred()],
)
.order_by(
"message_id",
)
.values_list("message_id", flat=True)[0:10000]
)
def get_raw_unread_data(user_profile: UserProfile) -> RawUnreadMessagesResult:
excluded_recipient_ids = get_inactive_recipient_ids(user_profile)
user_msgs = (
UserMessage.objects.filter(
user_profile=user_profile,
)
.exclude(
message__recipient_id__in=excluded_recipient_ids,
)
.extra(
where=[UserMessage.where_unread()],
)
.values(
"message_id",
"message__sender_id",
MESSAGE__TOPIC,
"message__recipient_id",
"message__recipient__type",
"message__recipient__type_id",
"flags",
)
.order_by("-message_id")
)
# Limit unread messages for performance reasons.
user_msgs = list(user_msgs[:MAX_UNREAD_MESSAGES])
rows = list(reversed(user_msgs))
return extract_unread_data_from_um_rows(rows, user_profile)
def extract_unread_data_from_um_rows(
rows: List[Dict[str, Any]], user_profile: Optional[UserProfile]
) -> RawUnreadMessagesResult:
pm_dict: Dict[int, RawUnreadPrivateMessageDict] = {}
stream_dict: Dict[int, RawUnreadStreamDict] = {}
unmuted_stream_msgs: Set[int] = set()
huddle_dict: Dict[int, RawUnreadHuddleDict] = {}
mentions: Set[int] = set()
total_unreads = 0
raw_unread_messages: RawUnreadMessagesResult = dict(
pm_dict=pm_dict,
stream_dict=stream_dict,
muted_stream_ids=[],
unmuted_stream_msgs=unmuted_stream_msgs,
huddle_dict=huddle_dict,
mentions=mentions,
old_unreads_missing=False,
)
if user_profile is None:
return raw_unread_messages
muted_stream_ids = get_muted_stream_ids(user_profile)
raw_unread_messages["muted_stream_ids"] = muted_stream_ids
topic_mute_checker = build_topic_mute_checker(user_profile)
def is_row_muted(stream_id: int, recipient_id: int, topic: str) -> bool:
if stream_id in muted_stream_ids:
return True
if topic_mute_checker(recipient_id, topic):
return True
# Messages sent by muted users are never unread, so we don't
# need any logic related to muted users here.
return False
huddle_cache: Dict[int, str] = {}
def get_huddle_users(recipient_id: int) -> str:
if recipient_id in huddle_cache:
return huddle_cache[recipient_id]
user_ids_string = huddle_users(recipient_id)
huddle_cache[recipient_id] = user_ids_string
return user_ids_string
for row in rows:
total_unreads += 1
message_id = row["message_id"]
msg_type = row["message__recipient__type"]
recipient_id = row["message__recipient_id"]
sender_id = row["message__sender_id"]
if msg_type == Recipient.STREAM:
stream_id = row["message__recipient__type_id"]
topic = row[MESSAGE__TOPIC]
stream_dict[message_id] = dict(
stream_id=stream_id,
topic=topic,
)
if not is_row_muted(stream_id, recipient_id, topic):
unmuted_stream_msgs.add(message_id)
elif msg_type == Recipient.PERSONAL:
if sender_id == user_profile.id:
other_user_id = row["message__recipient__type_id"]
else:
other_user_id = sender_id
# The `sender_id` field here is misnamed. It's really
# just the other participant in a PM conversation. For
# most unread PM messages, the other user is also the sender,
# but that's not true for certain messages sent from the
# API. Unfortunately, it's difficult now to rename the
# field without breaking mobile.
pm_dict[message_id] = dict(
sender_id=other_user_id,
)
elif msg_type == Recipient.HUDDLE:
user_ids_string = get_huddle_users(recipient_id)
huddle_dict[message_id] = dict(
user_ids_string=user_ids_string,
)
# TODO: Add support for alert words here as well.
is_mentioned = (row["flags"] & UserMessage.flags.mentioned) != 0
is_wildcard_mentioned = (row["flags"] & UserMessage.flags.wildcard_mentioned) != 0
if is_mentioned:
mentions.add(message_id)
if is_wildcard_mentioned:
if msg_type == Recipient.STREAM:
stream_id = row["message__recipient__type_id"]
topic = row[MESSAGE__TOPIC]
if not is_row_muted(stream_id, recipient_id, topic):
mentions.add(message_id)
else: # nocoverage # TODO: Test wildcard mentions in PMs.
mentions.add(message_id)
# Record whether the user had more than MAX_UNREAD_MESSAGES total
# unreads -- that's a state where Zulip's behavior will start to
# be erroneous, and clients should display a warning.
raw_unread_messages["old_unreads_missing"] = total_unreads == MAX_UNREAD_MESSAGES
return raw_unread_messages
def aggregate_unread_data(raw_data: RawUnreadMessagesResult) -> UnreadMessagesResult:
pm_dict = raw_data["pm_dict"]
stream_dict = raw_data["stream_dict"]
unmuted_stream_msgs = raw_data["unmuted_stream_msgs"]
huddle_dict = raw_data["huddle_dict"]
mentions = list(raw_data["mentions"])
count = len(pm_dict) + len(unmuted_stream_msgs) + len(huddle_dict)
pm_objects = aggregate_message_dict(
input_dict=pm_dict,
lookup_fields=[
"sender_id",
],
)
stream_objects = aggregate_message_dict(
input_dict=stream_dict,
lookup_fields=[
"stream_id",
"topic",
],
)
huddle_objects = aggregate_message_dict(
input_dict=huddle_dict,
lookup_fields=[
"user_ids_string",
],
)
result: UnreadMessagesResult = dict(
pms=pm_objects,
streams=stream_objects,
huddles=huddle_objects,
mentions=mentions,
count=count,
old_unreads_missing=raw_data["old_unreads_missing"],
)
return result
def apply_unread_message_event(
user_profile: UserProfile,
state: RawUnreadMessagesResult,
message: Dict[str, Any],
flags: List[str],
) -> None:
message_id = message["id"]
if message["type"] == "stream":
message_type = "stream"
elif message["type"] == "private":
others = [recip for recip in message["display_recipient"] if recip["id"] != user_profile.id]
if len(others) <= 1:
message_type = "private"
else:
message_type = "huddle"
else:
raise AssertionError("Invalid message type {}".format(message["type"]))
if message_type == "stream":
stream_id = message["stream_id"]
topic = message[TOPIC_NAME]
state["stream_dict"][message_id] = RawUnreadStreamDict(
stream_id=stream_id,
topic=topic,
)
if stream_id not in state["muted_stream_ids"]:
# This next check hits the database.
if not topic_is_muted(user_profile, stream_id, topic):
state["unmuted_stream_msgs"].add(message_id)
elif message_type == "private":
if len(others) == 1:
other_id = others[0]["id"]
else:
other_id = user_profile.id
# The `sender_id` field here is misnamed.
state["pm_dict"][message_id] = RawUnreadPrivateMessageDict(
sender_id=other_id,
)
else:
display_recipient = message["display_recipient"]
user_ids = [obj["id"] for obj in display_recipient]
user_ids = sorted(user_ids)
user_ids_string = ",".join(str(uid) for uid in user_ids)
state["huddle_dict"][message_id] = RawUnreadHuddleDict(
user_ids_string=user_ids_string,
)
if "mentioned" in flags:
state["mentions"].add(message_id)
if "wildcard_mentioned" in flags:
if message_id in state["unmuted_stream_msgs"]:
state["mentions"].add(message_id)
def remove_message_id_from_unread_mgs(state: RawUnreadMessagesResult, message_id: int) -> None:
# The opposite of apply_unread_message_event; removes a read or
# deleted message from a raw_unread_msgs data structure.
state["pm_dict"].pop(message_id, None)
state["stream_dict"].pop(message_id, None)
state["huddle_dict"].pop(message_id, None)
state["unmuted_stream_msgs"].discard(message_id)
state["mentions"].discard(message_id)
def estimate_recent_messages(realm: Realm, hours: int) -> int:
stat = COUNT_STATS["messages_sent:is_bot:hour"]
d = timezone_now() - datetime.timedelta(hours=hours)
return (
RealmCount.objects.filter(property=stat.property, end_time__gt=d, realm=realm).aggregate(
Sum("value")
)["value__sum"]
or 0
)
def get_first_visible_message_id(realm: Realm) -> int:
return realm.first_visible_message_id
def maybe_update_first_visible_message_id(realm: Realm, lookback_hours: int) -> None:
recent_messages_count = estimate_recent_messages(realm, lookback_hours)
if realm.message_visibility_limit is not None and recent_messages_count > 0:
update_first_visible_message_id(realm)
def update_first_visible_message_id(realm: Realm) -> None:
if realm.message_visibility_limit is None:
realm.first_visible_message_id = 0
else:
try:
first_visible_message_id = (
Message.objects.filter(sender__realm=realm)
.values("id")
.order_by("-id")[realm.message_visibility_limit - 1]["id"]
)
except IndexError:
first_visible_message_id = 0
realm.first_visible_message_id = first_visible_message_id
realm.save(update_fields=["first_visible_message_id"])
def get_last_message_id() -> int:
# We generally use this function to populate RealmAuditLog, and
# the max id here is actually systemwide, not per-realm. I
# assume there's some advantage in not filtering by realm.
last_id = Message.objects.aggregate(Max("id"))["id__max"]
if last_id is None:
# During initial realm creation, there might be 0 messages in
# the database; in that case, the `aggregate` query returns
# None. Since we want an int for "beginning of time", use -1.
last_id = -1
return last_id
def get_recent_conversations_recipient_id(
user_profile: UserProfile, recipient_id: int, sender_id: int
) -> int:
"""Helper for doing lookups of the recipient_id that
get_recent_private_conversations would have used to record that
message in its data structure.
"""
my_recipient_id = user_profile.recipient_id
if recipient_id == my_recipient_id:
return UserProfile.objects.values_list("recipient_id", flat=True).get(id=sender_id)
return recipient_id
def get_recent_private_conversations(user_profile: UserProfile) -> Dict[int, Dict[str, Any]]:
"""This function uses some carefully optimized SQL queries, designed
to use the UserMessage index on private_messages. It is
significantly complicated by the fact that for 1:1 private
messages, we store the message against a recipient_id of whichever
user was the recipient, and thus for 1:1 private messages sent
directly to us, we need to look up the other user from the
sender_id on those messages. You'll see that pattern repeated
both here and also in zerver/lib/events.py.
Ideally, we would write these queries using Django, but even
without the UNION ALL, that seems to not be possible, because the
equivalent Django syntax (for the first part of this query):
message_data = UserMessage.objects.select_related("message__recipient_id").filter(
user_profile=user_profile,
).extra(
where=[UserMessage.where_private()]
).order_by("-message_id")[:1000].values(
"message__recipient_id").annotate(last_message_id=Max("message_id"))
does not properly nest the GROUP BY (from .annotate) with the slicing.
We return a dictionary structure for convenient modification
below; this structure is converted into its final form by
post_process.
"""
RECENT_CONVERSATIONS_LIMIT = 1000
recipient_map = {}
my_recipient_id = user_profile.recipient_id
query = SQL(
"""
SELECT
subquery.recipient_id, MAX(subquery.message_id)
FROM (
(SELECT
um.message_id AS message_id,
m.recipient_id AS recipient_id
FROM
zerver_usermessage um
JOIN
zerver_message m
ON
um.message_id = m.id
WHERE
um.user_profile_id=%(user_profile_id)s AND
um.flags & 2048 <> 0 AND
m.recipient_id <> %(my_recipient_id)s
ORDER BY message_id DESC
LIMIT %(conversation_limit)s)
UNION ALL
(SELECT
m.id AS message_id,
sender_profile.recipient_id AS recipient_id
FROM
zerver_message m
JOIN
zerver_userprofile sender_profile
ON
m.sender_id = sender_profile.id
WHERE
m.recipient_id=%(my_recipient_id)s
ORDER BY message_id DESC
LIMIT %(conversation_limit)s)
) AS subquery
GROUP BY subquery.recipient_id
"""
)
with connection.cursor() as cursor:
cursor.execute(
query,
{
"user_profile_id": user_profile.id,
"conversation_limit": RECENT_CONVERSATIONS_LIMIT,
"my_recipient_id": my_recipient_id,
},
)
rows = cursor.fetchall()
# The resulting rows will be (recipient_id, max_message_id)
# objects for all parties we've had recent (group?) private
# message conversations with, including PMs with yourself (those
# will generate an empty list of user_ids).
for recipient_id, max_message_id in rows:
recipient_map[recipient_id] = dict(
max_message_id=max_message_id,
user_ids=[],
)
# Now we need to map all the recipient_id objects to lists of user IDs
for (recipient_id, user_profile_id) in (
Subscription.objects.filter(recipient_id__in=recipient_map.keys())
.exclude(user_profile_id=user_profile.id)
.values_list("recipient_id", "user_profile_id")
):
recipient_map[recipient_id]["user_ids"].append(user_profile_id)
# Sort to prevent test flakes and client bugs.
for rec in recipient_map.values():
rec["user_ids"].sort()
return recipient_map
def wildcard_mention_allowed(sender: UserProfile, stream: Stream) -> bool:
realm = sender.realm
# If the stream has at most Realm.WILDCARD_MENTION_THRESHOLD
# subscribers, we allow sending. In the future, we may want to make this behavior
# a default, and also just allow explicitly setting whether this
# applies to a stream as an override.
if num_subscribers_for_stream_id(stream.id) <= Realm.WILDCARD_MENTION_THRESHOLD:
return True
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_NOBODY:
return False
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_EVERYONE:
return True
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_ADMINS:
return sender.is_realm_admin
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_MODERATORS:
return sender.is_realm_admin or sender.is_moderator
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_STREAM_ADMINS:
# TODO: Change this when we implement stream administrators
return sender.is_realm_admin
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_FULL_MEMBERS:
return sender.is_realm_admin or (not sender.is_provisional_member and not sender.is_guest)
if realm.wildcard_mention_policy == Realm.WILDCARD_MENTION_POLICY_MEMBERS:
return not sender.is_guest
raise AssertionError("Invalid wildcard mention policy")
def parse_message_content_delete_limit(
value: Union[int, str],
special_values_map: Mapping[str, Optional[int]],
) -> Optional[int]:
if isinstance(value, str) and value in special_values_map.keys():
return special_values_map[value]
if isinstance(value, str) or value <= 0:
raise RequestVariableConversionError("message_content_delete_limit_seconds", value)
assert isinstance(value, int)
return value
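# Usage sketch for the parser above; the special-values mapping below is
# hypothetical and the helper is illustrative only. Special string values map
# through special_values_map, positive integers pass through unchanged, and
# anything else raises RequestVariableConversionError.
def _parse_message_content_delete_limit_example() -> None:
    special_values_map = {"unlimited": None}
    assert parse_message_content_delete_limit("unlimited", special_values_map) is None
    assert parse_message_content_delete_limit(600, special_values_map) == 600
    try:
        parse_message_content_delete_limit(0, special_values_map)
    except RequestVariableConversionError:
        pass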
| 35.242771
| 100
| 0.664122
|
71e1386266d2484bf5093dd8f1e466dfc0c69ce7
| 462
|
py
|
Python
|
data/scripts/templates/object/tangible/container/base/shared_base_container.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/container/base/shared_base_container.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/container/base/shared_base_container.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/container/base/shared_base_container.iff"
result.attribute_template_id = -1
result.stfName("container_name","base_container")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.176471
| 77
| 0.733766
|
c9f2316df3a4602cab86028a4664761a9040d6c1
| 667
|
py
|
Python
|
lib/aoc/year_2019_day_01.py
|
Repiphany/AoC
|
d59badb62b82434bccd757e37d6d5c4d0bbf2838
|
[
"MIT"
] | null | null | null |
lib/aoc/year_2019_day_01.py
|
Repiphany/AoC
|
d59badb62b82434bccd757e37d6d5c4d0bbf2838
|
[
"MIT"
] | null | null | null |
lib/aoc/year_2019_day_01.py
|
Repiphany/AoC
|
d59badb62b82434bccd757e37d6d5c4d0bbf2838
|
[
"MIT"
] | 1
|
2019-12-06T19:16:12.000Z
|
2019-12-06T19:16:12.000Z
|
#!/usr/bin/env python3
from .input import get_input
def fuel(mass):
return max(mass // 3 - 2, 0)
def fuelr(mass):
fr = [fuel(mass)]
while fr[-1]:
fr.append(fuel(fr[-1]))
return sum(fr)
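# Worked example for the two helpers above: a module of mass 1969 needs
# fuel(1969) == 654 directly; fuelr then keeps adding fuel for the fuel itself,
# 654 + 216 + 70 + 21 + 5 + 0 == 966, matching the assertion in test() below.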
def test(args):
assert fuel(12) == 2
assert fuel(14) == 2
assert fuel(1969) == 654
assert fuel(100756) == 33583
assert fuelr(14) == 2
assert fuelr(1969) == 966
assert fuelr(100756) == 50346
print('Tests passed')
def main(args):
fuel_sum = fuelr_sum = 0
for line in get_input(args.YEAR, args.DAY):
fuel_sum += fuel(int(line))
fuelr_sum += fuelr(int(line))
print(fuel_sum)
print(fuelr_sum)
| 20.84375
| 47
| 0.595202
|
b74e7a475fa7ff574e5e1429f8f1ebd1c456d878
| 68
|
py
|
Python
|
processing/backends/__init__.py
|
Kingjmk/mlfaati
|
12c0dcbe0389c2c1da0bde80509fb3374955e293
|
[
"MIT"
] | 1
|
2021-01-04T07:34:34.000Z
|
2021-01-04T07:34:34.000Z
|
processing/backends/__init__.py
|
Kingjmk/mlfaati
|
12c0dcbe0389c2c1da0bde80509fb3374955e293
|
[
"MIT"
] | null | null | null |
processing/backends/__init__.py
|
Kingjmk/mlfaati
|
12c0dcbe0389c2c1da0bde80509fb3374955e293
|
[
"MIT"
] | null | null | null |
"""
This module contains processing backends for each file type
"""
| 17
| 59
| 0.75
|
9027820a6ca71736928645d1903955f76db01f42
| 1,995
|
py
|
Python
|
test2.py
|
blevine37/blarf
|
8f1d1abb05d72069323d7911f2afb38ad3d9146b
|
[
"MIT"
] | null | null | null |
test2.py
|
blevine37/blarf
|
8f1d1abb05d72069323d7911f2afb38ad3d9146b
|
[
"MIT"
] | null | null | null |
test2.py
|
blevine37/blarf
|
8f1d1abb05d72069323d7911f2afb38ad3d9146b
|
[
"MIT"
] | null | null | null |
import numpy as np
import blarf
import os
natoms = 5
k = 200
ff = blarf.potential()
ff.init_chain(natoms,1.0,1.0,(1.414/1.122),0.1)
r = np.zeros(natoms*3)
r[3] = 0.2
r[5] = 1.0
r[7] = 1.1
r[8] = 1.0
#3
r[10] = 1.1
r[11] = 1.9
r[13] = 0.9
r[14] = 3.0
# 5
#r[15] = 0.3
#r[16] = 0.9
#r[17] = 4.0
# 6
#r[19] = 1.1
#r[20] = 4.8
print "r ", r
ff.eval_pes(r)
print "e ", ff.get_energy()
print "f ", ff.get_forces()
vv = blarf.classical()
vv.set_numdims(natoms*3)
vv.set_positions(r)
vv.set_momenta(np.zeros(natoms*3))
vv.set_timestep(0.001)
vv.set_maxtime(1000.0)
vv.set_num_rescale_steps(100)
vv.set_temperature(0.1)
data = blarf.dataset(natoms*3)
vv.propagate(ff,data,stride=100)
print(data.get_numpoints())
print(data.get_positions())
print(data.get_energies_exact())
thindata = data.thin_dataset(1)
thindata.set_internal_type("reciprical_bond")
thindata.compute_internals()
#width_factors = [2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0]
#numtrials = 5
width_factors = [32.0]
numtrials = 1
if os.path.exists("mur.dat"):
os.remove("mur.dat")
iw = 0
for w in width_factors:
murs = np.zeros(numtrials)
for itrial in range(numtrials):
clust = blarf.cluster(k,thindata.get_numinternals())
clust.k_means_optimization(thindata)
thindata.h5_output("thindata.hdf5")
network = blarf.rbfn()
network.set_width_factor(w)
network.init_from_cluster_reciprical_bonds_traditionalrbf(clust)
#network.init_from_cluster_reciprical_bonds_onedimensional(clust)
network.solve_weights(thindata)
filename = "network." + str(w) + "." + str(itrial) + ".hdf5"
network.h5_output(filename)
murs[itrial] = thindata.get_mean_unsigned_residual()
f = open('mur.dat', 'a')
s = str(w) + " "
for imur in range(murs.size):
s = s + str(murs[imur]) + " "
s = s + str(np.sum(murs) / murs.size) + "\n"
f.write(s)
f.close()
iw += 1
| 18.472222
| 73
| 0.626566
|
34021945763c42e2a492d05de7d4984bb354e36d
| 221
|
py
|
Python
|
PyHEADTAIL/trackers/simple_long_tracking.py
|
fsoubelet/PyHEADTAIL
|
51cae8845cceb61cc3f140db4ab0eeb68469110f
|
[
"BSD-3-Clause"
] | null | null | null |
PyHEADTAIL/trackers/simple_long_tracking.py
|
fsoubelet/PyHEADTAIL
|
51cae8845cceb61cc3f140db4ab0eeb68469110f
|
[
"BSD-3-Clause"
] | null | null | null |
PyHEADTAIL/trackers/simple_long_tracking.py
|
fsoubelet/PyHEADTAIL
|
51cae8845cceb61cc3f140db4ab0eeb68469110f
|
[
"BSD-3-Clause"
] | null | null | null |
from .longitudinal_tracking import *
from PyHEADTAIL.general.decorators import deprecated
@deprecated("--> Use the longitudinal_tracking module instead.\n")
def simple_long_tracking():
pass
simple_long_tracking()
| 20.090909
| 66
| 0.79638
|
f029f0ff0d0f555e3d9113675f41decf654add74
| 2,421
|
py
|
Python
|
nlp_news.py
|
CoderPaulK/news_nlp
|
1a74a08727993b8ef3b38a2cc9151bdfd275abff
|
[
"Apache-2.0"
] | null | null | null |
nlp_news.py
|
CoderPaulK/news_nlp
|
1a74a08727993b8ef3b38a2cc9151bdfd275abff
|
[
"Apache-2.0"
] | null | null | null |
nlp_news.py
|
CoderPaulK/news_nlp
|
1a74a08727993b8ef3b38a2cc9151bdfd275abff
|
[
"Apache-2.0"
] | null | null | null |
import pprint
import requests
import pickle
import pandas as pd
import spacy
from sklearn.neighbors import NearestNeighbors
from tqdm import tqdm
from sklearn.cluster import DBSCAN
from sklearn.datasets.samples_generator import make_blobs
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
def show_blobs():
X, y = make_blobs(n_samples=300, centers=4, cluster_std=0.60, random_state=0)
plt.scatter(X[:, 0], X[:, 1])
def load_web():
secret = 'xxx'
url = 'https://newsapi.org/v2/everything?'
parameters = {
'q': 'big data', # query phrase
'pageSize': 20, # maximum is 100
'apiKey': secret # your own API key
}
# Make the request
response = requests.get(url, params=parameters)
# Convert the response to JSON format and pretty print it
data = response.json()
with open('output.pickle', 'wb') as w:
pickle.dump(data, w)
def load_file():
with open('output.pickle', 'rb') as r:
articles = pickle.load(r)
return articles
def make_df():
titles = []
dates = []
descriptions = []
for line in load_file()['articles']:
titles.append(line['title'])
dates.append(line['publishedAt'])
descriptions.append(line['description'])
# print({'titles':titles,'desc':descriptions, 'dates':dates})
df = pd.DataFrame(data={'titles': titles, 'desc': descriptions, 'dates': dates})
df = df.drop_duplicates(subset='titles').reset_index(drop=True)
df = df.dropna()
print(df.head())
return df, titles
df, titles = make_df()
nlp = spacy.load('en_core_web_sm')
sent_vecs = {}
docs = []
for title in tqdm(titles):
doc = nlp(title)
docs.append(doc)
sent_vecs.update({title: doc.vector})
sentences = list(sent_vecs.keys())
vectors = list(sent_vecs.values())
print(sentences)
X = np.array(vectors)
n_classes = {}
neigh = NearestNeighbors(n_neighbors=2)
nbrs = neigh.fit(X)
distances, indices = nbrs.kneighbors(X)
distances = np.sort(distances, axis=0)
distances = distances[:, 1]
plt.plot(distances)
plt.show()
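# Note on the block above: sorting each title's distance to its nearest neighbor
# and plotting it is the usual k-distance "elbow" heuristic for choosing the
# DBSCAN eps parameter; the sweep over eps values below then checks how the
# number of resulting clusters changes around that elbow (the 0.2-0.3 range is
# the author's choice for this dataset).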
for i in tqdm(np.arange(0.2, 0.3, 0.001)):
dbscan = DBSCAN(eps=i, min_samples=2, metric='cosine').fit(X)
n_classes.update({i: len(pd.Series(dbscan.labels_).value_counts())})
print(dbscan.labels_)
# dbscan=DBSCAN(eps=0.26, min_samples=2, metric='cosine').fit(X)
# print(n_classes.values())
# show_blobs()
# plt.show()
| 27.827586
| 84
| 0.670384
|
49e3198cbd569976172fded57185698c8c8ae7e2
| 4,001
|
py
|
Python
|
wiserHeatAPIv2/heating_actuator.py
|
LGO44/wiserHeatAPIv2
|
dfe66edf21d069764f64842e4218ae39c81701dc
|
[
"MIT"
] | null | null | null |
wiserHeatAPIv2/heating_actuator.py
|
LGO44/wiserHeatAPIv2
|
dfe66edf21d069764f64842e4218ae39c81701dc
|
[
"MIT"
] | null | null | null |
wiserHeatAPIv2/heating_actuator.py
|
LGO44/wiserHeatAPIv2
|
dfe66edf21d069764f64842e4218ae39c81701dc
|
[
"MIT"
] | null | null | null |
from . import _LOGGER
from .device import _WiserDevice
from .helpers import _WiserTemperatureFunctions as tf
from .rest_controller import _WiserRestController
from .const import TEMP_OFF, TEXT_UNKNOWN, WISERHEATINGACTUATOR, WISERDEVICE
import inspect
class _WiserHeatingActuator(_WiserDevice):
"""Class representing a Wiser Heating Actuator device"""
def __init__(self, wiser_rest_controller:_WiserRestController, data: dict, device_type_data: dict):
super().__init__(data)
self._wiser_rest_controller = wiser_rest_controller
self._device_type_data = device_type_data
self._device_lock_enabled = False
self._indentify_active = data.get("IdentifyActive", False)
def _send_command(self, cmd: dict, device_level: bool = False):
"""
Send control command to the heating actuator
param cmd: json command structure
return: boolean - true = success, false = failed
"""
if device_level:
result = self._wiser_rest_controller._send_command(WISERDEVICE.format(self.id), cmd)
else:
result = self._wiser_rest_controller._send_command(WISERHEATINGACTUATOR.format(self.id), cmd)
if result:
_LOGGER.debug(
"Wiser heating actuator - {} command successful".format(
inspect.stack()[1].function
)
)
return result
@property
def current_target_temperature(self) -> float:
"""Get the smart valve current target temperature setting"""
return tf._from_wiser_temp(self._device_type_data.get("OccupiedHeatingSetPoint", TEMP_OFF))
@property
def current_temperature(self) -> float:
"""Get the current temperature measured by the smart valve"""
return tf._from_wiser_temp(self._device_type_data.get("MeasuredTemperature", TEMP_OFF), "current")
@property
def delivered_power(self) -> int:
"""Get the amount of current throught the plug over time"""
return self._device_type_data.get("CurrentSummationDelivered", 0)
@property
def device_lock_enabled(self) -> bool:
"""Get or set heating actuator device lock"""
return self._device_lock_enabled
@device_lock_enabled.setter
def device_lock_enabled(self, enable: bool):
if self._send_command({"DeviceLockEnabled": enable}, True):
self._device_lock_enabled = enable
@property
def identify(self) -> bool:
"""Get or set if the smart valve identify function is enabled"""
return self._indentify_active
@identify.setter
def identify(self, enable: bool = False):
if self._send_command({"Identify": enable}, True):
self._indentify_active = enable
@property
def instantaneous_power(self) -> int:
"""Get the amount of current throught the plug now"""
return self._device_type_data.get("InstantaneousDemand", 0)
@property
def output_type(self) -> str:
"""Get output type"""
return self._device_type_data.get("OutputType", TEXT_UNKNOWN)
@property
def room_id(self) -> int:
"""Get heating actuator room id"""
return self._device_type_data.get("RoomId", 0)
class _WiserHeatingActuatorCollection(object):
"""Class holding all wiser heating actuators"""
def __init__(self):
self._heating_actuators = []
@property
def all(self) -> dict:
return list(self._heating_actuators)
@property
def count(self) -> int:
return len(self.all)
def get_by_id(self, id: int) -> _WiserHeatingActuator:
"""
Gets a Heating Actuator object from the Heating Actuator's id
param id: id of heating actuator
return: _WiserHeatingActuator object
"""
try:
return [
heating_actuator for heating_actuator in self.all if heating_actuator.id == id
][0]
except IndexError:
return None
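# Usage sketch (illustrative only; the id value and the collection name are
# hypothetical, and the collection is assumed to have been populated elsewhere
# by the API client):
#
#   actuator = heating_actuators.get_by_id(12)
#   if actuator is not None:
#       print(actuator.current_temperature, actuator.output_type)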
| 33.90678
| 106
| 0.662584
|
df1d359d4686ef890cadcdc7efbc056ba0abca4a
| 4,936
|
py
|
Python
|
src/modules/db_operations/operations.py
|
AndersenLab/CAENDR
|
ce4cdb74db736db8226ffc90988959b71b0d5ff5
|
[
"MIT"
] | 3
|
2022-02-09T07:04:37.000Z
|
2022-03-11T02:46:35.000Z
|
src/modules/db_operations/operations.py
|
AndersenLab/CAENDR
|
ce4cdb74db736db8226ffc90988959b71b0d5ff5
|
[
"MIT"
] | 4
|
2022-01-28T22:28:08.000Z
|
2022-02-11T21:47:15.000Z
|
src/modules/db_operations/operations.py
|
AndersenLab/CAENDR
|
ce4cdb74db736db8226ffc90988959b71b0d5ff5
|
[
"MIT"
] | 1
|
2022-01-11T03:39:02.000Z
|
2022-01-11T03:39:02.000Z
|
import os
from caendr.services.cloud.postgresql import health_database_status
from logzero import logger
from caendr.models.error import EnvVarError
from caendr.models.sql import WormbaseGene, WormbaseGeneSummary, Strain, Homolog, StrainAnnotatedVariant
from caendr.services.sql.db import (drop_tables,
download_all_external_dbs,
download_external_db,
backup_external_db,
fetch_internal_db)
from caendr.services.sql.etl import (load_strains,
load_genes_summary,
load_genes,
load_homologs,
load_orthologs,
load_strain_annotated_variants)
def execute_operation(app, db, DB_OP):
WORMBASE_VERSION = os.environ.get('WORMBASE_VERSION')
STRAIN_VARIANT_ANNOTATION_VERSION = os.environ.get('STRAIN_VARIANT_ANNOTATION_VERSION')
logger.info(f'Executing {DB_OP}: WORMBASE_VERSION:{WORMBASE_VERSION} STRAIN_VARIANT_ANNOTATION_VERSION:{STRAIN_VARIANT_ANNOTATION_VERSION}')
if DB_OP == 'DROP_AND_POPULATE_ALL_TABLES':
if not WORMBASE_VERSION or not STRAIN_VARIANT_ANNOTATION_VERSION:
raise EnvVarError()
drop_and_populate_all_tables(app, db, WORMBASE_VERSION, STRAIN_VARIANT_ANNOTATION_VERSION)
elif DB_OP == 'DROP_AND_POPULATE_STRAINS':
drop_and_populate_strains(app, db)
elif DB_OP == 'DROP_AND_POPULATE_WORMBASE_GENES':
if not WORMBASE_VERSION:
raise EnvVarError()
drop_and_populate_wormbase_genes(app, db, WORMBASE_VERSION)
elif DB_OP == 'DROP_AND_POPULATE_STRAIN_ANNOTATED_VARIANTS':
if not STRAIN_VARIANT_ANNOTATION_VERSION:
raise EnvVarError()
drop_and_populate_strain_annotated_variants(app, db, STRAIN_VARIANT_ANNOTATION_VERSION)
elif DB_OP == 'TEST_ECHO':
result, message = health_database_status()
if not result:
raise Exception(f"DB Connection is: { ('OK' if result else 'ERROR') }. {message}")
elif DB_OP == 'TEST_MOCK_DATA':
os.environ["USE_MOCK_DATA"] = "1"
os.environ["MODULE_DB_OPERATIONS_CONNECTION_TYPE"] = "memory"
logger.info("Using MOCK DATA")
drop_and_populate_all_tables(app, db, WORMBASE_VERSION, STRAIN_VARIANT_ANNOTATION_VERSION)
def drop_and_populate_strains(app, db):
drop_tables(app, db, tables=[Strain.__table__])
load_strains(db)
def drop_and_populate_wormbase_genes(app, db, wb_ver: str):
logger.info(f"Running Drop and Populate wormbase genes with version: {wb_ver}")
gene_gff_fname = download_external_db('GENE_GFF_URL', wb_ver=wb_ver)
gene_gtf_fname = download_external_db('GENE_GTF_URL', wb_ver=wb_ver)
gene_ids_fname = download_external_db('GENE_IDS_URL', wb_ver=wb_ver)
homologene_fname = download_external_db('HOMOLOGENE_URL')
ortholog_fname = download_external_db('ORTHOLOG_URL')
drop_tables(app, db, tables=[Homolog.__table__, WormbaseGene.__table__])
drop_tables(app, db, tables=[WormbaseGeneSummary.__table__])
load_genes_summary(db, gene_gff_fname)
load_genes(db, gene_gtf_fname, gene_ids_fname)
load_homologs(db, homologene_fname)
load_orthologs(db, ortholog_fname)
def drop_and_populate_strain_annotated_variants(app, db, sva_ver: str):
sva_fname = fetch_internal_db('SVA_CSVGZ_URL', sva_ver=sva_ver)
db.session.commit()
logger.info(f"Dropping table...")
drop_tables(app, db, tables=[StrainAnnotatedVariant.__table__])
logger.info("Loading strain annotated variants...")
load_strain_annotated_variants(db, sva_fname)
def drop_and_populate_all_tables(app, db, wb_ver: str, sva_ver: str):
logger.info(f'Dropping and populating all tables - WORMBASE_VERSION: {wb_ver} STRAIN_VARIANT_ANNOTATION_VERSION: {sva_ver}')
logger.info("[1/8] Downloading databases...eta ~0:15")
filenames = download_all_external_dbs(wb_ver)
gene_gff_fname = filenames['GENE_GFF_URL']
gene_gtf_fname = filenames['GENE_GTF_URL']
gene_ids_fname = filenames['GENE_IDS_URL']
homologene_fname = filenames['HOMOLOGENE_URL']
ortholog_fname = filenames['ORTHOLOG_URL']
filenames['SVA_CSVGZ_URL'] = fetch_internal_db('SVA_CSVGZ_URL', sva_ver)
sva_fname = filenames['SVA_CSVGZ_URL']
logger.info("[2/8] Dropping tables...eta ~0:01")
drop_tables(app, db)
logger.info("[3/8] Load Strains...eta ~0:24")
load_strains(db)
logger.info("[4/8] Load genes summary...eta ~3:15")
load_genes_summary(db, gene_gff_fname)
logger.info("[5/8] Load genes...eta ~12:37")
load_genes(db, gene_gtf_fname, gene_ids_fname)
logger.info("[6/8] Load Homologs...eta ~3:10")
load_homologs(db, homologene_fname)
  logger.info("[7/8] Load Orthologs...eta ~17:13")
load_orthologs(db, ortholog_fname)
logger.info("[8/8] Load Strains Annotated Variants...eta ~26:47")
load_strain_annotated_variants(db, sva_fname)
| 41.478992
| 142
| 0.725081
|
bd6e2b32c2f0efc91472f87505948f39c17b04c0
| 9,949
|
py
|
Python
|
jax/interpreters/sharded_jit.py
|
davesque/jax
|
47df7b95c44d27a2bb78636c7642a60cdb622402
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-03-12T04:34:46.000Z
|
2020-03-12T04:34:46.000Z
|
jax/interpreters/sharded_jit.py
|
davesque/jax
|
47df7b95c44d27a2bb78636c7642a60cdb622402
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/interpreters/sharded_jit.py
|
davesque/jax
|
47df7b95c44d27a2bb78636c7642a60cdb622402
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from absl import logging
import numpy as onp
from .. import core
from ..abstract_arrays import ShapedArray, ConcreteArray, array_types
from . import partial_eval as pe
from . import xla
from .. import linear_util as lu
from ..lib import xla_bridge as xb
from ..lib import xla_client as xc
from ..api_util import flatten_fun
from ..tree_util import tree_flatten, tree_unflatten
from ..util import extend_name_stack, wrap_name
"""WIP shared_jit"""
def _map(f, *xs):
return tuple(map(f, *xs))
### arg handling
def _spatial_partitioned_args(devices, assignments, partitions, args):
nargs = len(args)
nrep, npar = assignments.shape
# buffers = [[[None] * nargs for _ in range(npar)] for _ in range(nrep)] # TODO
buffers = [[None] * nargs for _ in range(nrep * npar)]
for a, (arg, partition) in enumerate(zip(args, partitions)):
bufs = _partition_array(arg, devices, assignments,
partition)
for r in range(nrep):
for p in range(npar):
# buffers[r][p][a] = bufs[r][p] # TODO update C++
buffers[r * npar + p][a] = bufs[r][p]
return buffers
partition_arg_handlers = {}
def _partition_array(x, devices, assignments, partition):
nrep, npar = assignments.shape
assert nrep == 1 # TODO generalize beyond single-replica
shards = [x]
for i, parts in enumerate(partition):
shards = _flatten(onp.split(s, parts, i) for s in shards)
# logging.error("===== shards: %s" % [s.shape for s in shards])
bufs = [[None] * npar for _ in range(nrep)]
for (r, p), device in onp.ndenumerate(assignments):
bufs[r][p] = xla.device_put(shards[p], devices[device])
return bufs
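# Hedged example (added comment, not in the original source): with assignments
# of shape (1, 2) and partition=(1, 2), a (4, 8) array is left whole along
# axis 0, split in two along axis 1 into (4, 4) shards, and shard p is placed
# on devices[assignments[0, p]].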
def _flatten(lst):
return [elt for sublst in lst for elt in sublst]
for _t in array_types:
partition_arg_handlers[_t] = _partition_array
### result handling
def _pvals_to_results_handler(nrep, npar, partitions, out_pvals):
nouts = len(out_pvals)
handlers = _map(_pval_to_result_handler, partitions, out_pvals)
def handler(out_bufs):
buffers = [[[None] * npar for _ in range(nrep)] for _ in range(nouts)]
for raw_idx, tuple_buf in enumerate(out_bufs):
r, p = onp.unravel_index(raw_idx, (nrep, npar))
for i, buf in enumerate(tuple_buf.destructure()):
buffers[i][r][p] = buf
return [h(bufs) for h, bufs in zip(handlers, buffers)]
return handler
def _pval_to_result_handler(partition, pval):
pv, const = pval
if pv is None:
raise NotImplementedError # TODO handle constant outputs
else:
return _aval_to_result_handler(partition, pv)
def _aval_to_result_handler(partition, aval):
return result_handlers[type(aval)](partition, aval)
result_handlers = {}
def _array_result_handler(partition, aval):
def handler(bufs):
bufs, = bufs # TODO generalize beyond single replica
shards = [buf.to_py() for buf in bufs] # TODO device persistence
partition = (1,) # TODO (wangtao): revisit this hack.
for i, parts in enumerate(partition):
shards = [onp.concatenate(cs, axis=i) for cs in _chunk(shards, parts)]
result = shards
return result
return handler
def _chunk(lst, sz):
assert not len(lst) % sz
return [lst[i:i + sz] for i in range(0, len(lst), sz)]
result_handlers[ShapedArray] = _array_result_handler
result_handlers[ConcreteArray] = _array_result_handler
### computation building
@lu.cache
def _sharded_callable(fun: lu.WrappedFun, partitions, name, *abstract_args):
nrep = 1 # TODO generalize
in_pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args]
jaxpr, out_pvals, consts = pe.trace_to_jaxpr(fun, in_pvals, instantiate=False, bottom=True)
if not jaxpr.eqns and all(outvar is core.unitvar for outvar in jaxpr.outvars):
return lambda *_: [core.unit] * len(jaxpr.outvars)
c = xb.make_computation_builder("spjit_{}".format(fun.__name__))
xla_consts = _map(c.Constant, consts)
xla_args = _xla_sharded_args(c, abstract_args, partitions[0])
axis_env = xla.AxisEnv(nrep, [], [])
out_nodes = xla.jaxpr_subcomp(
c, jaxpr, None, axis_env, xla_consts, (),
extend_name_stack(wrap_name(name, "sharded_jit")),
*xla_args)
c._builder.SetSharding(_sharding_to_proto(partitions[1]))
out_tuple = c.Tuple(*out_nodes)
c._builder.ClearSharding()
built = c.Build(out_tuple)
num_partitions = _get_num_partitions(partitions[0])
devices = xb.local_devices()[:num_partitions]
assert len(devices) == num_partitions # TODO generalize beyond single-replica
device_assignment = onp.array([[d.id for d in devices]])
device_assignment = onp.reshape(device_assignment, (-1, num_partitions))
# device_assignment = None # TODO(skye): replace with default device assignment?
compiled = built.Compile(
compile_options=xb.get_compile_options(nrep, num_partitions, device_assignment),
backend=xb.get_backend(None))
# logging.error("===== hlo:\n%s" % built.GetHloText())
handle_args = partial(_spatial_partitioned_args, compiled.local_devices(),
device_assignment, partitions[0])
handle_outs = _pvals_to_results_handler(nrep, num_partitions, partitions[1],
out_pvals)
return partial(_execute_spatially_partitioned, compiled, handle_args,
handle_outs)
def _sharded_jit_translation_rule(c, axis_env, freevar_nodes,
in_nodes, name_stack, partitions, backend,
name, call_jaxpr):
subc = xb.make_computation_builder("jaxpr_subcomputation") # TODO(mattjj): name
freevars = [subc.ParameterWithShape(c.GetShape(n)) for n in freevar_nodes]
args = []
for p, a in zip(partitions[0], in_nodes):
subc._builder.SetSharding(_sharding_to_proto(p))
args.append(subc.ParameterWithShape(c.GetShape(a)))
subc._builder.ClearSharding()
# args = [subc.ParameterWithShape(c.GetShape(n)) for n in in_nodes]
out_nodes = xla.jaxpr_subcomp(subc, call_jaxpr, backend, axis_env, (), freevars, name_stack, *args)
subc._builder.SetSharding(_sharding_to_proto(partitions[1]))
out_tuple = subc.Tuple(*out_nodes)
subc._builder.ClearSharding()
subc = subc.Build(out_tuple)
return c.Call(subc, list(freevar_nodes) + list(in_nodes))
def _execute_spatially_partitioned(compiled, in_handler, out_handler, *args):
input_bufs = in_handler(args)
out_bufs = compiled.ExecuteOnLocalDevices(list(input_bufs))
return out_handler(out_bufs)
def _xla_sharded_args(c, avals, partitions):
xla_args = []
for p, a in zip(partitions, avals):
c._builder.SetSharding(_sharding_to_proto(p))
# logging.error("===== aval shape: %s" % str(a.shape))
    xla_args.append(c.ParameterWithShape(xla.aval_to_xla_shape(a)))
c._builder.ClearSharding()
return xla_args
def _sharding_to_proto(sharding):
proto = xc.OpSharding()
if isinstance(sharding, tuple):
if sharding[0] is None or isinstance(sharding[0], tuple):
sub_protos = [_sharding_to_proto(s) for s in sharding]
      proto.type = xc.OpSharding.Type.TUPLE
      proto.tuple_shardings = sub_protos
return proto
if sharding is None:
proto.type = xc.OpSharding.Type.REPLICATED
else:
proto.type = xc.OpSharding.Type.OTHER
proto.tile_assignment_dimensions = list(sharding)
proto.tile_assignment_devices = list(range(onp.product(sharding)))
return proto
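# Hedged example (added comment, not in the original source): for
# sharding=(1, 2) the proto built above carries
#   type                       == OpSharding.Type.OTHER
#   tile_assignment_dimensions == [1, 2]
#   tile_assignment_devices    == [0, 1]
# i.e. the operand is kept whole along axis 0 and split in two along axis 1.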
def _get_num_partitions(partitions):
num_partitions = onp.prod(onp.max(partitions, axis=0))
return num_partitions
def get_num_partitions(partitions):
num_partitions_set = set(_get_num_partitions(parts) for parts in partitions)
if len(num_partitions_set) > 1:
raise ValueError(
"All partition specs must use the same number of total partitions, "
"got: %s" % partitions)
return num_partitions_set.pop()
def jaxpr_partitions(jaxpr):
for eqn in jaxpr.eqns:
if eqn.primitive == sharded_call_p:
# TODO(skye): change API to support different output partitions
return (eqn.params["partitions"][0], (eqn.params["partitions"][1],))
# TODO(skye): more error checking
# return _get_num_partitions(eqn.params["partitions"][0])
return None, None
### sharded_call
def _sharded_call_impl(fun: lu.WrappedFun, *args, **params):
partitions = params.pop("partitions")
name = params.pop("name")
assert not params, params
compiled_fun = _sharded_callable(fun, partitions, name,
*map(xla.abstractify, args))
return compiled_fun(*args)
sharded_call_p = core.Primitive("sharded_call")
sharded_call_p.call_primitive = True
sharded_call_p.multiple_results = True
sharded_call = partial(core.call_bind, sharded_call_p)
sharded_call_p.def_custom_bind(sharded_call)
sharded_call_p.def_impl(_sharded_call_impl)
xla.call_translations[sharded_call_p] = _sharded_jit_translation_rule
def sharded_jit(fun, partitions):
if xb.get_backend().platform != "tpu":
logging.warning("sharded_jit only works on TPU")
def wrapped(*args, **kwargs):
f = lu.wrap_init(fun)
args_flat, in_tree = tree_flatten((args, kwargs))
flat_fun, out_tree = flatten_fun(f, in_tree)
out = sharded_call(flat_fun, *args_flat, partitions=partitions,
name=flat_fun.__name__)
return tree_unflatten(out_tree(), out)
return wrapped
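# Hedged usage sketch (added comment, not part of the original module): how
# this WIP wrapper might be invoked on a backend with two local devices. The
# partition-spec layout ((per-input specs,), output spec) is inferred from
# _sharded_callable above; the function and shapes are placeholders.
#
#   def f(x):
#     return x + 1
#
#   sharded_f = sharded_jit(f, partitions=(((1, 2),), (1, 2)))
#   x = onp.zeros((4, 8), dtype=onp.float32)
#   y = sharded_f(x)  # each device receives a (4, 4) shard of x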
| 33.498316
| 101
| 0.711026
|
59c4db74a162570274e849486bee7887be54b18a
| 62,157
|
py
|
Python
|
libcloud/compute/base.py
|
gig-tech/libcloud
|
3c7f83f64ee72252a14ddc1e1f8cac4474a0be4b
|
[
"Apache-2.0"
] | null | null | null |
libcloud/compute/base.py
|
gig-tech/libcloud
|
3c7f83f64ee72252a14ddc1e1f8cac4474a0be4b
|
[
"Apache-2.0"
] | null | null | null |
libcloud/compute/base.py
|
gig-tech/libcloud
|
3c7f83f64ee72252a14ddc1e1f8cac4474a0be4b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides base classes for working with drivers
"""
from __future__ import with_statement
from typing import Dict
from typing import List
from typing import Tuple
from typing import Type
from typing import Optional
from typing import Any
from typing import Union
from typing import TYPE_CHECKING
import time
import hashlib
import os
import re
import socket
import random
import binascii
import datetime
from libcloud.utils.py3 import b
import libcloud.compute.ssh
from libcloud.pricing import get_size_price
from libcloud.compute.types import NodeState, StorageVolumeState,\
DeploymentError
if TYPE_CHECKING:
from libcloud.compute.deployment import Deployment
from libcloud.compute.types import Provider
from libcloud.compute.types import NodeImageMemberState
from libcloud.compute.ssh import SSHClient
from libcloud.compute.ssh import BaseSSHClient
from libcloud.common.base import Connection
from libcloud.common.base import ConnectionKey
from libcloud.common.base import BaseDriver
from libcloud.common.types import LibcloudError
from libcloud.compute.ssh import have_paramiko
from libcloud.utils.networking import is_private_subnet
from libcloud.utils.networking import is_valid_ip_address
if have_paramiko:
from paramiko.ssh_exception import SSHException
from paramiko.ssh_exception import AuthenticationException
SSH_TIMEOUT_EXCEPTION_CLASSES = (AuthenticationException, SSHException,
IOError, socket.gaierror, socket.error)
else:
SSH_TIMEOUT_EXCEPTION_CLASSES = (IOError, socket.gaierror, # type: ignore
socket.error) # type: ignore
T_Auth = Union['NodeAuthSSHKey', 'NodeAuthPassword']
T_Ssh_key = Union[List[str], str]
# How long to wait for the node to come online after creating it
NODE_ONLINE_WAIT_TIMEOUT = 10 * 60
# How long to try connecting to a remote SSH server when running a deployment
# script.
SSH_CONNECT_TIMEOUT = 5 * 60
__all__ = [
'Node',
'NodeState',
'NodeSize',
'NodeImage',
'NodeImageMember',
'NodeLocation',
'NodeAuthSSHKey',
'NodeAuthPassword',
'NodeDriver',
'StorageVolume',
'StorageVolumeState',
'VolumeSnapshot',
# Deprecated, moved to libcloud.utils.networking
'is_private_subnet',
'is_valid_ip_address'
]
class UuidMixin(object):
"""
Mixin class for get_uuid function.
"""
def __init__(self):
self._uuid = None # type: str
def get_uuid(self):
"""
Unique hash for a node, node image, or node size
The hash is a function of an SHA1 hash of the node, node image,
or node size's ID and its driver which means that it should be
unique between all objects of its type.
In some subclasses (e.g. GoGridNode) there is no ID
available so the public IP address is used. This means that,
unlike a properly done system UUID, the same UUID may mean a
different system install at a different time
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node = driver.create_node()
>>> node.get_uuid()
'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f'
Note, for example, that this example will always produce the
same UUID!
:rtype: ``str``
"""
if not self._uuid:
self._uuid = hashlib.sha1(b('%s:%s' %
(self.id, self.driver.type))).hexdigest()
return self._uuid
@property
def uuid(self):
# type: () -> str
return self.get_uuid()
class Node(UuidMixin):
"""
Provide a common interface for handling nodes of all types.
The Node object provides the interface in libcloud through which
we can manipulate nodes in different cloud providers in the same
way. Node objects don't actually do much directly themselves,
instead the node driver handles the connection to the node.
You don't normally create a node object yourself; instead you use
a driver and then have that create the node for you.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node = driver.create_node()
>>> node.public_ips[0]
'127.0.0.3'
>>> node.name
'dummy-3'
You can also get nodes from the driver's list_node function.
>>> node = driver.list_nodes()[0]
>>> node.name
'dummy-1'
The node keeps a reference to its own driver which means that we
can work on nodes from different providers without having to know
which is which.
>>> driver = DummyNodeDriver(72)
>>> node2 = driver.create_node()
>>> node.driver.creds
0
>>> node2.driver.creds
72
Although Node objects can be subclassed, this isn't normally
done. Instead, any driver specific information is stored in the
"extra" attribute of the node.
>>> node.extra
{'foo': 'bar'}
"""
def __init__(self,
id, # type: str
name, # type: str
state, # type: NodeState
public_ips, # type: List[str]
private_ips, # type: List[str]
driver,
size=None, # type: NodeSize
image=None, # type: NodeImage
extra=None, # type: dict
created_at=None # type: datetime.datetime
):
"""
:param id: Node ID.
:type id: ``str``
:param name: Node name.
:type name: ``str``
:param state: Node state.
:type state: :class:`libcloud.compute.types.NodeState`
:param public_ips: Public IP addresses associated with this node.
:type public_ips: ``list``
:param private_ips: Private IP addresses associated with this node.
:type private_ips: ``list``
:param driver: Driver this node belongs to.
:type driver: :class:`.NodeDriver`
:param size: Size of this node. (optional)
:type size: :class:`.NodeSize`
:param image: Image of this node. (optional)
:type image: :class:`.NodeImage`
:param created_at: The datetime this node was created (optional)
:type created_at: :class: `datetime.datetime`
:param extra: Optional provider specific attributes associated with
this node.
:type extra: ``dict``
"""
self.id = str(id) if id else None
self.name = name
self.state = state
self.public_ips = public_ips if public_ips else []
self.private_ips = private_ips if private_ips else []
self.driver = driver
self.size = size
self.created_at = created_at
self.image = image
self.extra = extra or {}
UuidMixin.__init__(self)
def reboot(self):
# type: () -> bool
"""
Reboot this node
:return: ``bool``
This calls the node's driver and reboots the node
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node = driver.create_node()
>>> node.state == NodeState.RUNNING
True
>>> node.state == NodeState.REBOOTING
False
>>> node.reboot()
True
>>> node.state == NodeState.REBOOTING
True
"""
return self.driver.reboot_node(self)
def start(self):
# type: () -> bool
"""
Start this node.
:return: ``bool``
"""
return self.driver.start_node(self)
def stop_node(self):
# type: () -> bool
"""
Stop (shutdown) this node.
:return: ``bool``
"""
return self.driver.stop_node(self)
def destroy(self):
# type: () -> bool
"""
Destroy this node
:return: ``bool``
This calls the node's driver and destroys the node
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> from libcloud.compute.types import NodeState
>>> node = driver.create_node()
>>> node.state == NodeState.RUNNING
True
>>> node.destroy()
True
>>> node.state == NodeState.RUNNING
False
"""
return self.driver.destroy_node(self)
def __repr__(self):
state = NodeState.tostring(self.state)
return (('<Node: uuid=%s, name=%s, state=%s, public_ips=%s, '
'private_ips=%s, provider=%s ...>')
% (self.uuid, self.name, state, self.public_ips,
self.private_ips, self.driver.name))
class NodeSize(UuidMixin):
"""
A Base NodeSize class to derive from.
    NodeSizes are objects which are typically returned by a driver's
list_sizes function. They contain a number of different
parameters which define how big an image is.
The exact parameters available depends on the provider.
N.B. Where a parameter is "unlimited" (for example bandwidth in
Amazon) this will be given as 0.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> size = driver.list_sizes()[0]
>>> size.ram
128
>>> size.bandwidth
500
>>> size.price
4
"""
def __init__(self,
id, # type: str
name, # type: str
ram, # type: int
disk, # type: int
bandwidth, # type: Optional[int]
price, # type: float
driver, # type: NodeDriver
extra=None # type: Optional[dict]
):
"""
:param id: Size ID.
:type id: ``str``
:param name: Size name.
:type name: ``str``
:param ram: Amount of memory (in MB) provided by this size.
:type ram: ``int``
:param disk: Amount of disk storage (in GB) provided by this image.
:type disk: ``int``
        :param bandwidth: Amount of bandwidth included with this size.
:type bandwidth: ``int``
:param price: Price (in US dollars) of running this node for an hour.
:type price: ``float``
:param driver: Driver this size belongs to.
:type driver: :class:`.NodeDriver`
:param extra: Optional provider specific attributes associated with
this size.
:type extra: ``dict``
"""
self.id = str(id)
self.name = name
self.ram = ram
self.disk = disk
self.bandwidth = bandwidth
self.price = price
self.driver = driver
self.extra = extra or {}
UuidMixin.__init__(self)
def __repr__(self):
return (('<NodeSize: id=%s, name=%s, ram=%s disk=%s bandwidth=%s '
'price=%s driver=%s ...>')
% (self.id, self.name, self.ram, self.disk, self.bandwidth,
self.price, self.driver.name))
class NodeImage(UuidMixin):
"""
An operating system image.
NodeImage objects are typically returned by the driver for the
cloud provider in response to the list_images function
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> image = driver.list_images()[0]
>>> image.name
'Ubuntu 9.10'
Apart from name and id, there is no further standard information;
other parameters are stored in a driver specific "extra" variable
When creating a node, a node image should be given as an argument
to the create_node function to decide which OS image to use.
>>> node = driver.create_node(image=image)
"""
def __init__(self,
id, # type: str
name, # type: str
driver, # type: NodeDriver
extra=None # type: Optional[dict]
):
"""
:param id: Image ID.
:type id: ``str``
:param name: Image name.
:type name: ``str``
:param driver: Driver this image belongs to.
:type driver: :class:`.NodeDriver`
        :param extra: Optional provider specific attributes associated with
this image.
:type extra: ``dict``
"""
self.id = str(id)
self.name = name
self.driver = driver
self.extra = extra or {}
UuidMixin.__init__(self)
def __repr__(self):
return (('<NodeImage: id=%s, name=%s, driver=%s ...>')
% (self.id, self.name, self.driver.name))
class NodeImageMember(UuidMixin):
"""
A member of an image. At some cloud providers there is a mechanism
to share images. Once an image is shared with another account that
user will be a 'member' of the image.
For example, see the image members schema in the OpenStack Image
Service API v2 documentation. https://developer.openstack.org/
api-ref/image/v2/index.html#image-members-schema
NodeImageMember objects are typically returned by the driver for the
cloud provider in response to the list_image_members method
"""
def __init__(self,
id, # type: str
image_id, # type: str
state, # type: NodeImageMemberState
driver, # type: NodeDriver
created=None, # type: datetime.datetime
extra=None # type: Optional[dict]
):
"""
:param id: Image member ID.
:type id: ``str``
        :param image_id: The associated image ID.
        :type image_id: ``str``
:param state: State of the NodeImageMember. If not
provided, will default to UNKNOWN.
:type state: :class:`.NodeImageMemberState`
:param driver: Driver this image belongs to.
:type driver: :class:`.NodeDriver`
:param created: A datetime object that represents when the
image member was created
:type created: ``datetime.datetime``
        :param extra: Optional provider specific attributes associated with
this image.
:type extra: ``dict``
"""
self.id = str(id)
self.image_id = str(image_id)
self.state = state
self.driver = driver
self.created = created
self.extra = extra or {}
UuidMixin.__init__(self)
def __repr__(self):
return (('<NodeImageMember: id=%s, image_id=%s, '
'state=%s, driver=%s ...>')
% (self.id, self.image_id, self.state, self.driver.name))
class NodeLocation(object):
"""
A physical location where nodes can be.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> location = driver.list_locations()[0]
>>> location.country
'US'
"""
def __init__(self,
id, # type: str
name, # type: str
country, # type: str
driver, # type: NodeDriver
extra=None # type: Optional[dict]
):
"""
:param id: Location ID.
:type id: ``str``
:param name: Location name.
:type name: ``str``
:param country: Location country.
:type country: ``str``
:param driver: Driver this location belongs to.
:type driver: :class:`.NodeDriver`
        :param extra: Optional provider specific attributes associated with
this location.
:type extra: ``dict``
"""
self.id = str(id)
self.name = name
self.country = country
self.driver = driver
self.extra = extra or {}
def __repr__(self):
return (('<NodeLocation: id=%s, name=%s, country=%s, driver=%s>')
% (self.id, self.name, self.country, self.driver.name))
class NodeAuthSSHKey(object):
"""
An SSH key to be installed for authentication to a node.
    This is the actual contents of the user's ssh public key which will
normally be installed as root's public key on the node.
>>> pubkey = '...' # read from file
>>> from libcloud.compute.base import NodeAuthSSHKey
>>> k = NodeAuthSSHKey(pubkey)
>>> k
<NodeAuthSSHKey>
"""
def __init__(self, pubkey):
# type: (str) -> None
"""
:param pubkey: Public key material.
:type pubkey: ``str``
"""
self.pubkey = pubkey
def __repr__(self):
return '<NodeAuthSSHKey>'
class NodeAuthPassword(object):
"""
A password to be used for authentication to a node.
"""
def __init__(self, password, generated=False):
# type: (str, bool) -> None
"""
:param password: Password.
:type password: ``str``
        :param generated: ``True`` if this password was automatically
                          generated, ``False`` otherwise.
        :type generated: ``bool``
"""
self.password = password
self.generated = generated
def __repr__(self):
return '<NodeAuthPassword>'
class StorageVolume(UuidMixin):
"""
A base StorageVolume class to derive from.
"""
def __init__(self,
id, # type: str
name, # type: str
size, # type: int
driver, # type: NodeDriver
state=None, # type: Optional[StorageVolumeState]
extra=None # type: Optional[Dict]
):
# type: (...) -> None
"""
:param id: Storage volume ID.
:type id: ``str``
:param name: Storage volume name.
:type name: ``str``
:param size: Size of this volume (in GB).
:type size: ``int``
        :param driver: Driver this volume belongs to.
:type driver: :class:`.NodeDriver`
:param state: Optional state of the StorageVolume. If not
provided, will default to UNKNOWN.
:type state: :class:`.StorageVolumeState`
:param extra: Optional provider specific attributes.
:type extra: ``dict``
"""
self.id = id
self.name = name
self.size = size
self.driver = driver
self.extra = extra
self.state = state
UuidMixin.__init__(self)
def list_snapshots(self):
# type: () -> List[VolumeSnapshot]
"""
:rtype: ``list`` of ``VolumeSnapshot``
"""
return self.driver.list_volume_snapshots(volume=self)
def attach(self, node, device=None):
# type: (Node, Optional[str]) -> bool
"""
Attach this volume to a node.
:param node: Node to attach volume to
:type node: :class:`.Node`
:param device: Where the device is exposed,
                       e.g. '/dev/sdb' (optional)
:type device: ``str``
:return: ``True`` if attach was successful, ``False`` otherwise.
:rtype: ``bool``
"""
return self.driver.attach_volume(node=node, volume=self, device=device)
def detach(self):
# type: () -> bool
"""
Detach this volume from its node
:return: ``True`` if detach was successful, ``False`` otherwise.
:rtype: ``bool``
"""
return self.driver.detach_volume(volume=self)
def snapshot(self, name):
# type: (str) -> VolumeSnapshot
"""
Creates a snapshot of this volume.
:return: Created snapshot.
:rtype: ``VolumeSnapshot``
"""
return self.driver.create_volume_snapshot(volume=self, name=name)
def destroy(self):
# type: () -> bool
"""
Destroy this storage volume.
:return: ``True`` if destroy was successful, ``False`` otherwise.
:rtype: ``bool``
"""
return self.driver.destroy_volume(volume=self)
def __repr__(self):
return '<StorageVolume id=%s size=%s driver=%s>' % (
self.id, self.size, self.driver.name)
class VolumeSnapshot(object):
"""
A base VolumeSnapshot class to derive from.
"""
def __init__(self,
id, # type: str
driver, # type: NodeDriver
size=None, # type: int
extra=None, # type: Optional[Dict]
created=None, # type: Optional[datetime.datetime]
state=None, # type: StorageVolumeState
name=None # type: Optional[str]
):
# type: (...) -> None
"""
VolumeSnapshot constructor.
:param id: Snapshot ID.
:type id: ``str``
:param driver: The driver that represents a connection to the
provider
:type driver: `NodeDriver`
:param size: A snapshot size in GB.
:type size: ``int``
:param extra: Provider depends parameters for snapshot.
:type extra: ``dict``
:param created: A datetime object that represents when the
snapshot was created
:type created: ``datetime.datetime``
:param state: A string representing the state the snapshot is
in. See `libcloud.compute.types.StorageVolumeState`.
:type state: ``StorageVolumeState``
:param name: A string representing the name of the snapshot
:type name: ``str``
"""
self.id = id
self.driver = driver
self.size = size
self.extra = extra or {}
self.created = created
self.state = state
self.name = name
def destroy(self):
# type: () -> bool
"""
Destroys this snapshot.
:rtype: ``bool``
"""
return self.driver.destroy_volume_snapshot(snapshot=self)
def __repr__(self):
return ('<VolumeSnapshot "%s" id=%s size=%s driver=%s state=%s>' %
(self.name, self.id, self.size, self.driver.name, self.state))
class KeyPair(object):
"""
Represents a SSH key pair.
"""
def __init__(self,
name, # type: str
public_key, # type: str
fingerprint, # type: str
driver, # type: NodeDriver
private_key=None, # type: Optional[str]
extra=None # type: Optional[Dict]
):
# type: (...) -> None
"""
Constructor.
:keyword name: Name of the key pair object.
:type name: ``str``
:keyword fingerprint: Key fingerprint.
:type fingerprint: ``str``
:keyword public_key: Public key in OpenSSH format.
:type public_key: ``str``
:keyword private_key: Private key in PEM format.
:type private_key: ``str``
:keyword extra: Provider specific attributes associated with this
key pair. (optional)
:type extra: ``dict``
"""
self.name = name
self.fingerprint = fingerprint
self.public_key = public_key
self.private_key = private_key
self.driver = driver
self.extra = extra or {}
def __repr__(self):
return ('<KeyPair name=%s fingerprint=%s driver=%s>' %
(self.name, self.fingerprint, self.driver.name))
class NodeDriver(BaseDriver):
"""
A base NodeDriver class to derive from
This class is always subclassed by a specific driver. For
examples of base behavior of most functions (except deploy node)
see the dummy driver.
"""
connectionCls = ConnectionKey # type: Type[Connection]
name = None # type: str
api_name = None # type: str
website = None # type: str
type = None # type: Union[Provider,str]
port = None # type: int
features = {'create_node': []} # type: Dict[str, List[str]]
"""
List of available features for a driver.
- :meth:`libcloud.compute.base.NodeDriver.create_node`
- ssh_key: Supports :class:`.NodeAuthSSHKey` as an authentication
method for nodes.
        - password: Supports :class:`.NodeAuthPassword` as an authentication
          method for nodes.
- generates_password: Returns a password attribute on the Node
object returned from creation.
"""
NODE_STATE_MAP = {} # type: Dict[str, NodeState]
def list_nodes(self, *args, **kwargs):
# type: (Any, Any) -> List[Node]
"""
List all nodes.
:return: list of node objects
:rtype: ``list`` of :class:`.Node`
"""
raise NotImplementedError(
'list_nodes not implemented for this driver')
def list_sizes(self, location=None):
# type: (Optional[NodeLocation]) -> List[NodeSize]
"""
List sizes on a provider
:param location: The location at which to list sizes
:type location: :class:`.NodeLocation`
:return: list of node size objects
:rtype: ``list`` of :class:`.NodeSize`
"""
raise NotImplementedError(
'list_sizes not implemented for this driver')
def list_locations(self):
# type: () -> List[NodeLocation]
"""
List data centers for a provider
:return: list of node location objects
:rtype: ``list`` of :class:`.NodeLocation`
"""
raise NotImplementedError(
'list_locations not implemented for this driver')
def create_node(self,
name, # type: str
size, # type: NodeSize
image, # type: NodeImage
location=None, # type: Optional[NodeLocation]
auth=None # type: T_Auth
):
# type: (...) -> Node
"""
Create a new node instance. This instance will be started
automatically.
Not all hosting API's are created equal and to allow libcloud to
support as many as possible there are some standard supported
variations of ``create_node``. These are declared using a
``features`` API.
You can inspect ``driver.features['create_node']`` to see what
variation of the API you are dealing with:
``ssh_key``
You can inject a public key into a new node allows key based SSH
authentication.
``password``
You can inject a password into a new node for SSH authentication.
        If no password is provided, libcloud will generate a password.
The password will be available as
``return_value.extra['password']``.
``generates_password``
The hosting provider will generate a password. It will be returned
to you via ``return_value.extra['password']``.
Some drivers allow you to set how you will authenticate with the
instance that is created. You can inject this initial authentication
information via the ``auth`` parameter.
        If a driver supports the ``ssh_key`` feature flag for ``create_node``
you can upload a public key into the new instance::
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> auth = NodeAuthSSHKey('pubkey data here')
>>> node = driver.create_node("test_node", auth=auth)
If a driver supports the ``password`` feature flag for ``create_node``
you can set a password::
>>> driver = DummyNodeDriver(0)
>>> auth = NodeAuthPassword('mysecretpassword')
>>> node = driver.create_node("test_node", auth=auth)
If a driver supports the ``password`` feature and you don't provide the
``auth`` argument libcloud will assign a password::
>>> driver = DummyNodeDriver(0)
>>> node = driver.create_node("test_node")
>>> password = node.extra['password']
A password will also be returned in this way for drivers that declare
the ``generates_password`` feature, though in that case the password is
actually provided to the driver API by the hosting provider rather than
generated by libcloud.
You can only pass a :class:`.NodeAuthPassword` or
        :class:`.NodeAuthSSHKey` to ``create_node`` via the auth parameter if it
        has the corresponding feature flag.
:param name: String with a name for this new node (required)
:type name: ``str``
:param size: The size of resources allocated to this node.
(required)
:type size: :class:`.NodeSize`
:param image: OS Image to boot on node. (required)
:type image: :class:`.NodeImage`
:param location: Which data center to create a node in. If empty,
undefined behavior will be selected. (optional)
:type location: :class:`.NodeLocation`
:param auth: Initial authentication information for the node
(optional)
:type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword`
:return: The newly created node.
:rtype: :class:`.Node`
"""
raise NotImplementedError(
'create_node not implemented for this driver')
def deploy_node(self,
deploy, # type: Deployment
ssh_username='root', # type: str
ssh_alternate_usernames=None, # type: Optional[List[str]]
ssh_port=22, # type: int
ssh_timeout=10, # type: int
ssh_key=None, # type: Optional[T_Ssh_key]
auth=None, # type: T_Auth
timeout=SSH_CONNECT_TIMEOUT, # type: int
max_tries=3, # type: int
ssh_interface='public_ips', # type: str
**create_node_kwargs):
# type: (...) -> Node
"""
Create a new node, and start deployment.
In order to be able to SSH into a created node access credentials are
required.
A user can pass either a :class:`.NodeAuthPassword` or
:class:`.NodeAuthSSHKey` to the ``auth`` argument. If the
        ``create_node`` implementation supports that kind of credential (as
declared in ``self.features['create_node']``) then it is passed on to
``create_node``. Otherwise it is not passed on to ``create_node`` and
it is only used for authentication.
If the ``auth`` parameter is not supplied but the driver declares it
supports ``generates_password`` then the password returned by
``create_node`` will be used to SSH into the server.
        Finally, if ``ssh_key`` is supplied, that key will be used to
SSH into the server.
This function may raise a :class:`DeploymentException`, if a
create_node call was successful, but there is a later error (like SSH
failing or timing out). This exception includes a Node object which
you may want to destroy if incomplete deployments are not desirable.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> from libcloud.compute.deployment import ScriptDeployment
>>> from libcloud.compute.deployment import MultiStepDeployment
>>> from libcloud.compute.base import NodeAuthSSHKey
>>> driver = DummyNodeDriver(0)
>>> key = NodeAuthSSHKey('...') # read from file
>>> script = ScriptDeployment("yum -y install emacs strace tcpdump")
>>> msd = MultiStepDeployment([key, script])
>>> def d():
... try:
... driver.deploy_node(deploy=msd)
... except NotImplementedError:
... print ("not implemented for dummy driver")
>>> d()
not implemented for dummy driver
        Deploy node is typically not overridden in subclasses. The
        existing implementation should be able to handle most cases.
:param deploy: Deployment to run once machine is online and
available to SSH.
:type deploy: :class:`Deployment`
:param ssh_username: Optional name of the account which is used
when connecting to
SSH server (default is root)
:type ssh_username: ``str``
:param ssh_alternate_usernames: Optional list of ssh usernames to
try to connect with if using the
default one fails
:type ssh_alternate_usernames: ``list``
:param ssh_port: Optional SSH server port (default is 22)
:type ssh_port: ``int``
:param ssh_timeout: Optional SSH connection timeout in seconds
(default is 10)
:type ssh_timeout: ``float``
:param auth: Initial authentication information for the node
(optional)
:type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword`
:param ssh_key: A path (or paths) to an SSH private key with which
to attempt to authenticate. (optional)
:type ssh_key: ``str`` or ``list`` of ``str``
:param timeout: How many seconds to wait before timing out.
(default is 600)
:type timeout: ``int``
:param max_tries: How many times to retry if a deployment fails
before giving up (default is 3)
:type max_tries: ``int``
:param ssh_interface: The interface to wait for. Default is
'public_ips', other option is 'private_ips'.
:type ssh_interface: ``str``
"""
if not libcloud.compute.ssh.have_paramiko:
raise RuntimeError('paramiko is not installed. You can install ' +
'it using pip: pip install paramiko')
if auth:
if not isinstance(auth, (NodeAuthSSHKey, NodeAuthPassword)):
raise NotImplementedError(
'If providing auth, only NodeAuthSSHKey or'
'NodeAuthPassword is supported')
elif ssh_key:
# If an ssh_key is provided we can try deploy_node
pass
elif 'create_node' in self.features:
f = self.features['create_node']
if 'generates_password' not in f and "password" not in f:
raise NotImplementedError(
'deploy_node not implemented for this driver')
else:
raise NotImplementedError(
'deploy_node not implemented for this driver')
# NOTE 1: This is a workaround for legacy code. Sadly a lot of legacy
# code uses **kwargs in "create_node()" method and simply ignores
# "deploy_node()" arguments which are passed to it.
        # That's obviously far from ideal, which is why we first try to pass
        # only non-deploy-node arguments to the "create_node()" method and, if
        # that doesn't work, fall back to the old approach and simply pass in
        # all the arguments.
# NOTE 2: Some drivers which use password based SSH authentication
# rely on password being stored on the "auth" argument and that's why
# we also propagate that argument to "create_node()" method.
try:
# NOTE: We only pass auth to the method if auth argument is
# provided
if auth:
node = self.create_node(auth=auth, **create_node_kwargs)
else:
node = self.create_node(**create_node_kwargs)
except TypeError as e:
msg_1_re = (r'create_node\(\) missing \d+ required '
'positional arguments.*')
msg_2_re = r'create_node\(\) takes at least \d+ arguments.*'
if re.match(msg_1_re, str(e)) or re.match(msg_2_re, str(e)):
# pylint: disable=unexpected-keyword-arg
node = self.create_node( # type: ignore
deploy=deploy,
ssh_username=ssh_username,
ssh_alternate_usernames=ssh_alternate_usernames,
ssh_port=ssh_port,
ssh_timeout=ssh_timeout,
ssh_key=ssh_key,
auth=auth,
timeout=timeout,
max_tries=max_tries,
ssh_interface=ssh_interface,
**create_node_kwargs)
# pylint: enable=unexpected-keyword-arg
else:
raise e
password = None
if auth:
if isinstance(auth, NodeAuthPassword):
password = auth.password
elif 'password' in node.extra:
password = node.extra['password']
wait_timeout = timeout or NODE_ONLINE_WAIT_TIMEOUT
# Wait until node is up and running and has IP assigned
try:
node, ip_addresses = self.wait_until_running(
nodes=[node],
wait_period=3,
timeout=wait_timeout,
ssh_interface=ssh_interface)[0]
except Exception as e:
raise DeploymentError(node=node, original_exception=e, driver=self)
ssh_alternate_usernames = ssh_alternate_usernames or []
deploy_timeout = timeout or SSH_CONNECT_TIMEOUT
deploy_error = None
for username in ([ssh_username] + ssh_alternate_usernames):
try:
self._connect_and_run_deployment_script(
task=deploy, node=node,
ssh_hostname=ip_addresses[0], ssh_port=ssh_port,
ssh_username=username, ssh_password=password,
ssh_key_file=ssh_key, ssh_timeout=ssh_timeout,
timeout=deploy_timeout, max_tries=max_tries)
except Exception as e:
# Try alternate username
# Todo: Need to fix paramiko so we can catch a more specific
# exception
deploy_error = e
else:
# Script successfully executed, don't try alternate username
deploy_error = None
break
if deploy_error is not None:
raise DeploymentError(node=node, original_exception=deploy_error,
driver=self)
return node
def reboot_node(self, node):
# type: (Node) -> bool
"""
Reboot a node.
:param node: The node to be rebooted
:type node: :class:`.Node`
:return: True if the reboot was successful, otherwise False
:rtype: ``bool``
"""
raise NotImplementedError(
'reboot_node not implemented for this driver')
def start_node(self, node):
# type: (Node) -> bool
"""
Start a node.
:param node: The node to be started
:type node: :class:`.Node`
:return: True if the start was successful, otherwise False
:rtype: ``bool``
"""
raise NotImplementedError(
'start_node not implemented for this driver')
def stop_node(self, node):
# type: (Node) -> bool
"""
Stop a node
:param node: The node to be stopped.
:type node: :class:`.Node`
:return: True if the stop was successful, otherwise False
:rtype: ``bool``
"""
raise NotImplementedError(
'stop_node not implemented for this driver')
def destroy_node(self, node):
# type: (Node) -> bool
"""
Destroy a node.
Depending upon the provider, this may destroy all data associated with
the node, including backups.
:param node: The node to be destroyed
:type node: :class:`.Node`
:return: True if the destroy was successful, False otherwise.
:rtype: ``bool``
"""
raise NotImplementedError(
'destroy_node not implemented for this driver')
##
# Volume and snapshot management methods
##
def list_volumes(self):
# type: () -> List[StorageVolume]
"""
List storage volumes.
:rtype: ``list`` of :class:`.StorageVolume`
"""
raise NotImplementedError(
'list_volumes not implemented for this driver')
def list_volume_snapshots(self, volume):
# type: (StorageVolume) -> List[VolumeSnapshot]
"""
List snapshots for a storage volume.
:rtype: ``list`` of :class:`VolumeSnapshot`
"""
raise NotImplementedError(
'list_volume_snapshots not implemented for this driver')
def create_volume(self,
size, # type: int
name, # type: str
location=None, # Optional[NodeLocation]
snapshot=None # Optional[VolumeSnapshot]
):
# type: (...) -> StorageVolume
"""
Create a new volume.
:param size: Size of volume in gigabytes (required)
:type size: ``int``
:param name: Name of the volume to be created
:type name: ``str``
:param location: Which data center to create a volume in. If
empty, undefined behavior will be selected.
(optional)
:type location: :class:`.NodeLocation`
:param snapshot: Snapshot from which to create the new
volume. (optional)
:type snapshot: :class:`.VolumeSnapshot`
:return: The newly created volume.
:rtype: :class:`StorageVolume`
"""
raise NotImplementedError(
'create_volume not implemented for this driver')
def create_volume_snapshot(self, volume, name=None):
# type: (StorageVolume, Optional[str]) -> VolumeSnapshot
"""
Creates a snapshot of the storage volume.
:param volume: The StorageVolume to create a VolumeSnapshot from
:type volume: :class:`.StorageVolume`
:param name: Name of created snapshot (optional)
:type name: `str`
:rtype: :class:`VolumeSnapshot`
"""
raise NotImplementedError(
'create_volume_snapshot not implemented for this driver')
def attach_volume(self, node, volume, device=None):
# type: (Node, StorageVolume, Optional[str]) -> bool
"""
Attaches volume to node.
:param node: Node to attach volume to.
:type node: :class:`.Node`
:param volume: Volume to attach.
:type volume: :class:`.StorageVolume`
:param device: Where the device is exposed, e.g. '/dev/sdb'
:type device: ``str``
        :rtype: ``bool``
"""
raise NotImplementedError('attach not implemented for this driver')
def detach_volume(self, volume):
# type: (StorageVolume) -> bool
"""
Detaches a volume from a node.
:param volume: Volume to be detached
:type volume: :class:`.StorageVolume`
:rtype: ``bool``
"""
raise NotImplementedError('detach not implemented for this driver')
def destroy_volume(self, volume):
# type: (StorageVolume) -> bool
"""
Destroys a storage volume.
:param volume: Volume to be destroyed
:type volume: :class:`StorageVolume`
:rtype: ``bool``
"""
raise NotImplementedError(
'destroy_volume not implemented for this driver')
def destroy_volume_snapshot(self, snapshot):
# type: (VolumeSnapshot) -> bool
"""
Destroys a snapshot.
:param snapshot: The snapshot to delete
:type snapshot: :class:`VolumeSnapshot`
:rtype: :class:`bool`
"""
raise NotImplementedError(
'destroy_volume_snapshot not implemented for this driver')
##
# Image management methods
##
def list_images(self, location=None):
# type: (Optional[NodeLocation]) -> List[NodeImage]
"""
List images on a provider.
:param location: The location at which to list images.
:type location: :class:`.NodeLocation`
:return: list of node image objects.
:rtype: ``list`` of :class:`.NodeImage`
"""
raise NotImplementedError(
'list_images not implemented for this driver')
def create_image(self, node, name, description=None):
# type: (Node, str, Optional[str]) -> List[NodeImage]
"""
Creates an image from a node object.
:param node: Node to run the task on.
:type node: :class:`.Node`
:param name: name for new image.
:type name: ``str``
:param description: description for new image.
        :type description: ``str``
:rtype: :class:`.NodeImage`:
:return: NodeImage instance on success.
"""
raise NotImplementedError(
'create_image not implemented for this driver')
def delete_image(self, node_image):
# type: (NodeImage) -> bool
"""
Deletes a node image from a provider.
:param node_image: Node image object.
:type node_image: :class:`.NodeImage`
:return: ``True`` if delete_image was successful, ``False`` otherwise.
:rtype: ``bool``
"""
raise NotImplementedError(
'delete_image not implemented for this driver')
def get_image(self, image_id):
# type: (str) -> NodeImage
"""
Returns a single node image from a provider.
:param image_id: Node to run the task on.
:type image_id: ``str``
        :rtype: :class:`.NodeImage`:
:return: NodeImage instance on success.
"""
raise NotImplementedError(
'get_image not implemented for this driver')
def copy_image(self, source_region, node_image, name, description=None):
# type: (str, NodeImage, str, Optional[str]) -> NodeImage
"""
Copies an image from a source region to the current region.
:param source_region: Region to copy the node from.
:type source_region: ``str``
:param node_image: NodeImage to copy.
:type node_image: :class:`.NodeImage`:
:param name: name for new image.
:type name: ``str``
:param description: description for new image.
        :type description: ``str``
:rtype: :class:`.NodeImage`:
:return: NodeImage instance on success.
"""
raise NotImplementedError(
'copy_image not implemented for this driver')
##
# SSH key pair management methods
##
def list_key_pairs(self):
# type: () -> List[KeyPair]
"""
List all the available key pair objects.
:rtype: ``list`` of :class:`.KeyPair` objects
"""
raise NotImplementedError(
'list_key_pairs not implemented for this driver')
def get_key_pair(self, name):
# type: (str) -> KeyPair
"""
Retrieve a single key pair.
:param name: Name of the key pair to retrieve.
:type name: ``str``
:rtype: :class:`.KeyPair`
"""
raise NotImplementedError(
'get_key_pair not implemented for this driver')
def create_key_pair(self, name):
# type: (str) -> KeyPair
"""
Create a new key pair object.
:param name: Key pair name.
:type name: ``str``
:rtype: :class:`.KeyPair` object
"""
raise NotImplementedError(
'create_key_pair not implemented for this driver')
def import_key_pair_from_string(self, name, key_material):
# type: (str, str) -> KeyPair
"""
Import a new public key from string.
:param name: Key pair name.
:type name: ``str``
:param key_material: Public key material.
:type key_material: ``str``
:rtype: :class:`.KeyPair` object
"""
raise NotImplementedError(
'import_key_pair_from_string not implemented for this driver')
def import_key_pair_from_file(self, name, key_file_path):
# type: (str, str) -> KeyPair
"""
        Import a new public key from a file.
:param name: Key pair name.
:type name: ``str``
:param key_file_path: Path to the public key file.
:type key_file_path: ``str``
:rtype: :class:`.KeyPair` object
"""
key_file_path = os.path.expanduser(key_file_path)
with open(key_file_path, 'r') as fp:
key_material = fp.read().strip()
return self.import_key_pair_from_string(name=name,
key_material=key_material)
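    # Hedged example (added comment, not in the original source): with a
    # concrete driver instance, importing a local public key might look like
    #
    #   key_pair = driver.import_key_pair_from_file(
    #       name='deploy-key', key_file_path='~/.ssh/id_rsa.pub')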
def delete_key_pair(self, key_pair):
# type: (KeyPair) -> bool
"""
Delete an existing key pair.
:param key_pair: Key pair object.
:type key_pair: :class:`.KeyPair`
:rtype: ``bool``
"""
raise NotImplementedError(
'delete_key_pair not implemented for this driver')
def wait_until_running(self,
nodes, # type: List[Node]
wait_period=3, # type: float
timeout=600, # type: int
ssh_interface='public_ips', # type: str
force_ipv4=True, # type: bool
ex_list_nodes_kwargs=None # type: Optional[Dict]
):
# type: (...) -> List[Tuple[Node, List[str]]]
"""
Block until the provided nodes are considered running.
        Node is considered running when its state is "running" and when it has
at least one IP address assigned.
:param nodes: List of nodes to wait for.
:type nodes: ``list`` of :class:`.Node`
:param wait_period: How many seconds to wait between each loop
iteration. (default is 3)
:type wait_period: ``int``
:param timeout: How many seconds to wait before giving up.
(default is 600)
:type timeout: ``int``
:param ssh_interface: Which attribute on the node to use to obtain
an IP address. Valid options: public_ips,
private_ips. Default is public_ips.
:type ssh_interface: ``str``
:param force_ipv4: Ignore IPv6 addresses (default is True).
:type force_ipv4: ``bool``
:param ex_list_nodes_kwargs: Optional driver-specific keyword arguments
which are passed to the ``list_nodes``
method.
:type ex_list_nodes_kwargs: ``dict``
:return: ``[(Node, ip_addresses)]`` list of tuple of Node instance and
list of ip_address on success.
:rtype: ``list`` of ``tuple``
"""
ex_list_nodes_kwargs = ex_list_nodes_kwargs or {}
def is_supported(address):
# type: (str) -> bool
"""
Return True for supported address.
"""
if force_ipv4 and not is_valid_ip_address(address=address,
family=socket.AF_INET):
return False
return True
def filter_addresses(addresses):
# type: (List[str]) -> List[str]
"""
Return list of supported addresses.
"""
return [address for address in addresses if is_supported(address)]
if ssh_interface not in ['public_ips', 'private_ips']:
raise ValueError('ssh_interface argument must either be ' +
'public_ips or private_ips')
start = time.time()
end = start + timeout
uuids = set([node.uuid for node in nodes])
while time.time() < end:
all_nodes = self.list_nodes(**ex_list_nodes_kwargs)
matching_nodes = list([node for node in all_nodes
if node.uuid in uuids])
if len(matching_nodes) > len(uuids):
found_uuids = [node.uuid for node in matching_nodes]
msg = ('Unable to match specified uuids ' +
'(%s) with existing nodes. Found ' % (uuids) +
'multiple nodes with same uuid: (%s)' % (found_uuids))
raise LibcloudError(value=msg, driver=self)
running_nodes = [node for node in matching_nodes
if node.state == NodeState.RUNNING]
addresses = []
for node in running_nodes:
node_addresses = filter_addresses(getattr(node, ssh_interface))
if len(node_addresses) >= 1:
addresses.append(node_addresses)
if len(running_nodes) == len(uuids) == len(addresses):
return list(zip(running_nodes, addresses))
else:
time.sleep(wait_period)
continue
raise LibcloudError(value='Timed out after %s seconds' % (timeout),
driver=self)
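    # Hedged usage sketch (added comment, not in the original source): a
    # caller would typically pair this with create_node, e.g.
    #
    #   node = driver.create_node(name='web-1', size=size, image=image)
    #   node, ips = driver.wait_until_running([node], wait_period=5,
    #                                         timeout=600)[0]
    #
    # where `driver`, `size` and `image` come from a concrete provider driver;
    # the tuple unpacking mirrors the documented return type.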
def _get_and_check_auth(self, auth):
# type: (T_Auth) -> T_Auth
"""
Helper function for providers supporting :class:`.NodeAuthPassword` or
:class:`.NodeAuthSSHKey`
Validates that only a supported object type is passed to the auth
parameter and raises an exception if it is not.
If no :class:`.NodeAuthPassword` object is provided but one is expected
then a password is automatically generated.
"""
if isinstance(auth, NodeAuthPassword):
if 'password' in self.features['create_node']:
return auth
raise LibcloudError(
                'Password provided as authentication information, but password '
                'not supported', driver=self)
if isinstance(auth, NodeAuthSSHKey):
if 'ssh_key' in self.features['create_node']:
return auth
raise LibcloudError(
                'SSH Key provided as authentication information, but SSH Key '
                'not supported', driver=self)
if 'password' in self.features['create_node']:
value = os.urandom(16)
value = binascii.hexlify(value).decode('ascii')
# Some providers require password to also include uppercase
# characters so convert some characters to uppercase
password = ''
for char in value:
if not char.isdigit() and char.islower():
if random.randint(0, 1) == 1:
char = char.upper()
password += char
return NodeAuthPassword(password, generated=True)
if auth:
raise LibcloudError(
                '"auth" argument provided, but it was not a NodeAuthPassword '
                'or NodeAuthSSHKey object', driver=self)
def _wait_until_running(self, node, wait_period=3, timeout=600,
ssh_interface='public_ips', force_ipv4=True):
# type: (Node, float, int, str, bool) -> List[Tuple[Node, List[str]]]
# This is here for backward compatibility and will be removed in the
# next major release
return self.wait_until_running(nodes=[node], wait_period=wait_period,
timeout=timeout,
ssh_interface=ssh_interface,
force_ipv4=force_ipv4)
def _ssh_client_connect(self, ssh_client, wait_period=1.5, timeout=300):
# type: (BaseSSHClient, float, int) -> BaseSSHClient
"""
Try to connect to the remote SSH server. If a connection times out or
is refused it is retried up to timeout number of seconds.
:param ssh_client: A configured SSHClient instance
:type ssh_client: ``SSHClient``
:param wait_period: How many seconds to wait between each loop
iteration. (default is 1.5)
:type wait_period: ``int``
:param timeout: How many seconds to wait before giving up.
(default is 300)
:type timeout: ``int``
:return: ``SSHClient`` on success
"""
start = time.time()
end = start + timeout
while time.time() < end:
try:
ssh_client.connect()
except SSH_TIMEOUT_EXCEPTION_CLASSES as e:
# Errors which represent fatal invalid key files which should
# be propagated to the user
message = str(e).lower()
invalid_key_msgs = [
'no such file or directory',
'invalid key',
'not a valid ',
]
# Propagate (key) file doesn't exist errors
# NOTE: Paramiko only supports PEM private key format
# See https://github.com/paramiko/paramiko/issues/1313
# for details
for invalid_key_msg in invalid_key_msgs:
if invalid_key_msg in message:
raise e
# Retry if a connection is refused, timeout occurred,
# or the connection fails due to failed authentication.
ssh_client.close()
time.sleep(wait_period)
continue
else:
return ssh_client
raise LibcloudError(value='Could not connect to the remote SSH ' +
'server. Giving up.', driver=self)
def _connect_and_run_deployment_script(
self,
task, # type: Deployment
node, # type: Node
ssh_hostname, # type: str
ssh_port, # type: int
ssh_username, # type: str
ssh_password, # type: Optional[str]
ssh_key_file, # type:Optional[T_Ssh_key]
ssh_timeout, # type: int
timeout, # type: int
max_tries # type: int
):
"""
Establish an SSH connection to the node and run the provided deployment
task.
:rtype: :class:`.Node`:
:return: Node instance on success.
"""
ssh_client = SSHClient(hostname=ssh_hostname,
port=ssh_port, username=ssh_username,
password=ssh_password,
key_files=ssh_key_file,
timeout=ssh_timeout)
ssh_client = self._ssh_client_connect(ssh_client=ssh_client,
timeout=timeout)
# Execute the deployment task
node = self._run_deployment_script(task=task, node=node,
ssh_client=ssh_client,
max_tries=max_tries)
return node
def _run_deployment_script(self, task, node, ssh_client, max_tries=3):
# type: (Deployment, Node, BaseSSHClient, int) -> Node
"""
Run the deployment script on the provided node. At this point it is
assumed that SSH connection has already been established.
:param task: Deployment task to run.
:type task: :class:`Deployment`
:param node: Node to run the task on.
:type node: ``Node``
:param ssh_client: A configured and connected SSHClient instance.
:type ssh_client: :class:`SSHClient`
:param max_tries: How many times to retry if a deployment fails
before giving up. (default is 3)
:type max_tries: ``int``
:rtype: :class:`.Node`
:return: ``Node`` Node instance on success.
"""
tries = 0
while tries < max_tries:
try:
node = task.run(node, ssh_client)
except Exception as e:
tries += 1
if tries >= max_tries:
raise LibcloudError(value='Failed after %d tries: %s'
% (max_tries, str(e)), driver=self)
else:
# Deployment succeeded
ssh_client.close()
return node
return node
def _get_size_price(self, size_id):
# type: (str) -> float
"""
Return pricing information for the provided size id.
"""
return get_size_price(driver_type='compute',
driver_name=self.api_name,
size_id=size_id)
if __name__ == '__main__':
import doctest
doctest.testmod()
| 33.707701
| 79
| 0.569397
|