hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1ddeb4640afb7464adb779919965594b59498d6c | 4,342 | py | Python | openmv_cam/sensors.py | Adridri24/Robocup | aa403f73cdc5f7cdf9e3bb8afd997c2b0fa081d3 | [
"MIT"
] | 1 | 2021-07-27T21:47:06.000Z | 2021-07-27T21:47:06.000Z | openmv_cam/sensors.py | ajayat/robocup | aa403f73cdc5f7cdf9e3bb8afd997c2b0fa081d3 | [
"MIT"
] | null | null | null | openmv_cam/sensors.py | ajayat/robocup | aa403f73cdc5f7cdf9e3bb8afd997c2b0fa081d3 | [
"MIT"
] | null | null | null | import ustruct as struct
import sensor
import pyb
import ulogging as logging
logger = logging.Logger(__name__)
class Sensor:
"""A class that represent the Arduino-controlled sensors
Offers an interface for ultrasonics sensors and line sensors (IR).
"""
PIN = 2
SLAVE_ADDRESS = 0x10
def __init__(self):
self.__i2c = pyb.I2C(self.PIN)
self.__i2c.init(pyb.I2C.MASTER)
def __repr__(self) -> str:
return "Sensor(pin={}, address={})".format(self.PIN, self.SLAVE_ADDRESS)
def recv(self) -> tuple:
"""Requests the Arduino controller via I2C and unpack data received
from sensors.
Returns:
(front_dist, back_dist, line_sensors):
                The front and back distances and a list of four line-sensor values.
"""
# creates a buffer of 12 bytes (6 * typeof(int))
buffer = bytearray(12)
        # receive data from the sensor; the buffer is filled in-place
self.__i2c.recv(buffer, Sensor.SLAVE_ADDRESS)
# https://docs.python.org/3/library/struct.html
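        # ">6H": big-endian, six unsigned 16-bit integers (6 * 2 = 12 bytes, matching
        # the buffer above): front distance, back distance and four line-sensor values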
front_dist, back_dist, *line_sensors = struct.unpack(">6H", buffer)
return front_dist, back_dist, line_sensors
class Camera:
"""A class to groups functions related to OpenMV Cam
More infos:
https://openmv.io/products/openmv-cam-h7
Attributes:
width: the screen width in pixels
height: the screen height in pixels
"""
    # List of thresholds (can be obtained in the OpenMV IDE) that match the element color
THRESHOLDS = [(0, 100, 127, 42, -128, 127)] # RED COLOR
BALL_DIAMETER = 45 # in millimeters
FOCAL_LENGTH = 2.8
HFOV = 70.8 # horizontal field of view in degrees
VFOV = 55.6
REAL_SIZE = 7 # in millimeters
def __init__(self):
"""Initialize the LED to show state and setup the camera sensor"""
self._red_led = pyb.LED(1) # Turns led on (red color)
self._red_led.on()
# Setup sensor settings
# https://docs.openmv.io/library/omv.sensor.html#constants
sensor.reset()
sensor.set_vflip(True) # Reverse image on vertical axis
sensor.set_hmirror(True) # Reverse image on horizontal axis
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_auto_gain(False) # Must be turned off for color tracking
# Must be turned off for color tracking
sensor.set_auto_whitebal(False)
@staticmethod
def distance_to(blob) -> float:
"""Calculate real distance between camera and object.
Args:
blob: a blob object (you can get one with ball_blob())
Returns:
The distance in millimeters.
"""
obj_width_on_sensor = (Camera.REAL_SIZE * blob.h() / 2) / sensor.width()
distance = (Camera.BALL_DIAMETER * Camera.FOCAL_LENGTH) / obj_width_on_sensor
return distance
@staticmethod
def get_angle(blob) -> float:
"""Get horizontal relative angle (in degrees) of the blob.
Args:
blob: a blob object (you can get one with ball_blob())
Returns:
            The angle in degrees, within [-35.4, +35.4] (i.e. ±HFOV/2).
"""
rel_angle = Camera.HFOV * (blob.cxf() - sensor.width() / 2) / sensor.width()
return rel_angle
def shutdown(self):
"""Shutdown the camera and the LED"""
sensor.shutdown()
self._red_led.off()
@staticmethod
def ball_blob():
"""
        Takes a snapshot and finds pixel areas matching the thresholds.
        Returns:
            image.blob | None: a blob object if one is visible, otherwise None.
            Additional information about the blob object can be found here:
https://docs.openmv.io/library/omv.image.html
"""
img = sensor.snapshot()
        # Only blobs with more than 50 pixels and an area of at least 50 are returned
for blob in img.find_blobs(
Camera.THRESHOLDS, pixels_threshold=50, area_threshold=50
):
if blob.roundness() < 0.3:
continue
if pyb.USB_VCP().debug_mode_enabled():
# If the cam is connected to OpenMV IDE
img.draw_rectangle(blob.rect())
img.draw_cross(blob.cx(), blob.cy())
return blob # we need only one blob
return None
| 32.893939 | 85 | 0.620451 | 4,223 | 0.972593 | 0 | 0 | 1,735 | 0.399585 | 0 | 0 | 2,147 | 0.494473 |
1ddf116bdca6613e7a3cef9650883687a600fa9d | 179 | py | Python | dmutils/__init__.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | dmutils/__init__.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | dmutils/__init__.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | from . import logging, config, proxy_fix, formats, request_id
from .flask_init import init_app, init_frontend_app, init_manager
import flask_featureflags
__version__ = '24.4.1'
| 25.571429 | 65 | 0.810056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.044693 |
1de0594f63b502fd9fe8aa8efd742514e03f84cb | 1,453 | py | Python | withings_data/views.py | MPikDev/withings_weight_display | 8d0a91c949cc9c2f1323db9986f68330cf21e4bf | [
"MIT"
] | null | null | null | withings_data/views.py | MPikDev/withings_weight_display | 8d0a91c949cc9c2f1323db9986f68330cf21e4bf | [
"MIT"
] | null | null | null | withings_data/views.py | MPikDev/withings_weight_display | 8d0a91c949cc9c2f1323db9986f68330cf21e4bf | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import render, redirect
from .models import User
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from django.conf import settings
from django.contrib.auth.decorators import login_required
def login(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
    print('user, authenticate:', user)
if not request.user.is_authenticated():
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
if user is not None:
if user.is_active:
            auth_login(request, user)  # aliased import avoids recursing into this view
            return HttpResponse("Successful", status=200)
else:
return render(request, 'withings_data/get_data.html')
else:
# Return an 'invalid login' error message.
return HttpResponse("Sucessful", status=200)
def logout_view(request):
    auth_logout(request)
# Redirect to a success page.
@login_required
def withings_page(request):
return render(request, 'withings_data/get_data.html')
@login_required
def get_data(request):
email = request.POST.get("email", "")
password = request.POST.get("password", "")
    if not email or not password:
return HttpResponse('No Email and/or Password', status=403)
else:
        # print('Email: %s, Password: %s' % (email, password))
User.objects.get_or_create(first_name='fname', last_name='lname', email=email, password=password)
return HttpResponse("Sucessful", status=200)
| 28.490196 | 99 | 0.748107 | 0 | 0 | 0 | 0 | 538 | 0.370268 | 0 | 0 | 334 | 0.229869 |
1de174b38b26130877be87229aea9bb86c21ce03 | 52 | py | Python | backend/models/__init__.py | hajajmaor/e_commerce_website | c3cb4b7acb4447231358ee153128dab1ef7096f5 | [
"MIT"
] | null | null | null | backend/models/__init__.py | hajajmaor/e_commerce_website | c3cb4b7acb4447231358ee153128dab1ef7096f5 | [
"MIT"
] | 7 | 2021-04-06T16:01:12.000Z | 2021-04-29T07:11:40.000Z | backend/models/__init__.py | hajajmaor/e_commerce_website | c3cb4b7acb4447231358ee153128dab1ef7096f5 | [
"MIT"
] | null | null | null | from .cart import receipt_col as receipt_collection
| 26 | 51 | 0.865385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1de1dc48d04018d54f9948a56fb3009257d670db | 14,662 | py | Python | wwzplotter.py | skiehl/wwz | e48a85d415bc2e3c4308a17ae511534372d52cba | [
"BSD-3-Clause"
] | null | null | null | wwzplotter.py | skiehl/wwz | e48a85d415bc2e3c4308a17ae511534372d52cba | [
"BSD-3-Clause"
] | null | null | null | wwzplotter.py | skiehl/wwz | e48a85d415bc2e3c4308a17ae511534372d52cba | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
"""A class for plotting results of the weighted wavelet z-transform analysis.
"""
import matplotlib.gridspec as gs
import matplotlib.pyplot as plt
from matplotlib.ticker import LogLocator
import numpy as np
import os
import sys
__author__ = "Sebastian Kiehlmann"
__credits__ = ["Sebastian Kiehlmann"]
__license__ = "GPL"
__version__ = "1.1"
__maintainer__ = "Sebastian Kiehlmann"
__email__ = "skiehlmann@mail.de"
__status__ = "Production"
#==============================================================================
# CLASSES
#==============================================================================
class WWZPlotter:
"""A class for plotting WWZ results."""
#--------------------------------------------------------------------------
def __init__(self, wwz, tunit=None):
"""A class for plotting WWZ results.
Parameters
----------
wwz : wwz.WWZ
            A WWZ instance that is used for plotting.
        tunit : str, optional
            Time unit used in the axis labels. The default is None.
        Returns
-------
None.
"""
self.wwz = wwz
self.okay = True
if wwz.wwz is None:
print('Note: There is no WWZ transform data stored in this WWZ ' \
'instance. There will be nothing to plot.')
self.okay = False
if wwz.freq is not None:
# check if frequencies are linearly scaled:
freq = self.wwz.freq
df = np.diff(freq)
self.linear_freq = np.all(np.isclose(df, df.mean()))
self.fmin = freq.min()
self.fmax = freq.max()
# get periods and check if linearly scaled:
period = 1. / freq
dp = np.diff(period)
self.linear_period = np.all(np.isclose(dp, dp.mean()))
self.pmin = period.min()
self.pmax = period.max()
self.n_ybins = freq.size
else:
self.okay = False
if wwz.tau is not None:
self.tmin = wwz.tau.min()
self.tmax = wwz.tau.max()
else:
self.okay = False
if self.okay:
if self.linear_freq:
self.ymin = self.fmax
self.ymax = self.fmin
self.ymin_alt = self.pmax
self.ymax_alt = self.pmin
self.ylabel = f'Frequency [1/{tunit}]' \
if isinstance(tunit, str) else 'Frequency'
self.ylabel_alt = f'Period [{tunit}]' \
if isinstance(tunit, str) else 'Period'
elif self.linear_period:
self.ymin = self.pmin
self.ymax = self.pmax
self.ymin_alt = self.fmin
self.ymax_alt = self.fmax
self.ylabel = f'Period [{tunit}]' if isinstance(tunit, str) \
else 'Period'
self.ylabel_alt = f'Frequency [1/{tunit}]' \
if isinstance(tunit, str) else 'Frequency'
else:
self.ymin = 0
self.ymax = 1
self.ymin_alt = 1
self.ymax_alt = 0
self.ylabel = 'Non-linear scale'
self.ylabel_alt = 'Non-linear scale'
#--------------------------------------------------------------------------
def _select_map(self, select):
"""Helper method to select a map from a WWZ instance.
Parameters
----------
select : str
Select either 'wwz' or 'wwa'.
Raises
------
ValueError
Raised if 'select' is not one of the allowed options.
Returns
-------
result : numpy.ndarray
The selected WWZ or WWA array.
"""
# check that selection is allowed:
if select.lower() not in ['wwz', 'wwa']:
raise ValueError(f"'{select}' is not a valid selection.")
select = select.lower()
result = eval(f'self.wwz.{select}')
        # check if result map is available:
        if result is None:
            print(f'No {select.upper()} transform available.')
            return None
        result = result.transpose()
return result
#--------------------------------------------------------------------------
def plot_map(
self, select, ax=None, xlabel=None, **kwargs):
"""Plot the resulting map from a WWZ instance.
Parameters
----------
select : str
Select either 'wwz' or 'wwa'.
ax : matplotlib.pyplot.axis, optional
            The axis to plot to. If None is given a new axis is created. The
default is None.
xlabel : str, optional
The x-axis label. If None is provided no label is placed. The
default is None.
kwargs : dict, optional
Keyword arguments forwarded to the matplotlib.pyplot.imshow()
function.
Returns
-------
matplotlib.pyplot.axis
The axis to which the map was plotted.
matplotlib.image.AxesImage
The image.
"""
if not self.okay:
return None, None
# select result:
result = self._select_map(select)
if result is None:
return None, None
# create figure if needed:
if ax is None:
__, ax = plt.subplots(1)
# plot:
extent = [self.tmin, self.tmax, self.ymin, self.ymax]
im = ax.imshow(
result, origin='upper', aspect='auto', extent=extent,
**kwargs)
# add labels:
if xlabel:
ax.set_xlabel(xlabel)
ax.set_ylabel(self.ylabel)
return ax, im
#--------------------------------------------------------------------------
def plot_map_avg(
self, select, statistic='mean', ax=None, ylabel=False, **kwargs):
"""Vertically plot an average along the time axis of the transform map.
Parameters
----------
select : str
Select either 'wwz' or 'wwa'.
statistic : str, optional
Choose either 'mean' or 'median'. The default is 'mean'.
ax : matplotlib.pyplot.axis, optional
            The axis to plot to. If None is given a new axis is created. The
default is None.
ylabel : bool, optional
If True a label is added to the y-axis. The default is False.
**kwargs : dict
Keyword arguments forwarded to the matplotlib.pyplot.plot()
function.
Raises
------
ValueError
Raised if 'statistic' is not one of the allowed options.
Returns
-------
matplotlib.pyplot.axis
The axis to which the data was plotted.
"""
if not self.okay:
return None
# select result:
result = self._select_map(select)
if result is None:
            return None
# calculate statistic:
if statistic not in ['mean', 'median']:
raise ValueError(f"'{statistic}' is not a valid statistic.")
elif statistic == 'median':
result_avg = np.median(result, axis=1)
else:
result_avg = np.mean(result, axis=1)
# create figure if needed:
if ax is None:
__, ax = plt.subplots(1)
# plot:
y = np.linspace(self.ymin, self.ymax, result_avg.size)
ax.plot(result_avg[::-1], y, **kwargs)
# add labels:
if ylabel:
ax.set_ylabel(self.ylabel)
ax.set_xlabel(f'{statistic.capitalize()} {select.upper()}')
return ax
#--------------------------------------------------------------------------
def plot_data(
self, ax=None, errorbars=True, xlabel=None, ylabel=None, **kwargs):
"""Plot the data stored in a WWZ instance.
Parameters
----------
ax : matplotlib.pyplot.axis, optional
            The axis to plot to. If None is given a new axis is created. The
default is None.
errorbars : bool, optional
If True errorbars are shown, if uncertainties were stored in the
WWZ instance. The default is True.
xlabel : str, optional
The x-axis description. If None is provided no label is printed.
The default is None.
ylabel : str, optional
The y-axis description. If None is provided no label is printed.
The default is None.
**kwargs : dict
Keyword arguments forwarded to the matplotlib.pyplot.errorbar()
function.
Returns
-------
matplotlib.pyplot.axis
The axis to which the data was plotted.
"""
# check if data is available:
if self.wwz.t is None:
print('No data available.')
return None
# create figure if needed:
if ax is None:
__, ax = plt.subplots(1)
# plot:
if errorbars and self.wwz.s_x is not None:
ax.errorbar(self.wwz.t, self.wwz.x, self.wwz.s_x, **kwargs)
else:
ax.plot(self.wwz.t, self.wwz.x, **kwargs)
# add labels:
if isinstance(xlabel, str):
ax.set_xlabel(xlabel)
if isinstance(ylabel, str):
ax.set_ylabel(ylabel)
return ax
#--------------------------------------------------------------------------
def add_right_labels(self, ax):
"""Add ticks and labels to the right side of a plot showing the
alternative unit, i.e. frequency if period is used on the left side and
vice versa.
Parameters
----------
        ax : matplotlib.pyplot.axis
            The axis whose right-hand side receives the alternative-unit ticks
            and labels.
Returns
-------
ax2 : matplotlib.pyplot.axis
The new axis to which the labels were added.
"""
ax2 = ax.twinx()
plt.setp(ax2.get_xticklabels(), visible=False)
ax2.yaxis.set_label_position("right")
ax2.yaxis.tick_right()
ax2.set_ylim(self.ymin_alt, self.ymax_alt)
        sys.stderr = open(os.devnull, "w")  # silence stderr to suppress warning
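        # Period and frequency are reciprocals, so 1/x is its own inverse and serves
        # as both the forward and the inverse transform for the twin-axis scale.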
conversion = lambda x: 1/x
ax2.set_yscale('function', functions=(conversion, conversion))
sys.stderr = sys.__stderr__ # unsilence stderr
ax2.yaxis.set_major_locator(LogLocator(subs='all'))
ax2.set_ylabel(self.ylabel_alt)
return ax2
#--------------------------------------------------------------------------
def plot(self, select, statistic='mean', errorbars=True,
peaks_quantile=None, xlabel=None, ylabel=None, figsize=None,
height_ratios=(2, 1), width_ratios=(5, 1), kwargs_map={},
kwargs_map_avg={}, kwargs_data={}, kwargs_peaks={}):
"""Plot the WWZ map, average, and data.
Parameters
----------
select : str
Select either 'wwz' or 'wwa'.
statistic : str, optional
Choose either 'mean' or 'median'. The default is 'mean'.
errorbars : bool, optional
If True errorbars are shown, if uncertainties were stored in the
WWZ instance. The default is True.
peaks_quantile : float, optional
If not None, a ridge line along the peak position is shown.
peaks_quantile needs to be a float between 0 and 1. Only peaks in
the quantile above this threshold are shown. The default is None.
xlabel : str, optional
The x-axis description. If None is provided no label is printed.
The default is None.
ylabel : str, optional
The y-axis description. If None is provided no label is printed.
The default is None.
figsize : tuple, optional
Set the figure size. The default is None.
height_ratios : tuple, optional
Set the size ratio between the top and bottom panel with two values
in a tuple. The default is (2, 1).
width_ratios : tuple, optional
Set the size ratio between the left and right panel with two values
in a tuple. The default is (5, 1).
kwargs_map : dict, optional
Keyword arguments forwarded to plotting the map. The default is {}.
kwargs_map_avg : dict, optional
Keyword arguments forwarded to plotting the map average. The
default is {}.
kwargs_data : dict, optional
Keyword arguments forwarded to plotting the data. The default is
{}.
kwargs_peaks : dict, optional
Keyword arguments forwarded to plotting the peak ridge lines. The
default is {}.
Returns
-------
ax_map : matplotlib.pyplot.axis
The map axis.
ax_map_avg : matplotlib.pyplot.axis
The map average axis.
ax_data : matplotlib.pyplot.axis
The data axis.
"""
# create figure:
plt.figure(figsize=figsize)
grid = gs.GridSpec(
2, 2, hspace=0, wspace=0, height_ratios=height_ratios,
width_ratios=width_ratios)
ax_map = plt.subplot(grid[0,0])
ax_map_avg = plt.subplot(grid[0,1])
ax_data = plt.subplot(grid[1,0])
# plot map:
self.plot_map(
select, ax=ax_map, **kwargs_map)
# plot map average:
self.plot_map_avg(
select, statistic=statistic, ax=ax_map_avg, **kwargs_map_avg)
extend = (self.ymax - self.ymin) / (self.n_ybins - 1) / 2.
ax_map_avg.set_ylim(self.ymin-extend, self.ymax+extend)
# plot data:
self.plot_data(
ax=ax_data, errorbars=errorbars, xlabel=xlabel, ylabel=ylabel,
**kwargs_data)
ax_data.set_xlim(self.tmin, self.tmax)
# plot peaks:
if peaks_quantile:
peak_tau, peak_pos, peak_signal = self.wwz.find_peaks(
select, peaks_quantile)
ax_map.plot(peak_tau, peak_pos, **kwargs_peaks)
# add right axis labels:
self.add_right_labels(ax_map_avg)
# add data axis labels:
ax_data.set_xlabel(xlabel)
ax_data.set_ylabel(ylabel)
plt.setp(ax_map_avg.get_yticklabels(), visible=False)
plt.setp(ax_map.get_xticklabels(), visible=False)
return ax_map, ax_map_avg, ax_data
#==============================================================================
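
# A minimal usage sketch (assumes an existing `wwz.WWZ` instance `my_wwz` that
# already holds transform results; all names outside this module are hypothetical):
#
#     plotter = WWZPlotter(my_wwz, tunit='d')
#     plotter.plot('wwz', statistic='median', xlabel='Time [d]', ylabel='Flux')
#     plt.show()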
| 33.398633 | 79 | 0.524349 | 13,921 | 0.949461 | 0 | 0 | 0 | 0 | 0 | 0 | 7,958 | 0.542764 |
1de5367a1f3c604d3f330ae4e673bd72ff6587fa | 7,651 | py | Python | preprocessing/emotic/load_data_from_numpy.py | GKalliatakis/DisplaceNet | 439bcd5ed4133b040baa107c215170eb963aa343 | [
"MIT"
] | 7 | 2019-05-13T01:49:43.000Z | 2020-02-19T04:16:35.000Z | preprocessing/emotic/load_data_from_numpy.py | GKalliatakis/DisplaceNet | 439bcd5ed4133b040baa107c215170eb963aa343 | [
"MIT"
] | null | null | null | preprocessing/emotic/load_data_from_numpy.py | GKalliatakis/DisplaceNet | 439bcd5ed4133b040baa107c215170eb963aa343 | [
"MIT"
] | 4 | 2019-05-28T16:06:31.000Z | 2020-02-27T09:29:16.000Z | """Python utilities required to load data (image & their annotations) stored in numpy arrays.
Functions `load_numpy_arrays_single_output` & `load_numpy_arrays_emotions_age_only` are deprecated.
Use either the main function `load_data_from_numpy` to load all the applicable arrays
or the supporting `load_annotations_only_from_numpy` instead.
"""
from __future__ import print_function
import numpy as np
def load_data_from_numpy(main_numpy_dir,
verbose = 1):
print ('[INFO] Loading data from numpy arrays...')
x_entire_train = np.load(main_numpy_dir + 'X_train/x_entire_train.npy')
x_cropped_train = np.load(main_numpy_dir + 'X_train/x_cropped_train.npy')
valence_entire_train = np.load(main_numpy_dir + 'Y_train/valence_train.npy')
valence_cropped_train = np.load(main_numpy_dir + 'Y_train/valence_train.npy')
arousal_entire_train = np.load(main_numpy_dir + 'Y_train/arousal_train.npy')
arousal_cropped_train = np.load(main_numpy_dir + 'Y_train/arousal_train.npy')
dominance_entire_train = np.load(main_numpy_dir + 'Y_train/dominance_train.npy')
dominance_cropped_train = np.load(main_numpy_dir + 'Y_train/dominance_train.npy')
x_entire_val = np.load(main_numpy_dir + 'X_train/x_entire_val.npy')
x_cropped_val = np.load(main_numpy_dir + 'X_train/x_cropped_val.npy')
valence_entire_val = np.load(main_numpy_dir + 'Y_train/valence_val.npy')
valence_cropped_val = np.load(main_numpy_dir + 'Y_train/valence_val.npy')
arousal_entire_val = np.load(main_numpy_dir + 'Y_train/arousal_val.npy')
arousal_cropped_val = np.load(main_numpy_dir + 'Y_train/arousal_val.npy')
dominance_entire_val = np.load(main_numpy_dir + 'Y_train/dominance_val.npy')
dominance_cropped_val = np.load(main_numpy_dir + 'Y_train/dominance_val.npy')
x_entire_test = np.load(main_numpy_dir + 'X_train/x_entire_test.npy')
x_cropped_test = np.load(main_numpy_dir + 'X_train/x_cropped_test.npy')
valence_entire_test = np.load(main_numpy_dir + 'Y_train/valence_test.npy')
valence_cropped_test = np.load(main_numpy_dir + 'Y_train/valence_test.npy')
arousal_entire_test = np.load(main_numpy_dir + 'Y_train/arousal_test.npy')
arousal_cropped_test = np.load(main_numpy_dir + 'Y_train/arousal_test.npy')
dominance_entire_test = np.load(main_numpy_dir + 'Y_train/dominance_test.npy')
dominance_cropped_test = np.load(main_numpy_dir + 'Y_train/dominance_test.npy')
print('[INFO] Data have been successfully loaded')
print('---------------------------------------------------------------------------------------------------')
if verbose == 1:
print('x_entire_train shape:', x_entire_train.shape)
print('x_cropped_train shape:', x_cropped_train.shape)
print('valence_entire_train shape:', valence_entire_train.shape)
print('valence_cropped_train shape:', valence_cropped_train.shape)
print('arousal_entire_train shape:', arousal_entire_train.shape)
print('arousal_cropped_train shape:', arousal_cropped_train.shape)
print('dominance_entire_train shape:', dominance_entire_train.shape)
print('dominance_cropped_train shape:', dominance_cropped_train.shape)
print ('---------------------------------------------------------------------------------------------------')
print('x_entire_val shape:', x_entire_val.shape)
print('x_cropped_val shape:', x_cropped_val.shape)
print('valence_entire_val shape:', valence_entire_val.shape)
print('valence_cropped_val shape:', valence_cropped_val.shape)
print('arousal_entire_val shape:', arousal_entire_val.shape)
print('arousal_cropped_val shape:', arousal_cropped_val.shape)
print('dominance_entire_val shape:', dominance_entire_val.shape)
print('dominance_cropped_val shape:', dominance_cropped_val.shape)
print ('---------------------------------------------------------------------------------------------------')
print('x_entire_test shape:', x_entire_test.shape)
print('x_cropped_test shape:', x_cropped_test.shape)
print('valence_entire_test shape:', valence_entire_test.shape)
print('valence_cropped_test shape:', valence_cropped_test.shape)
print('arousal_entire_test shape:', arousal_entire_test.shape)
print('arousal_cropped_test shape:', arousal_cropped_test.shape)
print('dominance_entire_test shape:', dominance_entire_test.shape)
print('dominance_cropped_test shape:', dominance_cropped_test.shape)
print ('---------------------------------------------------------------------------------------------------')
return (x_entire_train, x_cropped_train,valence_entire_train,valence_cropped_train,arousal_entire_train,arousal_cropped_train,dominance_entire_train,dominance_cropped_train), \
(x_entire_val, x_cropped_val, valence_entire_val,valence_cropped_val,arousal_entire_val,arousal_cropped_val,dominance_entire_val,dominance_cropped_val), \
(x_entire_test, x_cropped_test, valence_entire_test,valence_cropped_test,arousal_entire_test,arousal_cropped_test,dominance_entire_test,dominance_cropped_test)
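
# Usage sketch (the directory path is hypothetical and must contain the X_train/
# and Y_train/ subdirectories referenced above):
#
#     train, val, test = load_data_from_numpy('numpy_data/', verbose=0)
#     x_entire_train, x_cropped_train = train[0], train[1]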
def load_data_from_numpy_single_output(main_numpy_dir,
verbose=1):
print ('[INFO] Loading data from numpy arrays...')
x_image_train = np.load(main_numpy_dir + 'X_train/x_image_train.npy')
x_body_train = np.load(main_numpy_dir + 'X_train/x_body_train.npy')
y_image_train = np.load(main_numpy_dir + 'Y_train/y_train.npy')
y_body_train = np.load(main_numpy_dir + 'Y_train/y_train.npy')
x_image_val = np.load(main_numpy_dir + 'X_train/x_image_val.npy')
x_body_val = np.load(main_numpy_dir + 'X_train/x_body_val.npy')
y_image_val = np.load(main_numpy_dir + 'Y_train/y_val.npy')
y_body_val = np.load(main_numpy_dir + 'Y_train/y_val.npy')
x_image_test = np.load(main_numpy_dir + 'X_train/x_image_test.npy')
x_body_test = np.load(main_numpy_dir + 'X_train/x_body_test.npy')
y_image_test = np.load(main_numpy_dir + 'Y_train/y_test.npy')
y_body_test = np.load(main_numpy_dir + 'Y_train/y_test.npy')
print('[INFO] Data have been successfully loaded')
print('---------------------------------------------------------------------------------------------------')
if verbose == 1:
print('x_image_train shape:', x_image_train.shape)
print('x_body_train shape:', x_body_train.shape)
print('y_image_train shape:', y_image_train.shape)
print('y_body_train shape:', y_body_train.shape)
print ('---------------------------------------------------------------------------------------------------')
print('x_image_val shape:', x_image_val.shape)
print('x_body_val shape:', x_body_val.shape)
print('y_image_val shape:', y_image_val.shape)
print('y_body_val shape:', y_body_val.shape)
print ('---------------------------------------------------------------------------------------------------')
print('x_image_test shape:', x_image_test.shape)
print('x_body_test shape:', x_body_test.shape)
print('y_image_test shape:', y_image_test.shape)
print('y_body_test shape:', y_body_test.shape)
print ('---------------------------------------------------------------------------------------------------')
return (x_image_train, x_body_train,y_image_train,y_body_train), \
(x_image_val, x_body_val, y_image_val,y_body_val), \
(x_image_test, x_body_test,y_image_test,y_body_test)
| 46.369697 | 180 | 0.656385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,159 | 0.412887 |
1de617306ea8b95ead3e14011fe6581a03bd62b9 | 15,575 | py | Python | services/core/Ambient/ambient/agent.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | 406 | 2015-01-20T03:08:53.000Z | 2022-03-31T20:59:07.000Z | services/core/Ambient/ambient/agent.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | 2,031 | 2015-01-05T21:35:45.000Z | 2022-03-29T21:44:36.000Z | services/core/Ambient/ambient/agent.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | 219 | 2015-01-20T14:53:57.000Z | 2022-03-06T00:37:41.000Z | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official,
# policies either expressed or implied, of the FreeBSD Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# or favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
__docformat__ = 'reStructuredText'
import logging
import datetime
import pytz
import sys
import grequests
# requests should be imported after grequests as requests imports ssl and grequests patches ssl
import requests
import pkg_resources
from volttron.platform.agent import utils
from volttron.platform.vip.agent import RPC
from volttron.platform.agent.utils import format_timestamp
from volttron.platform.agent.base_weather import BaseWeatherAgent
from volttron.platform import jsonapi
_log = logging.getLogger(__name__)
utils.setup_logging()
__version__ = "0.1"
def ambient(config_path, **kwargs):
"""
Parses the Agent configuration and returns an instance of the agent created using that configuration.
:param config_path: Path to a configuration file.
:type config_path: str
:returns: Ambient
:rtype: Ambient
"""
try:
config = utils.load_config(config_path)
except Exception:
config = {}
if not config:
_log.error("Ambient agent configuration: ".format(config))
for key in ["api_key", "application_key"]:
if not config.get(key) or not isinstance(config.get(key), str):
raise RuntimeError("Ambient agent must be configured with '{}' key.".format(key))
_log.debug("config_dict before init: {}".format(config))
utils.update_kwargs_with_config(kwargs, config)
return Ambient(**kwargs)
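
# Illustrative agent configuration -- only the two keys validated above are
# required; the values are placeholders, not real credentials:
#
#     {
#         "api_key": "<your Ambient API key>",
#         "application_key": "<your Ambient application key>"
#     }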
class Ambient(BaseWeatherAgent):
"""
    The Ambient agent requires an API key and an application key to interact with the remote API.
"""
def __init__(self, application_key="", **kwargs):
super(Ambient, self).__init__(**kwargs)
_log.debug("vip_identity: " + self.core.identity)
self.headers = {"Accept": "application/json",
"Accept-Language": "en-US"
}
self.remove_service("get_hourly_historical")
self.remove_service("get_hourly_forecast")
self.app_key = application_key
self.last_service_call_timestamp = None
@RPC.export
def get_version(self):
"""
Provides the current version of the agent.
:return: current version number in string format.
"""
return __version__
def validate_location(self, service_name, location):
"""
Indicates whether the location dictionary provided matches the format required by the remote weather API
:param service_name: name of the remote API service
:param location: location dictionary to provide in the remote API url
:return: True if the location matches the required format else False
"""
return isinstance(location.get("location", None), str)
def get_update_interval(self, service_name):
"""
Indicates the interval between remote API updates
:param service_name: requested service endpoint
:return: datetime timedelta representing the time interval
"""
if service_name == "get_current_weather":
return datetime.timedelta(minutes=5)
else:
return None
def get_api_description(self, service_name):
"""
Provides a human-readable description of the various endpoints provided by the agent
:param service_name: requested service endpoint
:return: Human-readable description string
"""
        if service_name == "get_current_weather":
            return "Provides current weather observations for locations by their corresponding Ambient weather " \
                   "station name via RPC (Requires {'location': <station location string>})"
else:
raise RuntimeError(
"Service {} is not implemented by Ambient.".format(service_name))
def get_point_name_defs_file(self):
"""
        Provides the CSV file used to build the point name mapping.
        :return: path to the CSV file mapping service point names to standard point names
"""
# returning resource file instead of stream, as csv.DictReader require file path or file like object opened in
# text mode.
return pkg_resources.resource_filename(__name__, "data/name_mapping.csv")
def query_current_weather(self, location):
"""
        Retrieve data from the Ambient API; return formatted current data for the requested
        location and cache the current data of any other stations returned
:param location: location dictionary requested by the user
:return: Timestamp and data for current data from the Ambient API
"""
ambient_response = self.make_request()
location_response = None
current_time = None
for record in ambient_response:
record_location = None
record_info = record.pop("info")
if record_info:
record_location = record_info.get("location", "")
if record_location:
weather_data = record.get("lastData", {})
weather_data["macAddress"] = record.pop("macAddress", "")
weather_data["name"] = record_info.get("name", "")
# "date": "2019-04-25T17:09:00.000Z"
weather_tz_string = weather_data.get('tz', None)
if weather_tz_string:
weather_tz = pytz.timezone(weather_tz_string)
else:
weather_tz = pytz.utc
weather_date = datetime.datetime.strptime(
weather_data.pop("date"), "%Y-%m-%dT%H:%M:%S.%fZ").astimezone(weather_tz)
if location["location"] == record_location:
current_time = format_timestamp(weather_date)
location_response = weather_data
else:
weather_data = self.apply_mapping(weather_data)
self.store_weather_records("get_current_weather",
[jsonapi.dumps({"location": record_location}),
weather_date,
jsonapi.dumps(weather_data)])
else:
raise RuntimeError("API record contained improper 'info' format")
return current_time, location_response
def query_forecast_service(self, service, location, quantity, forecast_start):
"""
Unimplemented method stub
:param service: forecast service type of weather data to return
:param location: location dictionary requested during the RPC call
:param quantity: number of records to return, used to generate Time Machine requests after the forecast request
:param forecast_start: forecast results that are prior to this timestamp will be filtered by base weather agent
:return: Timestamp and data returned by the Ambient weather API response
"""
raise NotImplementedError
def make_request(self):
"""
Request data from the Ambient Weather API
An example of the return value is as follows
[
{
"macAddress": "18:93:D7:3B:89:0C",
"lastData": {
"dateutc": 1556212140000,
"tempinf": 71.9,
"humidityin": 31,
"battout": "1",
"temp1f": 68.7,
"humidity1": 36,
"batt1": "1",
"date": "2019-04-25T17:09:00.000Z"
},
"info": {
"name": "Home B WS",
"location": "Lab Home B"
}
},
{
"macAddress": "50:F1:4A:F7:3C:C4",
"lastData": {
"dateutc": 1556211960000,
"tempinf": 82.5,
"humidityin": 27,
"battout": "1",
"temp1f": 68.5,
"humidity1": 42,
"batt1": "1",
"date": "2019-04-25T17:06:00.000Z"
},
"info": {
"name": "Home A WS",
"location": "Lab Home A"
}
}
]
        :return: the parsed JSON response from the Ambient API -- a list of device records like the example above
"""
        # Authentication: two API keys are required for all REST API requests:
        #   applicationKey - identifies the developer / application. To request an application
        #       key please email support@ambient.com
        #   apiKey - grants access to past/present data for a given user's devices. A typical
        #       consumer-facing application will initially ask the user to create an apiKey on
        #       their Ambient.net account page (https://dashboard.ambientweather.net/account)
        #       and paste it into the app. Developers for personal or in-house apps will also
        #       need to create an apiKey on their own account page.
        # Rate limiting: API requests are capped at 1 request/second for each user's apiKey and
        # 3 requests/second per applicationKey. When this limit is exceeded, the API will return
        # a 429 response code. Please be kind to our servers :)
# If the previous call to the API was at least 3 seconds ago - this is a constraint set by Ambient
if not self.last_service_call_timestamp or (
datetime.datetime.now() - self.last_service_call_timestamp).total_seconds() > 3:
url = 'https://api.ambientweather.net/v1/devices?applicationKey=' + self.app_key + '&apiKey=' + \
self._api_key
_log.info("requesting url: {}".format(url))
grequest = [grequests.get(url, verify=requests.certs.where(), headers=self.headers, timeout=30)]
gresponse = grequests.map(grequest)[0]
if gresponse is None:
raise RuntimeError("get request did not return any response")
try:
response = jsonapi.loads(gresponse.content)
self.last_service_call_timestamp = datetime.datetime.now()
return response
except ValueError:
self.last_service_call_timestamp = datetime.datetime.now()
self.generate_response_error(url, gresponse.status_code)
else:
raise RuntimeError("Previous API call to Ambient service is too recent, please wait at least 3 seconds "
"between API calls.")
def query_hourly_forecast(self, location):
"""
Unimplemented method stub
:param location: currently accepts lat/long location dictionary format only
:return: time of forecast prediction as a timestamp string, and a list of
"""
raise NotImplementedError
def query_hourly_historical(self, location, start_date, end_date):
"""
Unimplemented method stub
:param location: no format currently determined for history.
:param start_date: Starting date for historical weather period.
:param end_date: Ending date for historical weather period.
:return: NotImplementedError
"""
raise NotImplementedError
def generate_response_error(self, url, response_code):
"""
Raises a descriptive runtime error based on the response code returned by a service.
:param url: actual url used for requesting data from Ambient
:param response_code: Http response code returned by a service following a request
"""
code_x100 = int(response_code / 100)
if code_x100 == 2:
raise RuntimeError("Remote API returned no data(code:{}, url:{})".format(response_code, url))
elif code_x100 == 3:
raise RuntimeError(
"Remote API redirected request, but redirect failed (code:{}, url:{})".format(response_code, url))
elif code_x100 == 4:
raise RuntimeError(
"Request ({}) rejected by remote API: Remote API returned Code {}".format(url, response_code))
elif code_x100 == 5:
raise RuntimeError(
"Remote API returned invalid response (code:{}, url:{})".format(response_code, url))
else:
raise RuntimeError(
"API request failed with unexpected response code (code:{}, url:{})".format(response_code, url))
def main():
"""Main method called to start the agent."""
utils.vip_main(ambient,
version=__version__)
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| 44.121813 | 120 | 0.642953 | 11,015 | 0.707223 | 0 | 0 | 198 | 0.012713 | 0 | 0 | 9,757 | 0.626453 |
1de63b87d5455ccb25aeab1b6a213dfe5402bcb9 | 7,601 | py | Python | process.py | imdeepmind/AmazonReview-LanguageGenerationDataset | 64856e5c0fb0718203585c217bb5443344ef177d | [
"MIT"
] | 3 | 2019-08-29T17:38:11.000Z | 2021-01-08T13:07:35.000Z | process.py | imdeepmind/AmazonReview-LanguageGenerationDataset | 64856e5c0fb0718203585c217bb5443344ef177d | [
"MIT"
] | null | null | null | process.py | imdeepmind/AmazonReview-LanguageGenerationDataset | 64856e5c0fb0718203585c217bb5443344ef177d | [
"MIT"
] | 1 | 2019-08-26T11:09:59.000Z | 2019-08-26T11:09:59.000Z | import sqlite3
import pandas as pd
import re
import random
from bs4 import BeautifulSoup
class Process:
SEQ_LENGTH = 40
sql_transaction = []
dataset = []
cursor_train = None
cursor_validation = None
cursor_test = None
# I know in advance that there are 199819620 rows
NUM_ROWS = 199819620
train_size = None
val_size = None
test_size = None
counter = 0
def __init__(self, path, database_dir, split=(0.8, 0.1, 0.1), SEQ_LENGTH=40):
self.SEQ_LENGTH = SEQ_LENGTH
# Connecting to the train database
connection_train = sqlite3.connect(database_dir + "/sequence_train.db")
c = connection_train.cursor()
self.cursor_train = c
# Connecting to the validation database
connection_validation = sqlite3.connect(database_dir + "/sequence_val.db")
c = connection_validation.cursor()
self.cursor_validation = c
# Connecting to the test database
connection_test = sqlite3.connect(database_dir + "/sequence_test.db")
c = connection_test.cursor()
self.cursor_test = c
train, val, test = split
if (train + val + test) != 1.0:
raise ValueError('Invalid split data')
self.train_size = int(self.NUM_ROWS * train)
self.val_size = int(self.NUM_ROWS * val)
self.test_size = int(self.NUM_ROWS * test)
print('--Reading the dataset--')
# Reading the dataset
data = pd.read_csv(path, sep='\t', error_bad_lines=False)
# Filtering it
data = data[data['verified_purchase'] == 'Y']
# Selecting reviews with review length > SEQ_LENGTH
data = data[data['review_body'].str.len() > SEQ_LENGTH]
# Selecting review_body column
data = data[['review_body']]
# Dropping empty rows
data = data.dropna()
# Shuffling the data
data = data.sample(frac=1)
data = data.values
self.dataset = data
def create_table(self):
self.cursor_train.execute("CREATE TABLE IF NOT EXISTS reviews(review TEXT, next TEXT);")
self.cursor_validation.execute("CREATE TABLE IF NOT EXISTS reviews(review TEXT, next TEXT);")
self.cursor_test.execute("CREATE TABLE IF NOT EXISTS reviews(review TEXT, next TEXT);")
def transaction_bldr(self, sql, db):
self.sql_transaction.append(sql)
if len(self.sql_transaction) > 1000:
random.shuffle(self.sql_transaction)
if db == 'train':
self.cursor_train.execute('BEGIN TRANSACTION')
for s in self.sql_transaction:
try:
self.cursor_train.execute(s)
except Exception as ex:
print('Transaction fail ', ex)
print('SQL ', s)
self.cursor_train.execute('commit')
self.sql_transaction = []
elif db == 'val':
self.cursor_validation.execute('BEGIN TRANSACTION')
for s in self.sql_transaction:
try:
self.cursor_validation.execute(s)
except Exception as ex:
print('Transaction fail ', ex)
print('SQL ', s)
self.cursor_validation.execute('commit')
self.sql_transaction = []
elif db == 'test':
self.cursor_test.execute('BEGIN TRANSACTION')
for s in self.sql_transaction:
try:
self.cursor_test.execute(s)
except Exception as ex:
print('Transaction fail ', ex)
print('SQL ', s)
self.cursor_test.execute('commit')
self.sql_transaction = []
def insertData(self, sequence, nxt, db):
try:
sql = "INSERT INTO reviews(review, next) VALUES('{}', '{}');".format(sequence, nxt)
self.transaction_bldr(sql, db)
except Exception as e:
print('Something went wrong when inserting the data into database, ',str(e))
def deEmojify(self,inputString):
return inputString.encode('ascii', 'ignore').decode('ascii')
def clean_review(self,review):
# Changing to lowercase
review = self.deEmojify(review.lower())
# Changing he'll to he will
review = re.sub(r"i'm", "i am", review)
review = re.sub(r"aren't", "are not", review)
review = re.sub(r"couldn't", "counld not", review)
review = re.sub(r"didn't", "did not", review)
review = re.sub(r"doesn't", "does not", review)
review = re.sub(r"don't", "do not", review)
review = re.sub(r"hadn't", "had not", review)
review = re.sub(r"hasn't", "has not", review)
review = re.sub(r"haven't", "have not", review)
review = re.sub(r"isn't", "is not", review)
review = re.sub(r"it't", "had not", review)
review = re.sub(r"hadn't", "had not", review)
review = re.sub(r"won't", "will not", review)
review = re.sub(r"can't", "cannot", review)
review = re.sub(r"mightn't", "might not", review)
review = re.sub(r"mustn't", "must not", review)
review = re.sub(r"needn't", "need not", review)
review = re.sub(r"shouldn't", "should not", review)
review = re.sub(r"wasn't", "was not", review)
review = re.sub(r"weren't", "were not", review)
review = re.sub(r"won't", "will not", review)
review = re.sub(r"wouldn't", "would not", review)
review = re.sub(r"\'s", " is", review)
review = re.sub(r"\'ll", " will", review)
review = re.sub(r"\'ve", " have", review)
review = re.sub(r"\'re", " are", review)
review = re.sub(r"\'d", " would", review)
review = re.sub(r"'", " ", review)
review = re.sub(r'"', " ", review)
# Removing links and other stuffs from string
review = re.sub(r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''', '', review, flags=re.MULTILINE)
review = BeautifulSoup(review, "lxml").text
return review
def process(self):
for index, review in enumerate(self.dataset):
if index % 1000 == 0:
print('--Preprocessing {}th review--'.format(index+1))
review = self.clean_review(review[0])
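            # Slide a SEQ_LENGTH-character window across the review; each window
            # is paired with the character that follows it as the training target.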
for k in range(len(review) - self.SEQ_LENGTH):
# Seleting the sequence
seq = review[k:self.SEQ_LENGTH + k]
nxt = review[self.SEQ_LENGTH + k]
if self.counter < self.train_size:
self.insertData(seq, nxt, 'train')
elif self.counter < self.train_size + self.val_size:
self.insertData(seq, nxt, 'val')
elif self.counter < self.train_size + self.val_size + self.test_size:
self.insertData(seq, nxt, 'test')
self.counter += 1
process = Process('dataset/02.tsv', 'dataset', (.8, .1, .1), 40)
process.create_table()
process.process()
| 39.180412 | 245 | 0.52822 | 7,397 | 0.971883 | 0 | 0 | 0 | 0 | 0 | 0 | 1,852 | 0.243332 |
1de641bdba821c7515279839b852f0ac77f42640 | 55 | py | Python | src/torchphysics/utils/data/__init__.py | uwe-iben/torchphysics | f0a56539cff331d49caaa90bc2fdd0d238b298f8 | [
"Apache-2.0"
] | 203 | 2021-11-10T10:33:29.000Z | 2022-03-26T09:05:12.000Z | src/torchphysics/utils/data/__init__.py | DKreuter/torchphysics | 775d9aca71752a568f1fca972c958b99107f3b7c | [
"Apache-2.0"
] | 3 | 2022-01-07T19:57:00.000Z | 2022-03-10T08:04:49.000Z | src/torchphysics/utils/data/__init__.py | DKreuter/torchphysics | 775d9aca71752a568f1fca972c958b99107f3b7c | [
"Apache-2.0"
] | 16 | 2021-09-30T08:35:37.000Z | 2022-03-16T13:12:22.000Z | from .dataloader import PointsDataset, PointsDataLoader | 55 | 55 | 0.890909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1de7a0dc0cdbaa0b1dee69836abf352a586d2b83 | 445 | py | Python | src/panel/urls.py | cybekRT/DJPajton2 | e461070269a5b39bac6c8df1eb739ea79c7099b5 | [
"MIT"
] | null | null | null | src/panel/urls.py | cybekRT/DJPajton2 | e461070269a5b39bac6c8df1eb739ea79c7099b5 | [
"MIT"
] | null | null | null | src/panel/urls.py | cybekRT/DJPajton2 | e461070269a5b39bac6c8df1eb739ea79c7099b5 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
#url(r'^view/(?P<pk>[0-9]+)', views.ArticleDetailView.as_view(), name = "detail"),
#url(r'', views.ArticleIndexView.as_view(), name = "index"),
url(r'login', views.Login.as_view()),
url(r'logout', views.Logout.as_view()),
url(r'register', views.Register.as_view()),
url(r'token/(.+)', views.Token.as_view()),
url(r'', views.Panel),
]
| 34.230769 | 87 | 0.611236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.422472 |
1de7f7ec7324ad33e6191ad8327f06347d390541 | 3,035 | py | Python | Gems/WhiteBox/Editor/Scripts/WhiteBox.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-13T00:01:12.000Z | 2021-09-13T00:01:12.000Z | Gems/WhiteBox/Editor/Scripts/WhiteBox.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | null | null | null | Gems/WhiteBox/Editor/Scripts/WhiteBox.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-07-20T11:07:25.000Z | 2021-07-20T11:07:25.000Z | """
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# setup path
import sys

import azlmbr.legacy.general as general
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.entity
import azlmbr.object
import azlmbr.math
import azlmbr.whitebox.api as api
from azlmbr.entity import EntityId
# get Component Type for WhiteBoxMesh
whiteBoxMeshComponentTypeId = get_white_box_component_type()
# use old White Box entity to hold White Box component if it exists, otherwise use a new one
newEntityId = None
oldEntityId = general.find_editor_entity('WhiteBox')
if oldEntityId.IsValid():
whiteBoxMeshComponentExists = editor.EditorComponentAPIBus(bus.Broadcast, 'HasComponentOfType', oldEntityId, whiteBoxMeshComponentTypeId)
if (whiteBoxMeshComponentExists):
oldwhiteBoxMeshComponent = editor.EditorComponentAPIBus(bus.Broadcast, 'GetComponentOfType', oldEntityId, whiteBoxMeshComponentTypeId)
editor.EditorComponentAPIBus(bus.Broadcast, 'RemoveComponents', [oldwhiteBoxMeshComponent.GetValue()])
newEntityId = oldEntityId
else:
newEntityId = editor.ToolsApplicationRequestBus(bus.Broadcast, 'CreateNewEntity', EntityId())
editor.EditorEntityAPIBus(bus.Event, 'SetName', newEntityId, "WhiteBox")
# add whiteBoxMeshComponent to entity and enable
whiteBoxMeshComponentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'AddComponentsOfType', newEntityId, [whiteBoxMeshComponentTypeId])
if (whiteBoxMeshComponentOutcome.IsSuccess()):
print("White Box Component added to entity.")
whiteBoxMeshComponents = whiteBoxMeshComponentOutcome.GetValue()
whiteBoxMeshComponent = whiteBoxMeshComponents[0]
editor.EditorComponentAPIBus(bus.Broadcast, 'EnableComponents', whiteBoxMeshComponents)
isComponentEnabled = editor.EditorComponentAPIBus(bus.Broadcast, 'IsComponentEnabled', whiteBoxMeshComponent)
if (isComponentEnabled):
print("Enabled Mesh component.")
whiteBoxMesh = azlmbr.whitebox.request.bus.EditorWhiteBoxComponentRequestBus(bus.Event, 'GetWhiteBoxMeshHandle', whiteBoxMeshComponent)
# translate append (extrude) a polygon
if (len(sys.argv) >= 3 and float(sys.argv[2]) != 0.0):
# create face handle from user input (argv[1])
faceHandle = azlmbr.object.construct('FaceHandle', int(sys.argv[1]))
# find the polygon handle that corresponds to the given face
facePolygonHandle = whiteBoxMesh.FacePolygonHandle(faceHandle)
# translate append (extrude) the polygon by a distance specified by the user (argv[2])
whiteBoxMesh.TranslatePolygonAppend(facePolygonHandle, float(sys.argv[2]))
# recalculate uvs as mesh will have changed
whiteBoxMesh.CalculatePlanarUVs()
# notify the white box component the mesh has changed to force it to rebuild the render mesh
azlmbr.whitebox.notification.bus.EditorWhiteBoxComponentNotificationBus(bus.Event, 'OnWhiteBoxMeshModified', whiteBoxMeshComponent)
| 48.174603 | 155 | 0.801647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,046 | 0.344646 |
1de934d7356d8cdb5f92362c3fbc904ed493a5b2 | 7,800 | py | Python | src/analysis/analysis.py | rcgonzalez9061/m2v-adversarial-hindoird | 513ede9875330ea44de65f7f6b0f76e98c9c81da | [
"MIT"
] | null | null | null | src/analysis/analysis.py | rcgonzalez9061/m2v-adversarial-hindoird | 513ede9875330ea44de65f7f6b0f76e98c9c81da | [
"MIT"
] | null | null | null | src/analysis/analysis.py | rcgonzalez9061/m2v-adversarial-hindoird | 513ede9875330ea44de65f7f6b0f76e98c9c81da | [
"MIT"
] | 3 | 2021-03-08T08:01:24.000Z | 2022-02-23T01:12:26.000Z | from dask.distributed import Client
import dask.dataframe as dd
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from IPython.display import display, HTML
from sklearn.cluster import KMeans
import plotly
import plotly.graph_objs as go
import plotly.io as pio
from functools import partial
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score
def make_groundtruth_figures(data_folder, update_figs=False, no_labels=False):
vectors = pd.read_csv(os.path.join(data_folder, 'features.csv'), index_col='app')
if no_labels: # mostly for testing
all_apps = vectors.assign(
label=['app', 'app'],
category=['app', 'app']
)
else:
all_apps = pd.read_csv("data/out/all-apps/all_apps.csv", index_col='app')
    all_apps['label'] = all_apps[all_apps.category=='malware'].app_dir.str.split('/').apply(lambda parts: parts[5])
top_9_malware = all_apps.label.value_counts().sort_values(ascending=False)[:9]
top_9_min = top_9_malware.min()
other_mal_map = {key: "Other malware" for key, value in all_apps.label.value_counts().items() if value <= top_9_min}
# other_mal_map = {key: key for key, value in all_apps.label.value_counts().items() if value <= 200}
all_apps.label = all_apps.label.map(other_mal_map).fillna(all_apps.label)
all_apps.label.fillna(all_apps.category, inplace=True)
vectors = vectors.assign(
label=all_apps.label,
category=all_apps.category
)
labels = vectors.label
# Retrieve node embeddings and corresponding subjects
node_ids = list(vectors.uid) # list of node IDs
node_embeddings = vectors.drop(columns=['uid', 'category', 'label'])
node_targets = labels
transform = TSNE # Dimensionality reduction transformer
# 2D plot -- matplotlib
print('Making 2D plot...')
plt.rcParams.update({'font.size': 14})
trans = transform(n_components=2)
node_embeddings_2d = trans.fit_transform(node_embeddings)
label_map = {l: i for i, l in enumerate(np.unique(node_targets))}
node_colours = [label_map[target] for target in node_targets]
plt.figure(figsize=(10, 8))
plt.axes().set(aspect="equal")
scatter = plt.scatter(
node_embeddings_2d[:, 0],
node_embeddings_2d[:, 1],
c=node_colours,
cmap='tab20',
alpha=1,
s=5
)
plt.title("2D {} visualization of node embeddings".format(transform.__name__))
legend1 = plt.legend(scatter.legend_elements()[0], pd.Series(label_map.keys()).str.replace('-', ' ').str.title(),
loc='center left', bbox_to_anchor=(1, 0.5), title="App Type", markerscale=1.5)
# order labels (https://stackoverflow.com/a/46160465/13710014)
# handles, g_labels = plt.gca().get_legend_handles_labels()
# print(handles, labels)
# if not no_labels:
# order = ['Popular Apps', 'Random Apps']
# order += list(top_9_malware.index)
# plt.legend([handles[idx] for idx in order],[labels[idx] for idx in order])
plt.savefig(os.path.join(data_folder, '2D-plot.png'), bbox_inches='tight')
# 3D plot - using plotly
print('Making 3D plot...')
trans3d = transform(n_components=3)
node_embeddings_3d = trans3d.fit_transform(node_embeddings)
data_3d = pd.DataFrame(node_embeddings_3d, index=vectors.index)
data_3d['malware'] = vectors['category']=='malware'
data_3d['type'] = vectors.label
type_chart = data_3d[['malware', 'type']].drop_duplicates()
type_chart['num'] = type_chart.type.map(label_map)
layout = go.Layout(
        title="Interactive 3D TSNE representation of node embeddings",
margin={'l': 0, 'r': 0, 'b': 0, 't': 30},
legend=dict(y=0.5, itemsizing='constant'),
scene={
'xaxis': {
'showspikes': False,
'showgrid': False,
'zeroline': False,
'visible': False
},
'yaxis': {
'showspikes': False,
'showgrid': False,
'zeroline': False,
'visible': False
},
'zaxis': {
'showspikes': False,
'showgrid': False,
'zeroline': False,
'visible': False
}
}
)
fig = go.Figure(layout=layout)
# add invisible bounding trace to keep axes' scale constant
fig.add_trace(
go.Scatter3d(
x=[data_3d[0].min(), data_3d[0].max()],
y=[data_3d[1].min(), data_3d[1].max()],
z=[data_3d[2].min(), data_3d[2].max()],
mode='markers',
marker={
'color':'rgba(0,0,0,0)',
'opacity': 0,
},
showlegend=False
)
)
for index, row in type_chart.sort_values('num', ascending=False).iterrows():
if row['malware']:
symbol = 'circle'
group='Malware'
size = 2
else:
symbol = 'x'
group='Unlabeled'
size = 1.5
name = f"{group}, {row['type'].replace('-', ' ').title()}"
if row['type']=='Other malware':
name=row['type']
df = data_3d[data_3d.type==row['type']]
        rgb = tuple([255 * val for val in cm.tab20(row['num'])[:3]])
        color = f"rgb{rgb}"
trace = go.Scatter3d(
name=name,
x=df[0],
y=df[1],
z=df[2],
customdata=list(df.index),
hovertemplate=
"<b>%{customdata}</b><br>" +
f"{name}" +
"<extra></extra>",
mode='markers',
marker={
'size': size,
'opacity': 1,
'color': color,
'symbol': symbol,
},
)
fig.add_trace(trace)
# Save the plot.
pio.write_html(fig, file=os.path.join(data_folder, '3D-plot.html'), auto_open=True)
if update_figs:
pio.write_html(fig, file=os.path.join('docs', '_includes', '3D-plot.html'), auto_open=True)
def compute_model_performance_statistics(pred, true):
'''
Returns a series with the f1-score, accuracy, recall, and confusion counts (TP, TN, FP, FN).
'''
TN, FP, FN, TP = confusion_matrix(true, pred).ravel()
return pd.Series({
'ACC': accuracy_score(true, pred),
'TPR': recall_score(true, pred),
'F1': f1_score(true, pred),
'TP': TP,
'TN': TN,
'FP': FP,
'FN': FN
})
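# Illustrative check of the helper above (hypothetical inputs, not project data):
# with pred = [1, 0, 1, 1] and true = [1, 0, 0, 1], confusion_matrix unravels to
# TN=1, FP=1, FN=0, TP=2, so ACC = 3/4 = 0.75, TPR = 2/2 = 1.0 and
# F1 = 2 * (2/3) * 1 / ((2/3) + 1) = 0.8:
#
#   compute_model_performance_statistics(pd.Series([1, 0, 1, 1]),
#                                        pd.Series([1, 0, 0, 1]))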
def create_performance_table(m2v_results_path, hindroid_results_path, outpath=None):
results = pd.read_csv(m2v_results_path, index_col='app', usecols=['app', 'm2vDroid', 'true'])
if 'true' in results.columns:
results = results.drop(columns=['true'])
results = results.join(pd.read_csv(hindroid_results_path, index_col='app'))
y_true = results.true
table = results.drop(columns=['true']).apply(partial(compute_model_performance_statistics, true=y_true)).T
table = table.astype({col: int for col in ['TP', 'TN', 'FP', 'FN']})
if outpath is not None:
table.to_csv(outpath)
return table
def generate_analysis(data_path, jobs={}):
"Generates plots, aggregates, and statistical analysis on app data located in `data_path`"
# load data
# app_data_path = os.path.join(data_path, 'app_data.csv')
# app_data = dd.read_csv(app_data_path)
# os.makedirs(out_folder, exist_ok=True)
if "plots" in jobs:
make_groundtruth_figures(data_path, **jobs['plots'])
| 35.294118 | 124 | 0.591667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,060 | 0.264103 |
1de9e88ceb68a4275423c2572fd161fe8abe5541 | 1,756 | py | Python | common/sqlmanager.py | ntcat/tilde | 6bbe7f56483e3a64103c3d3e7a39681a2c2690be | [
"MIT"
] | 1 | 2018-05-21T04:41:00.000Z | 2018-05-21T04:41:00.000Z | common/sqlmanager.py | ntcat/tilde | 6bbe7f56483e3a64103c3d3e7a39681a2c2690be | [
"MIT"
] | null | null | null | common/sqlmanager.py | ntcat/tilde | 6bbe7f56483e3a64103c3d3e7a39681a2c2690be | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'shen.bas'
__time__ = '2018-01-26'
"""
import pymysql
class SQLManager:
def __init__(self, dbCnfig):
self.DB_CONFIG = dbCnfig
self.conn = None
self.cursor = None
self.msg = ''
self.affect_rows = 0
if not self.connect():
            exit()  # connection failed: exit
def connect(self):
try:
if len(self.DB_CONFIG) == 0:
                self.msg = 'Database connection config is empty; check the server/conn tag parameters.\n'
return False
else:
self.conn = pymysql.connect(host=self.DB_CONFIG['host'],
port=int(self.DB_CONFIG['port']),
user=self.DB_CONFIG['user'],
passwd=self.DB_CONFIG['passwd'],
db=self.DB_CONFIG['db'],
charset=self.DB_CONFIG['charset'])
self.cursor = self.conn.cursor(cursor=pymysql.cursors.DictCursor)
return True
except Exception as e:
print('\nmysql connect failed:\n',str(e))
return False
    # Query multiple rows
def get_list(self, sql, args=None):
self.cursor.execute(sql, args)
result = self.cursor.fetchall()
return result
    # Query a single row
def get_one(self, sql, args=None):
try:
self.cursor.execute(sql, args)
result = self.cursor.fetchone()
return result
except Exception as e:
self.msg = str(e)
return False
    # Execute a single SQL statement (INSERT/UPDATE/DELETE)
    def modify(self, sql, args=None):
        self.cursor.execute(sql, args)
        self.affect_rows = self.cursor.rowcount  # same counter initialised in __init__
        self.conn.commit()
    # Insert a single record and return its auto-increment id
def create(self, sql, args=None):
self.cursor.execute(sql, args)
self.conn.commit()
last_id = self.cursor.lastrowid
return last_id
    # Close the database cursor and connection
def close(self):
self.cursor.close()
self.conn.close()
    # Executed automatically on entering a with block
def __enter__(self):
return self
    # Executed automatically on leaving a with block
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
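# Minimal usage sketch (hypothetical connection values, not from this project):
#
#   config = {'host': 'localhost', 'port': 3306, 'user': 'root',
#             'passwd': 'secret', 'db': 'test', 'charset': 'utf8'}
#   with SQLManager(config) as db:
#       row = db.get_one("SELECT 1 AS ok")
#
# The context manager guarantees close() runs via __exit__ even on errors.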
| 22.227848 | 69 | 0.664009 | 1,774 | 0.929769 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.238994 |
1dea6f53071178d05a953eb23c32d6609a27d012 | 1,786 | py | Python | rls/algorithms/single/ddqn.py | StepNeverStop/RLs | 25cc97c96cbb19fe859c9387b7547cbada2c89f2 | [
"Apache-2.0"
] | 371 | 2019-04-26T00:37:33.000Z | 2022-03-31T07:33:12.000Z | rls/algorithms/single/ddqn.py | BlueFisher/RLs | 25cc97c96cbb19fe859c9387b7547cbada2c89f2 | [
"Apache-2.0"
] | 47 | 2019-07-21T11:51:57.000Z | 2021-08-31T08:45:22.000Z | rls/algorithms/single/ddqn.py | BlueFisher/RLs | 25cc97c96cbb19fe859c9387b7547cbada2c89f2 | [
"Apache-2.0"
] | 102 | 2019-06-29T13:11:15.000Z | 2022-03-28T13:51:04.000Z | #!/usr/bin/env python3
# encoding: utf-8
import torch.nn.functional as F
from rls.algorithms.single.dqn import DQN
from rls.common.decorator import iton
from rls.utils.torch_utils import n_step_return
class DDQN(DQN):
"""
Double DQN, https://arxiv.org/abs/1509.06461
Double DQN + LSTM, https://arxiv.org/abs/1908.06040
"""
policy_mode = 'off-policy'
def __init__(self, **kwargs):
super().__init__(**kwargs)
@iton
def _train(self, BATCH):
q = self.q_net(BATCH.obs, begin_mask=BATCH.begin_mask) # [T, B, A]
q_next = self.q_net(BATCH.obs_, begin_mask=BATCH.begin_mask) # [T, B, A]
q_target_next = self.q_net.t(BATCH.obs_, begin_mask=BATCH.begin_mask) # [T, B, A]
next_max_action = q_next.argmax(-1) # [T, B]
        next_max_action_one_hot = F.one_hot(next_max_action, self.a_dim).float()  # [T, B, A]
q_eval = (q * BATCH.action).sum(-1, keepdim=True) # [T, B, 1]
q_target_next_max = (q_target_next * next_max_action_one_hot).sum(-1, keepdim=True) # [T, B, 1]
q_target = n_step_return(BATCH.reward,
self.gamma,
BATCH.done,
q_target_next_max,
BATCH.begin_mask).detach() # [T, B, 1]
td_error = q_target - q_eval # [T, B, 1]
q_loss = (td_error.square() * BATCH.get('isw', 1.0)).mean() # 1
self.oplr.optimize(q_loss)
return td_error, {
'LEARNING_RATE/lr': self.oplr.lr,
'LOSS/loss': q_loss,
'Statistics/q_max': q_eval.max(),
'Statistics/q_min': q_eval.min(),
'Statistics/q_mean': q_eval.mean()
}
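# Schematic form of the Double-DQN target computed in _train above (notation
# only; n_step_return handles the exact n-step discounting and episode masks):
#
#   a* = argmax_a Q_online(s', a)                       # selected by the online net
#   y  ≈ r + gamma * (1 - done) * Q_target(s', a*)      # evaluated by the target net
#
# Decoupling action selection (online net) from evaluation (target net) is what
# reduces the overestimation bias of vanilla DQN.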
| 39.688889 | 105 | 0.558231 | 1,569 | 0.878499 | 0 | 0 | 1,315 | 0.736282 | 0 | 0 | 370 | 0.207167 |
1deabd234d3d7ab0242074f8b32e1bcacc5fabc3 | 1,394 | py | Python | blockChain/modularDivision.py | slowy07/pythonApps | 22f9766291dbccd8185035745950c5ee4ebd6a3e | [
"MIT"
] | 10 | 2020-10-09T11:05:18.000Z | 2022-02-13T03:22:10.000Z | blockChain/modularDivision.py | slowy07/pythonApps | 22f9766291dbccd8185035745950c5ee4ebd6a3e | [
"MIT"
] | null | null | null | blockChain/modularDivision.py | slowy07/pythonApps | 22f9766291dbccd8185035745950c5ee4ebd6a3e | [
"MIT"
] | 6 | 2020-11-26T12:49:43.000Z | 2022-03-06T06:46:43.000Z | from typing import Tuple
def modular_division(a: int, b: int, n: int) -> int:
    assert n > 1 and a > 0 and greatest_common_divisor(a, n) == 1
    (d, t, s) = extend_gcd(n, a)
    x = (b * s) % n  # d == n*t + a*s == 1, so s is the inverse of a modulo n
    return x
def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def modular_division2(a: int, b: int, n: int) -> int:
    s = invert_modulo(a, n)
    x = (b * s) % n
    return x
def extend_gcd(a: int, b: int) -> Tuple[int, int, int]:
    assert a >= 0 and b >= 0
    if b == 0:
        d, x, y = a, 1, 0
    else:
        (d, p, q) = extend_gcd(b, a % b)
        x, y = q, p - q * (a // b)  # back-substitute: d == b*p + (a % b)*q
    assert a % d == 0 and b % d == 0
    assert d == a * x + b * y
    return (d, x, y)
def extended_euclid(a: int, b: int) -> Tuple[int, int]:
if b == 0:
return (1, 0)
(x, y) = extended_euclid(b, a % b)
k = a // b
return (y, x - k * y)
def greatest_common_divisor(a: int, b: int) -> int:
if a < b:
a, b = b, a
while a % b != 0:
a, b = b, a % b
return b
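# Worked example (values chosen here purely for illustration):
#   modular_division(4, 8, 5) == 2, since invert_modulo(4, 5) == 4
#   (4 * 4 == 16 == 1 mod 5) and (8 * 4) % 5 == 32 % 5 == 2, i.e. 4 * 2 == 8 mod 5.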
if __name__ == "__main__":
from doctest import testmod
testmod(name="modular_division", verbose=True)
testmod(name="modular_division2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_gcd", verbose=True)
testmod(name="extended_euclid", verbose=True)
testmod(name="greatest_common_divisor", verbose=True)
| 24.45614 | 65 | 0.552367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.084648 |
1dead52e33a4564094a52bf02b01c565cce3acb2 | 5,373 | py | Python | arekit/contrib/networks/context/architectures/cnn.py | nicolay-r/AREk | 19c39ec0dc9a17464cade03b9c4da0c6d1d21191 | [
"MIT"
] | 18 | 2019-12-14T18:43:11.000Z | 2022-03-21T05:55:36.000Z | arekit/contrib/networks/context/architectures/cnn.py | nicolay-r/AREk | 19c39ec0dc9a17464cade03b9c4da0c6d1d21191 | [
"MIT"
] | 284 | 2020-08-08T20:52:44.000Z | 2022-03-31T05:26:20.000Z | arekit/contrib/networks/context/architectures/cnn.py | nicolay-r/AREk | 19c39ec0dc9a17464cade03b9c4da0c6d1d21191 | [
"MIT"
] | 1 | 2021-08-07T13:17:43.000Z | 2021-08-07T13:17:43.000Z | import tensorflow as tf
from collections import OrderedDict
from arekit.contrib.networks.context.architectures.base.base import SingleInstanceNeuralNetwork
from arekit.contrib.networks.context.configurations.cnn import CNNConfig
from arekit.contrib.networks.tf_helpers import layers
class VanillaCNN(SingleInstanceNeuralNetwork):
"""
Title: Relation Classification via Convolutional Deep Neural Network
Authors: Daojian Zeng, Kang Liu, Siwei Lai, Guangyou Zhou and Jun Zhao
Paper: https://www.aclweb.org/anthology/C14-1220/
Source: https://github.com/roomylee/cnn-relation-extraction
NOTE: This class is an unofficial implementation of CNN with distance features.
"""
H_W = "W"
H_b = "b"
H_W2 = "W2"
H_b2 = "b2"
H_conv_filter = "C"
def __init__(self):
super(VanillaCNN, self).__init__()
self.__hidden = OrderedDict()
@property
def Hidden(self):
return self.__hidden
@property
def ContextEmbeddingSize(self):
return self.Config.FiltersCount
def init_context_embedding(self, embedded_terms):
embedding = self.init_context_embedding_core(embedded_terms)
return tf.concat(embedding, axis=-1)
def init_context_embedding_core(self, embedded_terms):
embedded_terms = self.padding(embedded_terms, self.Config.WindowSize)
bwc_line = tf.reshape(embedded_terms,
[self.Config.BatchSize,
(self.Config.TermsPerContext + (self.Config.WindowSize - 1)) * self.TermEmbeddingSize,
1])
bwc_conv = tf.nn.conv1d(bwc_line, self.__hidden[self.H_conv_filter], self.TermEmbeddingSize,
"VALID",
data_format="NHWC",
name="C")
bwgc_conv = tf.reshape(bwc_conv, [self.Config.BatchSize,
1,
self.Config.TermsPerContext,
self.Config.FiltersCount])
# Max Pooling
bwgc_mpool = tf.nn.max_pool(
bwgc_conv,
[1, 1, self.Config.TermsPerContext, 1],
[1, 1, self.Config.TermsPerContext, 1],
padding='VALID',
data_format="NHWC")
bc_mpool = tf.squeeze(bwgc_mpool, axis=[1, 2])
g = tf.reshape(bc_mpool, [self.Config.BatchSize, self.Config.FiltersCount])
return g
def init_logits_unscaled(self, context_embedding):
W = [tensor for var_name, tensor in self.__hidden.items() if 'W' in var_name]
b = [tensor for var_name, tensor in self.__hidden.items() if 'b' in var_name]
activations = [tf.tanh] * len(W)
activations.append(None)
result, result_dropout = layers.get_k_layer_pair_logits(g=context_embedding,
W=W,
b=b,
dropout_keep_prob=self.DropoutKeepProb,
activations=activations)
return result, result_dropout
def init_body_dependent_hidden_states(self):
assert(isinstance(self.Config, CNNConfig))
self.__hidden[self.H_conv_filter] = tf.get_variable(
name=self.H_conv_filter,
shape=[self.Config.WindowSize * self.TermEmbeddingSize, 1, self.Config.FiltersCount],
initializer=self.Config.WeightInitializer,
regularizer=self.Config.LayerRegularizer,
dtype=tf.float32)
def init_logits_hidden_states(self):
assert(isinstance(self.Config, CNNConfig))
self.__hidden[self.H_W] = tf.get_variable(
name=self.H_W,
shape=[self.ContextEmbeddingSize, self.Config.HiddenSize],
initializer=self.Config.WeightInitializer,
regularizer=self.Config.LayerRegularizer,
dtype=tf.float32)
self.__hidden[self.H_b] = tf.get_variable(
name=self.H_b,
shape=[self.Config.HiddenSize],
initializer=self.Config.BiasInitializer,
dtype=tf.float32)
self.__hidden[self.H_W2] = tf.get_variable(
name=self.H_W2,
shape=[self.Config.HiddenSize, self.Config.ClassesCount],
initializer=self.Config.WeightInitializer,
regularizer=self.Config.LayerRegularizer,
dtype=tf.float32)
self.__hidden[self.H_b2] = tf.get_variable(
name=self.H_b2,
shape=[self.Config.ClassesCount],
initializer=self.Config.BiasInitializer,
regularizer=self.Config.LayerRegularizer,
dtype=tf.float32)
def iter_hidden_parameters(self):
for key, value in self.__hidden.items():
yield key, value
@staticmethod
def padding(embedded_data, window_size):
assert(isinstance(window_size, int) and window_size > 0)
left_padding = int((window_size - 1) / 2)
right_padding = (window_size - 1) - left_padding
return tf.pad(embedded_data, [[0, 0],
[left_padding, right_padding],
[0, 0]])
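# Shape walkthrough for init_context_embedding_core above (B = batch, T = terms
# per context, E = term-embedding size, W = window size, F = filter count):
#   pad to [B, T + W - 1, E]  ->  flatten to [B, (T + W - 1) * E, 1]
#   conv1d with stride E      ->  [B, T, F], reshaped to [B, 1, T, F]
#   max-pool over all T steps ->  [B, 1, 1, F], squeezed to the final [B, F]
# Striding by exactly E keeps each convolution window aligned to whole terms.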
| 39.507353 | 117 | 0.589615 | 5,087 | 0.946771 | 111 | 0.020659 | 538 | 0.10013 | 0 | 0 | 427 | 0.079471 |
1deb8efbdd96d38511ba7695b577776dc1d7e9c2 | 149 | py | Python | data/hospital_level/raw/hifld_hospital/download.py | csinva/covid-19-analysis | e7b1e82cb6b25d62a868ff61025d88e17452de28 | [
"MIT"
] | 2 | 2020-03-24T16:50:02.000Z | 2020-03-24T17:00:50.000Z | data_new/hospital_level/raw/hifld_hospital/download.py | rahul263-stack/covid19-severity-prediction | f581adb2fccb12d5ab3f3c59ee120f484703edf5 | [
"MIT"
] | 1 | 2020-03-28T15:34:28.000Z | 2020-03-28T19:22:27.000Z | data/hospital_level/raw/hifld_hospital/download.py | Yu-Group/covid-19-ventilator-demand-prediction | e7b1e82cb6b25d62a868ff61025d88e17452de28 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
os.system('wget \
https://opendata.arcgis.com/datasets/6ac5e325468c4cb9b905f1728d6fbf0f_0.csv \
-O hifld_hospital.csv')
| 24.833333 | 77 | 0.791946 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.845638 |
1def47cb7540794f0847dc42e0919f49e98df7e1 | 458 | py | Python | src/helper.py | yiuc/azure-aud | 695341178c1396980a75fd2ab88550ca7930c70e | [
"Apache-2.0"
] | 2 | 2019-01-24T12:23:28.000Z | 2021-01-19T15:04:35.000Z | src/helper.py | yiuc/azure-aud | 695341178c1396980a75fd2ab88550ca7930c70e | [
"Apache-2.0"
] | null | null | null | src/helper.py | yiuc/azure-aud | 695341178c1396980a75fd2ab88550ca7930c70e | [
"Apache-2.0"
] | 1 | 2021-05-26T17:34:51.000Z | 2021-05-26T17:34:51.000Z | import argparse
import subprocess
import sys
import logging
logger = logging.getLogger("helper")
def azcli(command):
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out,err = process.communicate()
logger.debug(str(out,"utf-8"))
exit_code = process.returncode
    if exit_code != 0:
logger.error("{}".format(str(err,"utf-8")))
sys.exit(exit_code)
else:
return out | 26.941176 | 87 | 0.687773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.056769 |
1def7d7b2c472095eabd58e752a36f75a3a587c3 | 1,622 | py | Python | 044_wildcat_matching.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 2 | 2018-04-24T19:17:40.000Z | 2018-04-24T19:33:52.000Z | 044_wildcat_matching.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | null | null | null | 044_wildcat_matching.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 3 | 2020-06-17T05:48:52.000Z | 2021-01-02T06:08:25.000Z | # 44. Wildcard Matching
#
# Implement wildcard pattern matching with support for '?' and '*'.
#
# '?' Matches any single character.
# '*' Matches any sequence of characters (including the empty sequence).
#
# The matching should cover the entire input string (not partial).
#
# The function prototype should be:
# bool isMatch(const char *s, const char *p)
#
# Some examples:
# isMatch("aa","a") → false
# isMatch("aa","aa") → true
# isMatch("aaa","aa") → false
# isMatch("aa", "*") → true
# isMatch("aa", "a*") → true
# isMatch("ab", "?*") → true
# isMatch("aab", "c*a*b") → false
class Solution:
# @param {string} s
# @param {string} p
# @return {boolean}
#
# http://www.voidcn.com/article/p-hgfivloj-bhv.html
def isMatch(self, s, p):
i = 0
j = 0
sstar = 0
star = -1
while i < len(s):
# compare ? or whether they are the same
            if j < len(p) and (s[i] == p[j] or p[j] == '?'):
i += 1
j += 1
# if there is a * in p we mark current j and i
elif j < len(p) and p[j] == '*':
star = j
j += 1
sstar = i
# if current p[j] is not * we check whether prior state has *
elif star != -1:
j = star + 1
sstar += 1
i = sstar
else:
return False
while j < len(p) and p[j] == '*':
j += 1
# return j == len(p)
if j == len(p):
return True
return False
print(Solution().isMatch("ab", "?*"))
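# The examples from the header double as quick sanity checks:
assert not Solution().isMatch("aa", "a")
assert Solution().isMatch("aa", "aa")
assert not Solution().isMatch("aaa", "aa")
assert Solution().isMatch("aa", "*")
assert not Solution().isMatch("aab", "c*a*b")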
| 27.033333 | 73 | 0.479655 | 998 | 0.610024 | 0 | 0 | 0 | 0 | 0 | 0 | 869 | 0.531174 |
1df20dac7a824f73870c3a8bb0561dbeba1a3f8c | 1,013 | py | Python | einsteinsolid.py | SNOlson/MyCodes | 7d2942d961b638bc34e8d7aea86bfbcaa0326a8d | [
"Apache-2.0"
] | null | null | null | einsteinsolid.py | SNOlson/MyCodes | 7d2942d961b638bc34e8d7aea86bfbcaa0326a8d | [
"Apache-2.0"
] | null | null | null | einsteinsolid.py | SNOlson/MyCodes | 7d2942d961b638bc34e8d7aea86bfbcaa0326a8d | [
"Apache-2.0"
] | null | null | null | """Table for 200 and 100 oscillators with 100 units of energy"""
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def mult(n, r):
return math.factorial(n) / (math.factorial(r) * math.factorial(n-r))
nA = 200
nB = 100
qA = np.arange(0,101,1)
qB = np.arange(100, -1, -1)  # energy left for solid B, so qA + qB == 100 units
#multsA = [mult(n = i+nA-1, r = i) for i in qA]
#multsB = [mult(n = j+nB-1, r = i) for j in qB]
multA = []
multB = []
for i in qA:
multA.append(mult(n = i+nA-1, r = i))
for i in qB:
multB.append(mult(n = i + nB - 1, r=i))
multsA = np.asarray(multA)
multsB = np.asarray(multB)
#multsA = np.asarray(list(map(mult(n = qA+nA-1,r = qA))))
#multsB = np.asarray(list(map(mult(n = qB+nB-1,r = qB))))
Einstein = pd.DataFrame(data = [qA, multsA, qB, multsB, multsA*multsB])
#plt.plot(numHeads, probs)
#plt.title("Probabilites of getting n heads flipping 50 coins")
#plt.xlabel("n Heads")
#plt.ylabel("Probability")
#plt.show()
plt.plot(qA, Einstein.T[4])
| 23.55814 | 73 | 0.617966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 429 | 0.423495 |
1df2a0598e91ae4e8513a3d7be0aab30ae5f3267 | 1,184 | py | Python | Grid.py | KeilonRobertson/Pathfinding-Visualizer | 71637fbadfd4f4dc04ba82d0d7429738f27a4a74 | [
"MIT"
] | 5 | 2020-10-02T03:03:49.000Z | 2020-10-08T16:45:24.000Z | Grid.py | KeilonRobertson/Pathfinding-Visualizer | 71637fbadfd4f4dc04ba82d0d7429738f27a4a74 | [
"MIT"
] | null | null | null | Grid.py | KeilonRobertson/Pathfinding-Visualizer | 71637fbadfd4f4dc04ba82d0d7429738f27a4a74 | [
"MIT"
] | null | null | null | from Vertex import Vertex
import pygame
from Colours import Colours
class Grid:
def createGrid(self, rows, width):
grid = []
space = width // rows
for x in range(rows):
grid.append([])
for i in range(rows):
vertex = Vertex(space, rows, x, i)
grid[x].append(vertex)
return grid
def generateGrid(self, width, rows, window):
space = width // rows
for x in range(rows):
pygame.draw.line(window, Colours.BLACK, (0, x * space), (width, x * space))
for i in range(rows):
pygame.draw.line(window, Colours.BLACK, (i * space, 0), (i * space, width))
def colour(self, window, grid, rows, width):
window.fill(Colours.WHITE)
for row in grid:
for vertex in row:
vertex.createVertex(window)
self.generateGrid(width, rows, window)
pygame.display.update()
def getClicked(self, position, rows, width):
space = width // rows
y, x = position
column = x // space
row = y // space
return row, column
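# Example of the pixel -> cell mapping in getClicked (illustrative numbers):
# with width = 800 and rows = 40, space = 20; position = (45, 130) unpacks to
# y = 45, x = 130 and maps to row = 45 // 20 = 2, column = 130 // 20 = 6.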
| 29.6 | 92 | 0.529561 | 1,109 | 0.936655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1df2a9b981356894b69c9da96fea58cbc93cb744 | 10,811 | py | Python | improver_tests/precipitation_type/shower_condition_probability/test_ShowerConditionProbability.py | thbom001/improver | 6f6e334a1e8e44a151125cf123ecdda2c56dbec4 | [
"BSD-3-Clause"
] | null | null | null | improver_tests/precipitation_type/shower_condition_probability/test_ShowerConditionProbability.py | thbom001/improver | 6f6e334a1e8e44a151125cf123ecdda2c56dbec4 | [
"BSD-3-Clause"
] | null | null | null | improver_tests/precipitation_type/shower_condition_probability/test_ShowerConditionProbability.py | thbom001/improver | 6f6e334a1e8e44a151125cf123ecdda2c56dbec4 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for ShowerConditionProbability plugin"""
from typing import Dict, List, Tuple, Union
import numpy as np
import pytest
from iris.cube import CubeList
from numpy import ndarray
from improver.metadata.constants import FLOAT_DTYPE
from improver.precipitation_type.shower_condition_probability import (
ShowerConditionProbability,
)
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
ATTRIBUTES = {
"institution": "Met Office",
"mosg__model_configuration": "gl_ens",
"source": "Met Office Unified Model",
"title": "MOGREPS-G Forecast on UK 2 km Standard Grid",
}
EXPECTED_ATTRIBUTES = {
"institution": "Met Office",
"source": "Met Office Unified Model",
"title": "Post-Processed MOGREPS-G Forecast on UK 2 km Standard Grid",
}
MODEL_ID_ATTR_ATTRIBUTES = EXPECTED_ATTRIBUTES.copy()
MODEL_ID_ATTR_ATTRIBUTES.update({"mosg__model_configuration": "gl_ens"})
@pytest.fixture(name="test_cubes")
def cube_fixture(cube_properties: Tuple[Dict[str, Dict[str, Union[List, ndarray]]]]):
"""Create a test cube"""
cubes = CubeList()
for name, values in cube_properties.items():
cubes.append(
set_up_variable_cube(
values["data"],
name=name,
units=1,
realizations=values["realizations"],
attributes=ATTRIBUTES,
)
)
return cubes
@pytest.mark.parametrize(
"cube_properties, kwargs, expected",
(
# Simple case with one realization, cloud dominates returned
# probabilities (i.e. clear skies).
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.zeros((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
"convective_ratio": {
"data": np.zeros((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.5, "convection_threshold": 0.5},
# Expected result
(np.ones((2, 2)).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
# As above, but using the model_id_attr keyword to preserve the model
# information.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.zeros((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
"convective_ratio": {
"data": np.zeros((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
},
# Other plugin kwargs
{
"model_id_attr": "mosg__model_configuration",
"cloud_threshold": 0.5,
"convection_threshold": 0.5,
},
# Expected result
(np.ones((2, 2)).astype(FLOAT_DTYPE), MODEL_ID_ATTR_ATTRIBUTES),
),
# Simple case with one realization, convection dominates returned
# probabilities.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.ones((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
"convective_ratio": {
"data": np.ones((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.5, "convection_threshold": 0.5},
# Expected result
(np.ones((2, 2)).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
# As above, but the convective_ratio includes masked values. This test
# checks that they are ignored in setting the resulting probabilities
# and that the output is not masked. One resulting value differs to the
# above, corresponding to the masked point.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.ones((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
"convective_ratio": {
"data": np.ma.masked_array(
np.ones((2, 2)).astype(FLOAT_DTYPE),
mask=np.array([[0, 0], [0, 1]]),
),
"realizations": [0],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.5, "convection_threshold": 0.5},
# Expected result
(np.array([[1, 1], [1, 0]]).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
# Multi-realization case with a range of probabilities returned due
# to variable cloud.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.array(
[[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]]
).astype(FLOAT_DTYPE),
"realizations": [0, 1],
},
"convective_ratio": {
"data": np.zeros((2, 2, 2)).astype(FLOAT_DTYPE),
"realizations": [0, 1],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.5, "convection_threshold": 0.5},
# Expected result
(np.array([[0.5, 0], [0.5, 0]]).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
# Same as above, but with different threshold values applied.
# Cloud =< 0.7, which will result in probabilities all equal to 1.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.array(
[[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]]
).astype(FLOAT_DTYPE),
"realizations": [0, 1],
},
"convective_ratio": {
"data": np.zeros((2, 2, 2)).astype(FLOAT_DTYPE),
"realizations": [0, 1],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.7, "convection_threshold": 0.5},
# Expected result
(np.ones((2, 2)).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
# Multi-realization case with cloud and convection both providing a
# showery probability of 1.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.array([[[0, 1], [1, 1]], [[0, 1], [1, 1]]]).astype(
FLOAT_DTYPE
),
"realizations": [0, 1],
},
"convective_ratio": {
"data": np.array([[[0, 0], [0, 1]], [[0, 0], [0, 1]]]).astype(
FLOAT_DTYPE
),
"realizations": [0, 1],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.5, "convection_threshold": 0.5},
# Expected result
(np.array([[1, 0], [0, 1]]).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
),
)
def test_scenarios(test_cubes, kwargs, expected):
"""Test output type and metadata"""
expected_shape = test_cubes[0].shape[-2:]
result = ShowerConditionProbability(**kwargs)(test_cubes)
assert result.name() == "probability_of_shower_condition_above_threshold"
assert result.units == "1"
assert result.shape == expected_shape
assert result.data.dtype == FLOAT_DTYPE
assert (result.data == expected[0]).all()
assert result.attributes == expected[1]
assert result.coord(var_name="threshold").name() == "shower_condition"
assert result.coord(var_name="threshold").points == 1.0
def test_incorrect_inputs_exception():
"""Tests that the expected exception is raised for incorrectly named
input cubes."""
temperature = set_up_variable_cube(np.ones((2, 2)).astype(FLOAT_DTYPE))
expected = (
"A cloud area fraction and convective ratio are required, "
f"but the inputs were: {temperature.name()}, {temperature.name()}"
)
with pytest.raises(ValueError, match=expected):
ShowerConditionProbability()(CubeList([temperature, temperature]))
def test_mismatched_shape_exception():
"""Tests that the expected exception is raised for cloud and convection
cubes of different shapes."""
cloud = set_up_variable_cube(
np.ones((2, 2)).astype(FLOAT_DTYPE),
name="low_and_medium_type_cloud_area_fraction",
)
convection = set_up_variable_cube(
np.ones((3, 3)).astype(FLOAT_DTYPE), name="convective_ratio"
)
expected = (
"The cloud area fraction and convective ratio cubes are not the same "
"shape and cannot be combined to generate a shower probability"
)
with pytest.raises(ValueError, match=expected):
ShowerConditionProbability()(CubeList([cloud, convection]))
| 39.456204 | 86 | 0.565998 | 0 | 0 | 0 | 0 | 7,018 | 0.649154 | 0 | 0 | 4,810 | 0.444917 |
1df3ba41e191af85d9097217c0ce64ae0929f14a | 4,774 | py | Python | controllers/vessel/file/upload.py | gbf-labs/rh-api | 317a812164ad8943ab638c06f61723cb928bfd12 | [
"Apache-2.0"
] | null | null | null | controllers/vessel/file/upload.py | gbf-labs/rh-api | 317a812164ad8943ab638c06f61723cb928bfd12 | [
"Apache-2.0"
] | 6 | 2020-03-30T23:11:27.000Z | 2022-03-12T00:21:45.000Z | controllers/vessel/file/upload.py | gbf-labs/rh-api | 317a812164ad8943ab638c06f61723cb928bfd12 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=no-member, too-many-locals, no-self-use
"""Vessels File Upload """
import time
from flask import request
# from library.couch_database import CouchDatabase
from library.postgresql_queries import PostgreSQL
from library.couch_queries import Queries
from library.common import Common
from library.aws_s3 import AwsS3
class Upload(Common):
"""Class for Vessels"""
# INITIALIZE
def __init__(self):
"""The Constructor for Vessel Upload class"""
self.couch_query = Queries()
self.postgres = PostgreSQL()
self.aws3 = AwsS3()
super(Upload, self).__init__()
# GET VESSEL FUNCTION
def file_upload(self):
"""
This API is for Uploading Vessel File
---
tags:
- Vessel
produces:
- application/json
parameters:
- name: token
in: header
description: Token
required: true
type: string
- name: userid
in: header
description: User ID
required: true
type: string
- name: vessel_id
in: query
description: Vessel ID
required: true
type: string
responses:
500:
description: Error
200:
description: Vessel File Upload
"""
# INIT DATA
data = {}
# VESSEL ID
vessel_id = request.args.get('vessel_id')
# # GET DATA
token = request.headers.get('token')
userid = request.headers.get('userid')
# CHECK TOKEN
token_validation = self.validate_token(token, userid)
if not token_validation:
data["alert"] = "Invalid Token"
data['status'] = 'Failed'
# RETURN ALERT
return self.return_data(data)
# RH_<VesselIMO>_<ImageID>
parameters = self.couch_query.get_complete_values(
vessel_id,
"PARAMETERS"
)
# VESSEL IMO
vessel_imo = parameters['PARAMETERS']['INFO']['IMO']
file_upload = []
filenames = request.files.getlist('upfile')
for filename in filenames:
try:
file_name = filename.filename
# ext = file_name.split(".")[-1]
# if not self.allowed_file_type(file_name):
# data["alert"] = "File Type Not Allowed!"
# data['status'] = 'Failed'
# return self.return_data(data)
            except AttributeError:  # the uploaded object had no usable filename
data["alert"] = "No image!"
data['status'] = 'Failed'
# RETURN ALERT
return self.return_data(data)
file_name = self.rename_file(vessel_id, file_name)
vimg_data = {}
vimg_data['vessel_id'] = vessel_id
vimg_data['vessel_imo'] = vessel_imo
vimg_data['file_name'] = file_name
vimg_data['status'] = "active"
vimg_data['created_on'] = time.time()
# ADD FILE TO VESSEL FILE TABLE
self.postgres.insert('vessel_file', vimg_data, 'vessel_file_id')
# FILE NAME
# file_name_upload = str(vessel_file_id) + "." + ext
# upload_file = 'VesselFiles/' + "RH_" + vessel_imo + "_" + file_name_upload
upload_file = 'VesselFiles/' + vessel_imo +"/" + file_name
body = request.files['upfile']
# SAVE TO S3
url = ""
if self.aws3.save_file(upload_file, body):
url = self.aws3.get_url(upload_file)
file_upload.append({
"filename": file_name,
"url": url
})
data["status"] = "ok"
data["data"] = file_upload
# RETURN
return self.return_data(data)
def allowed_file_type(self, filename):
""" Check Allowed File Extension """
allowed_extensions = set(['txt', 'pdf'])
return '.' in filename and filename.rsplit('.', 1)[1].lower() in allowed_extensions
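    # e.g. allowed_file_type("report.pdf") -> True; allowed_file_type("run.exe") -> False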
def rename_file(self, vessel_id, filename):
""" Rename File """
sql_str = "SELECT * FROM vessel_file"
sql_str += " WHERE vessel_id='{0}'".format(vessel_id)
sql_str += " AND file_name='{0}'".format(filename)
vessel_file = self.postgres.query_fetch_one(sql_str)
if vessel_file:
new_name = self.file_replace(vessel_file['file_name'])
return self.rename_file(vessel_id, new_name)
return filename
| 29.652174 | 92 | 0.526812 | 4,428 | 0.927524 | 0 | 0 | 0 | 0 | 0 | 0 | 1,919 | 0.401969 |
1df4682ad4e85d7a5440a5f0af1ac4170aeb6c7a | 389 | py | Python | graphgallery/gallery/gallery_model/pytorch/__init__.py | Aria461863631/GraphGallery | 7b62f80ab36b29013bea2538a6581fc696a80201 | [
"MIT"
] | null | null | null | graphgallery/gallery/gallery_model/pytorch/__init__.py | Aria461863631/GraphGallery | 7b62f80ab36b29013bea2538a6581fc696a80201 | [
"MIT"
] | null | null | null | graphgallery/gallery/gallery_model/pytorch/__init__.py | Aria461863631/GraphGallery | 7b62f80ab36b29013bea2538a6581fc696a80201 | [
"MIT"
] | null | null | null | from .gcn import GCN, DenseGCN
from .gat import GAT
from .clustergcn import ClusterGCN
from .fastgcn import FastGCN
from .dagnn import DAGNN
from .pairnorm import *
from .simpgcn import SimPGCN
from .mlp import MLP
from .tagcn import TAGCN
from .appnp import APPNP, PPNP
# experimental models
from .experimental.median_gcn import MedianGCN
from .experimental.trimmed_gcn import TrimmedGCN
| 25.933333 | 48 | 0.81491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.051414 |
1df523cdc41ccb402483976f0573983b6122d8e9 | 4,412 | py | Python | tests/test_vims_pixel.py | seignovert/pyvims | a70b5b9b8bc5c37fa43b7db4d15407f312a31849 | [
"BSD-3-Clause"
] | 4 | 2019-09-16T15:50:22.000Z | 2021-04-08T15:32:48.000Z | tests/test_vims_pixel.py | seignovert/pyvims | a70b5b9b8bc5c37fa43b7db4d15407f312a31849 | [
"BSD-3-Clause"
] | 3 | 2018-05-04T09:28:24.000Z | 2018-12-03T09:00:31.000Z | tests/test_vims_pixel.py | seignovert/pyvims | a70b5b9b8bc5c37fa43b7db4d15407f312a31849 | [
"BSD-3-Clause"
] | 1 | 2020-10-12T15:14:17.000Z | 2020-10-12T15:14:17.000Z | """Test VIMS image module."""
from numpy.testing import assert_array_almost_equal as assert_array
from pytest import approx, fixture, raises
from pyvims import VIMS
from pyvims.errors import VIMSError
@fixture
def img_id():
"""Testing image ID."""
return '1731456416_1'
@fixture
def cube(img_id):
"""Testing cube."""
return VIMS(img_id)
@fixture
def pixel(cube):
"""Testing pixel (ground and specular)."""
return cube[6, 32]
@fixture
def limb_pixel(cube):
"""Testing limb pixel."""
return cube[1, 1]
def test_pixel_err(cube):
"""Test VIMS pixel errors."""
# Invalid sample value
with raises(VIMSError):
_ = cube[0, 1]
with raises(VIMSError):
_ = cube[100, 1]
with raises(TypeError):
_ = cube[1.1, 1]
# Invalid line value
with raises(VIMSError):
_ = cube[1, 0]
with raises(VIMSError):
_ = cube[1, 100]
with raises(TypeError):
_ = cube[1, 1.1]
def test_pixel_properties(pixel):
"""Test VIMS pixel properties (ground and specular)."""
assert pixel == '1731456416_1-S6_L32'
assert pixel != '1731456416_1-S6_L33'
assert pixel.s == 6
assert pixel.l == 32
assert pixel.i == 6 - 1
assert pixel.j == 32 - 1
assert pixel[352] == pixel @ 5.13 == pixel['5.13'] == pixel @ '352'
assert pixel[339:351] == pixel @ '4.91:5.11' == approx(0.162, abs=1e-3)
assert pixel.et == approx(406035298.3, abs=.1)
assert_array(pixel.j2000, [0.7299, 0.3066, -0.6109], decimal=4)
assert pixel.ra == approx(22.79, abs=1e-2)
assert pixel.dec == approx(-37.66, abs=1e-2)
assert pixel.lon == -pixel.lon_e == approx(136.97, abs=1e-2)
assert pixel.lat == approx(80.37, abs=1e-2)
assert pixel.alt == approx(0, abs=1e-2)
assert not pixel.limb
assert pixel.ground
assert pixel.slon == '137°W'
assert pixel.slat == '80°N'
assert pixel.salt == '0 km (Ground pixel)'
assert pixel.inc == approx(67.1, abs=.1)
assert pixel.eme == approx(64.4, abs=.1)
assert pixel.phase == approx(131.5, abs=.1)
assert_array(pixel.sc, [12.25, 31.96], decimal=2)
assert_array(pixel.ss, [185.00, 16.64], decimal=2)
assert pixel.dist_sc == approx(211868.6, abs=.1)
assert pixel.res_s == pixel.res_l == pixel.res == approx(104.9, abs=.1)
assert pixel.is_specular
assert pixel.specular_lon == approx(141.2, abs=.1)
assert pixel.specular_lat == approx(79.25, abs=.1)
assert pixel.specular_angle == approx(65.77, abs=.1)
assert pixel.specular_dist == approx(60.8, abs=.1)
assert len(pixel.spectrum) == len(pixel.wvlns)
assert pixel.wvlns[0] == approx(0.892, abs=1e-3)
assert pixel.spectrum[0] == approx(0.156, abs=1e-3)
def test_limb_pixel_properties_limb(limb_pixel):
"""Test VIMS limb pixel properties (not specular)."""
assert str(limb_pixel) == '1731456416_1-S1_L1'
assert limb_pixel.s == 1
assert limb_pixel.l == 1
assert limb_pixel.i == 0
assert limb_pixel.j == 0
assert limb_pixel.et == approx(406034072.7, abs=.1)
assert_array(limb_pixel.j2000, [0.7384, 0.2954, -0.6062], decimal=4)
assert limb_pixel.ra == approx(21.81, abs=1e-2)
assert limb_pixel.dec == approx(-37.31, abs=1e-2)
assert limb_pixel.lon == approx(251.01, abs=1e-2)
assert limb_pixel.lat == approx(41.17, abs=1e-2)
assert limb_pixel.alt == approx(1893.58, abs=1e-2)
assert limb_pixel.limb
assert not limb_pixel.ground
assert limb_pixel.slon == '109°E'
assert limb_pixel.slat == '41°N'
assert limb_pixel.salt == '1894 km (Limb pixel)'
assert limb_pixel.inc == approx(61.4, abs=.1)
assert limb_pixel.eme == approx(90.0, abs=.1)
assert limb_pixel.phase == approx(131.7, abs=.1)
assert limb_pixel.dist_sc == approx(219698.4, abs=.1)
assert limb_pixel.res == approx(108.7, abs=.1)
assert not limb_pixel.is_specular
def test_pixel_properties_err(pixel):
"""Test VIMS pixel properties errors."""
# Band invalid
with raises(VIMSError):
_ = pixel[1]
with raises(VIMSError):
_ = pixel[353]
# Wavelength invalid
with raises(VIMSError):
_ = pixel @ .5
with raises(VIMSError):
_ = pixel @ 6.
# Invalid index
with raises(VIMSError):
_ = pixel @ (1, 2, 3)
def test_pixel_plot(pixel):
"""Test pixel plot."""
pixel.plot(title='testing')
| 26.739394 | 75 | 0.635086 | 0 | 0 | 0 | 0 | 327 | 0.074049 | 0 | 0 | 608 | 0.137681 |
1df7cf4379736e54e6847b970614eade9f08fa85 | 4,239 | py | Python | Project2/system.py | KristianWold/FYS4410 | 25f23109468ed72b69a6e9af72957500d76f0e79 | [
"MIT"
] | 1 | 2020-04-30T18:39:43.000Z | 2020-04-30T18:39:43.000Z | Project2/system.py | KristianWold/FYS4411 | 25f23109468ed72b69a6e9af72957500d76f0e79 | [
"MIT"
] | null | null | null | Project2/system.py | KristianWold/FYS4411 | 25f23109468ed72b69a6e9af72957500d76f0e79 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
from tqdm.notebook import tqdm
class System():
def __init__(self,
num_part,
dim,
Ansatz=None,
External=None,
Internal=None,
Sampler=None
):
self.num_part = num_part
self.dim = dim
self.Ansatz = Ansatz
self.External = External
self.Internal = Internal
self.Sampler = Sampler
self.Ansatz.system = self
self.Sampler.system = self
class Metropolis():
def __init__(self, step_length, steps):
self.step_length = step_length
self.steps = steps
def __call__(self, batch_size):
total_accepted = 0
dim = self.system.dim
# inital position for walkers
x_old = tf.random.uniform(
(batch_size, dim), minval=-2, maxval=2, dtype=tf.dtypes.float64)
psi_old = self.system.Ansatz(x_old).numpy()
# thermalizing steps
for i in range(self.steps):
x_new = x_old + self.step_length * \
tf.random.uniform((batch_size, dim), minval=-1, maxval=1,
dtype=tf.dtypes.float64)
psi_new = self.system.Ansatz(x_new).numpy()
U = np.random.uniform(0, 1, (batch_size, 1))
# vectorized acceptance criterion
mask = ((psi_new / psi_old)**2 > U)[:, 0]
x_old = x_old.numpy()
x_new = x_new.numpy()
# update walkers
x_old[mask] = x_new[mask]
psi_old[mask] = psi_new[mask]
x_old = tf.convert_to_tensor(x_old, dtype=tf.dtypes.float64)
total_accepted += np.sum(mask)
return x_old, total_accepted
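# Acceptance rule used above: a proposed move x_new is kept with probability
#   min(1, |psi(x_new) / psi(x_old)|^2),
# implemented in vectorised form as the boolean mask (psi_new / psi_old)**2 > U
# with U ~ Uniform(0, 1) drawn independently per walker.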
class HarmonicOsc():
def __init__(self, omega):
self.omega = omega
def __call__(self, x):
V = 0.5 * self.omega**2 * \
tf.reshape(tf.reduce_sum(x**2, axis=1), (-1, 1))
return V
class Coulomb():
def __init__(self, alpha, beta):
self.alpha = alpha
self.beta = beta
def __call__(self, x, num_part, dim):
V = 0
for i in range(num_part):
for j in range(i):
r12 = tf.norm(x[:, i * dim:(i + 1) * dim] -
x[:, j * dim:(j + 1) * dim], axis=1)
r12 = tf.reshape(r12, (-1, 1))
V += self.alpha / tf.math.sqrt(r12**2 + self.beta**2)
return V
def oneBodyDensity(pos, bins, mode="radial"):
if mode == "radial1D":
density = np.zeros(bins.shape[0])
r_min = bins[0]
dr = bins[1] - bins[0]
rPos = np.linalg.norm(pos, axis=1)
for r in tqdm(rPos):
try:
density[int((r - r_min) // dr)] += 1 / dr
            except IndexError:
pass
return density
if mode == "radial2D":
density = np.zeros(bins.shape[0])
r_min = bins[0]
dr = bins[1] - bins[0]
rPos = np.linalg.norm(pos, axis=1)
for r in tqdm(rPos):
try:
density[int((r - r_min) // dr)] += 1 / (2 * np.pi * dr * r)
            except IndexError:
pass
return density
if mode == "radial3D":
density = np.zeros(bins.shape[0])
r_min = bins[0]
dr = bins[1] - bins[0]
rPos = np.linalg.norm(pos, axis=1)
for r in tqdm(rPos):
try:
density[int((r - r_min) // dr)] += 1 / (4 * np.pi * dr * r**2)
            except IndexError:
pass
return density
if mode == "1D":
density = np.zeros(bins.shape[0])
x_min = bins[0]
dx = bins[1] - bins[0]
for x in tqdm(pos):
try:
density[int((x - x_min) // dx)] += 1
            except IndexError:
pass
return density / dx
if mode == "2D":
density = np.zeros((bins.shape[0], bins.shape[0]))
y_min = x_min = bins[0]
dy = dx = bins[1] - bins[0]
for x, y in tqdm(pos):
try:
density[int((x - x_min) // dx), int((y - y_min) // dy)] += 1
            except IndexError:
pass
return density / pos.shape[0]
| 27.348387 | 78 | 0.482425 | 2,414 | 0.569474 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.03397 |
1df80634b7ac4a729c96447edb28b15b92dc8f16 | 8,819 | py | Python | contrib/heat_docker/heat_docker/tests/test_docker_container.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | contrib/heat_docker/heat_docker/tests/test_docker_container.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | contrib/heat_docker/heat_docker/tests/test_docker_container.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2013 Docker, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.utils import importutils
import six
from heat.common import exception
from heat.common import template_format
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests.common import HeatTestCase
from heat.tests import utils
from testtools import skipIf
from ..resources import docker_container # noqa
from .fake_docker_client import FakeDockerClient # noqa
docker = importutils.try_import('docker')
template = '''
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Test template",
"Parameters": {},
"Resources": {
"Blog": {
"Type": "DockerInc::Docker::Container",
"Properties": {
"image": "samalba/wordpress",
"env": [
"FOO=bar"
]
}
}
}
}
'''
class DockerContainerTest(HeatTestCase):
def setUp(self):
super(DockerContainerTest, self).setUp()
for res_name, res_class in docker_container.resource_mapping().items():
resource._register_class(res_name, res_class)
self.addCleanup(self.m.VerifyAll)
def create_container(self, resource_name):
t = template_format.parse(template)
stack = utils.parse_stack(t)
resource = docker_container.DockerContainer(
resource_name,
stack.t.resource_definitions(stack)[resource_name], stack)
self.m.StubOutWithMock(resource, 'get_client')
resource.get_client().MultipleTimes().AndReturn(FakeDockerClient())
self.assertIsNone(resource.validate())
self.m.ReplayAll()
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
return resource
def get_container_state(self, resource):
client = resource.get_client()
return client.inspect_container(resource.resource_id)['State']
def test_resource_create(self):
container = self.create_container('Blog')
self.assertTrue(container.resource_id)
running = self.get_container_state(container)['Running']
self.assertIs(True, running)
client = container.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertIsNone(client.container_create[0]['name'])
def test_create_with_name(self):
t = template_format.parse(template)
stack = utils.parse_stack(t)
definition = stack.t.resource_definitions(stack)['Blog']
definition['Properties']['name'] = 'super-blog'
resource = docker_container.DockerContainer(
'Blog', definition, stack)
self.m.StubOutWithMock(resource, 'get_client')
resource.get_client().MultipleTimes().AndReturn(FakeDockerClient())
self.assertIsNone(resource.validate())
self.m.ReplayAll()
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
client = resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual('super-blog', client.container_create[0]['name'])
@mock.patch.object(docker_container.DockerContainer, 'get_client')
def test_create_failed(self, test_client):
mock_client = mock.Mock()
mock_client.inspect_container.return_value = {
"State": {
"ExitCode": -1
}
}
mock_client.logs.return_value = "Container startup failed"
test_client.return_value = mock_client
mock_stack = mock.Mock()
mock_stack.db_resource_get.return_value = None
res_def = mock.Mock(spec=rsrc_defn.ResourceDefinition)
docker_res = docker_container.DockerContainer("test", res_def,
mock_stack)
exc = self.assertRaises(resource.ResourceInError,
docker_res.check_create_complete,
'foo')
self.assertIn("Container startup failed", six.text_type(exc))
def test_start_with_bindings_and_links(self):
t = template_format.parse(template)
stack = utils.parse_stack(t)
definition = stack.t.resource_definitions(stack)['Blog']
definition['Properties']['port_bindings'] = {
'80/tcp': [{'HostPort': '80'}]}
definition['Properties']['links'] = {'db': 'mysql'}
resource = docker_container.DockerContainer(
'Blog', definition, stack)
self.m.StubOutWithMock(resource, 'get_client')
resource.get_client().MultipleTimes().AndReturn(FakeDockerClient())
self.assertIsNone(resource.validate())
self.m.ReplayAll()
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
client = resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual({'db': 'mysql'}, client.container_start[0]['links'])
self.assertEqual(
{'80/tcp': [{'HostPort': '80'}]},
client.container_start[0]['port_bindings'])
def test_resource_attributes(self):
container = self.create_container('Blog')
# Test network info attributes
self.assertEqual('172.17.42.1', container.FnGetAtt('network_gateway'))
self.assertEqual('172.17.0.3', container.FnGetAtt('network_ip'))
self.assertEqual('1080', container.FnGetAtt('network_tcp_ports'))
self.assertEqual('', container.FnGetAtt('network_udp_ports'))
# Test logs attributes
self.assertEqual('---logs_begin---', container.FnGetAtt('logs_head'))
self.assertEqual('---logs_end---', container.FnGetAtt('logs_tail'))
# Test a non existing attribute
self.assertRaises(exception.InvalidTemplateAttribute,
container.FnGetAtt, 'invalid_attribute')
def test_resource_delete(self):
container = self.create_container('Blog')
scheduler.TaskRunner(container.delete)()
self.assertEqual((container.DELETE, container.COMPLETE),
container.state)
running = self.get_container_state(container)['Running']
self.assertIs(False, running)
def test_resource_already_deleted(self):
container = self.create_container('Blog')
scheduler.TaskRunner(container.delete)()
running = self.get_container_state(container)['Running']
self.assertIs(False, running)
scheduler.TaskRunner(container.delete)()
self.m.VerifyAll()
@skipIf(docker is None, 'docker-py not available')
def test_resource_delete_exception(self):
response = mock.MagicMock()
response.status_code = 404
response.content = 'some content'
container = self.create_container('Blog')
self.m.StubOutWithMock(container.get_client(), 'kill')
container.get_client().kill(container.resource_id).AndRaise(
docker.errors.APIError('Not found', response))
self.m.StubOutWithMock(container, '_get_container_status')
container._get_container_status(container.resource_id).AndRaise(
docker.errors.APIError('Not found', response))
self.m.ReplayAll()
scheduler.TaskRunner(container.delete)()
self.m.VerifyAll()
def test_resource_suspend_resume(self):
container = self.create_container('Blog')
# Test suspend
scheduler.TaskRunner(container.suspend)()
self.assertEqual((container.SUSPEND, container.COMPLETE),
container.state)
running = self.get_container_state(container)['Running']
self.assertIs(False, running)
# Test resume
scheduler.TaskRunner(container.resume)()
self.assertEqual((container.RESUME, container.COMPLETE),
container.state)
running = self.get_container_state(container)['Running']
self.assertIs(True, running)
| 40.269406 | 79 | 0.652342 | 7,301 | 0.827872 | 0 | 0 | 1,671 | 0.189477 | 0 | 0 | 1,879 | 0.213063 |
1df8beec4fea3bcefebf61482d69a744dbb216ea | 302 | py | Python | apistar/exceptions.py | sylwekb/apistar | 890006884dbb9644824511e0275fa00515204c5b | [
"BSD-3-Clause"
] | null | null | null | apistar/exceptions.py | sylwekb/apistar | 890006884dbb9644824511e0275fa00515204c5b | [
"BSD-3-Clause"
] | null | null | null | apistar/exceptions.py | sylwekb/apistar | 890006884dbb9644824511e0275fa00515204c5b | [
"BSD-3-Clause"
] | null | null | null | class SchemaError(Exception):
def __init__(self, schema, code):
self.schema = schema
self.code = code
msg = schema.errors[code].format(**schema.__dict__)
super().__init__(msg)
class NoCurrentApp(Exception):
pass
class ConfigurationError(Exception):
pass
| 20.133333 | 59 | 0.65894 | 295 | 0.976821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1dfaf8dad72aefecc4b8134cc8efce48a3d16816 | 288 | py | Python | pyslam/thirdparty/disk/submodules/torch-dimcheck/setup.py | dysdsyd/VO_benchmark | a7602edab934419c1ec73618ee655e18026f834f | [
"Apache-2.0"
] | 2 | 2021-09-11T09:13:31.000Z | 2021-11-03T01:39:56.000Z | pyslam/thirdparty/disk/submodules/torch-dimcheck/setup.py | dysdsyd/VO_benchmark | a7602edab934419c1ec73618ee655e18026f834f | [
"Apache-2.0"
] | null | null | null | pyslam/thirdparty/disk/submodules/torch-dimcheck/setup.py | dysdsyd/VO_benchmark | a7602edab934419c1ec73618ee655e18026f834f | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
setup(
name='torch-dimcheck',
version='0.0.1',
description='Dimensionality annotations for tensor parameters and return values',
packages=['torch_dimcheck'],
author='Michał Tyszkiewicz',
author_email='michal.tyszkiewicz@gmail.com',
)
| 26.181818 | 85 | 0.725694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.546713 |
1dfc968b657f73ab414bcfadc856995832d9a8ce | 5,050 | py | Python | retropath2_wrapper/__main__.py | brsynth/RetroPath2-wrapper | 545f40ad0df7049332012ee5184138c6c14a9f35 | [
"MIT"
] | 4 | 2021-10-13T22:12:16.000Z | 2021-12-25T13:00:53.000Z | retropath2_wrapper/__main__.py | brsynth/RetroPath2-wrapper | 545f40ad0df7049332012ee5184138c6c14a9f35 | [
"MIT"
] | 6 | 2020-08-14T15:02:35.000Z | 2022-03-04T13:05:21.000Z | retropath2_wrapper/__main__.py | brsynth/RetroPath2-wrapper | 545f40ad0df7049332012ee5184138c6c14a9f35 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from os import (
path as os_path,
mkdir as os_mkdir,
getcwd
)
from argparse import ArgumentParser, Namespace
from logging import (
Logger,
getLogger
)
from glob import glob
from typing import (
Dict,
)
from colored import fg, bg, attr
from brs_utils import (
create_logger
)
from .RetroPath2 import (
set_vars,
retropath2
)
from .Args import (
build_args_parser
)
from ._version import __version__
__ERROR_CODES__ = {
0: 0,
'NoError': 0,
'SrcInSink': 1,
'FileNotFound': 2,
'OSError': 3,
'NoSolution': 4,
'TimeLimit': 5,
'InChI': 6
}
def print_conf(
kvars: Dict,
prog: str,
logger: Logger = getLogger(__name__)
) -> None:
"""
Print configuration.
Parameters
----------
kvars : Dict
Dictionnary with variables to print.
logger : Logger
The logger object.
Returns
-------
    None
"""
# print ('%s%s Configuration %s' % (fg('magenta'), attr('bold'), attr('reset')))
print('{fg}{attr1}Configuration {attr2}'.format(fg=fg('cyan'), attr1=attr('bold'), attr2=attr('reset')))
print('{fg}'.format(fg=fg('cyan')), end='')
print(' + ' + prog)
print(' |--version: '+__version__)
print(' + KNIME')
print(' |--path: '+kvars['kexec'])
# logger.info(' - version: '+kvars['kver'])
print(' + RetroPath2.0 workflow')
print(' |--path: '+kvars['workflow'])
# logger.info(' - version: r20210127')
print('')
print ('{attr}'.format(attr=attr('reset')), end='')
def _cli():
parser = build_args_parser()
args = parse_and_check_args(parser)
if args.log.lower() in ['silent', 'quiet'] or args.silent:
args.log = 'CRITICAL'
# Store KNIME vars into a dictionary
kvars = set_vars(
args.kexec,
args.kver,
args.kpkg_install,
args.kwf
)
# Print out configuration
if not args.silent and args.log.lower() not in ['critical', 'error']:
print_conf(kvars, prog = parser.prog)
# Create logger
logger = create_logger(parser.prog, args.log)
logger.debug('args: ' + str(args))
logger.debug('kvars: ' + str(kvars))
r_code, result_files = retropath2(
sink_file=args.sink_file, source_file=args.source_file, rules_file=args.rules_file,
outdir=args.outdir,
kvars=kvars,
max_steps=args.max_steps, topx=args.topx, dmin=args.dmin, dmax=args.dmax, mwmax_source=args.mwmax_source, mwmax_cof=args.mwmax_cof,
timeout=args.timeout,
logger=logger
)
print(r_code)
if r_code == 'OK' or r_code == 'TimeLimit':
logger.info('{attr1}Results{attr2}'.format(attr1=attr('bold'), attr2=attr('reset')))
logger.info(' |- Checking... ')
r_code = check_results(result_files, logger)
logger.info(' |--path: '+args.outdir)
else:
logger.error('Exiting...')
return __ERROR_CODES__[r_code]
def check_results(
result_files: Dict,
logger: Logger = getLogger(__name__)
) -> int:
# Check if any result has been found
r_code = check_scope(result_files['outdir'], logger)
if r_code == -1:
r_code = 'NoSolution'
return r_code
def check_scope(
outdir: str,
logger: Logger = getLogger(__name__)
) -> int:
"""
Check if result is present in outdir.
Parameters
----------
outdir : str
The folder where results heve been written.
logger : Logger
The logger object.
Returns
-------
    int
        Return code.
"""
csv_scopes = sorted(
glob(os_path.join(outdir, '*_scope.csv')),
key=lambda scope: os_path.getmtime(scope)
)
if csv_scopes == []:
logger.warning(' Warning: No solution has been found')
return -1
return 0
def parse_and_check_args(
parser: ArgumentParser
) -> Namespace:
args = parser.parse_args()
if args.kver is None and args.kpkg_install and args.kexec is not None:
parser.error("--kexec requires --kver.")
# Create outdir if does not exist
if not os_path.exists(args.outdir):
os_mkdir(args.outdir)
if args.source_file is not None:
if args.source_name is not None:
parser.error("--source_name is not compliant with --source_file.")
if args.source_inchi is not None:
parser.error("--source_inchi is not compliant with --source_file.")
else:
if args.source_inchi is None:
parser.error("--source_inchi is mandatory.")
if args.source_name is None or args.source_name == '':
args.source_name = 'target'
# Create temporary source file
args.source_file = os_path.join(args.outdir, 'source.csv')
with open(args.source_file, 'w') as temp_f:
temp_f.write('Name,InChI\n')
temp_f.write('"%s","%s"' % (args.source_name, args.source_inchi.strip()))
return args
if __name__ == '__main__':
_cli()
| 24.876847 | 139 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,555 | 0.307921 |
1dfcb4d77e3deb4901ba246b595206ea283bb0d6 | 136,009 | py | Python | src_data/community_health_metrics.py | marcmiquel/WDO | 7d8d8e912f8dbacb2cdc0f6fd5c26370b8310cbb | [
"MIT"
] | 3 | 2020-12-21T06:06:16.000Z | 2021-08-28T12:52:07.000Z | src_data/community_health_metrics.py | marcmiquel/WDO | 7d8d8e912f8dbacb2cdc0f6fd5c26370b8310cbb | [
"MIT"
] | 1 | 2021-01-27T19:33:20.000Z | 2021-01-27T19:33:20.000Z | src_data/community_health_metrics.py | marcmiquel/WDO | 7d8d8e912f8dbacb2cdc0f6fd5c26370b8310cbb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# script
import wikilanguages_utils
from wikilanguages_utils import *
# time
import time
import datetime
from dateutil import relativedelta
import calendar
# system
import os
import sys
import shutil
import re
import random
import operator
# databases
import MySQLdb as mdb, MySQLdb.cursors as mdb_cursors
import sqlite3
# files
import gzip
import zipfile
import bz2
import json
import csv
import codecs
# requests and others
import requests
import urllib
import webbrowser
import reverse_geocoder as rg
import numpy as np
from random import shuffle
# data
import pandas as pd
import gc
# https://stats.wikimedia.org/#/all-projects
# https://meta.wikimedia.org/wiki/List_of_Wikipedias/ca
# https://meta.wikimedia.org/wiki/Research:Metrics#Volume_of_contribution
# https://meta.wikimedia.org/wiki/Research:Wikistats_metrics/Active_editors
community_health_metrics_db = 'community_health_metrics.db'
# MAIN
def main():
create_community_health_metrics_db()
for languagecode in wikilanguagecodes: # wikilanguagecodes
print (languagecode)
editor_metrics_dump_iterator(languagecode) # it fills the database cawiki_editors, cawiki_editor_metrics
print ('dump iterator done.\n')
# # input('')
# editor_metrics_db_iterator(languagecode) # it fills the database cawiki_editor_metrics
# print ('database iterator done.\n')
# print ('hell yeh')
# input('')
# community_metrics_db_iterator(languagecode) # it fills the database cawiki_community_metrics
# input('')
print ('done')
###
# export_community_health_metrics_csv(languagecode) # it fills the database cawiki_editor_metrics
# editor_metrics_content_diversity(languagecode)
# editor_metrics_multilingual(languagecode)
################################################################
# FUNCTIONS
def create_community_health_metrics_db():
conn = sqlite3.connect(databases_path + community_health_metrics_db); cursor = conn.cursor()
for languagecode in wikilanguagecodes:
table_name = languagecode+'wiki_editors'
try:
cursor.execute("DROP TABLE "+table_name+";")
except:
pass
query = ("CREATE TABLE IF NOT EXISTS "+table_name+" (user_id integer, user_name text, bot text, user_flags text, highest_flag text, highest_flag_year_month text, gender text, primarybinary integer, primarylang text, primarybinary_ecount integer, totallangs_ecount integer, numberlangs integer, registration_date, year_month_registration, first_edit_timestamp text, year_month_first_edit text, year_first_edit text, lustrum_first_edit text, survived60d text, last_edit_timestamp text, year_last_edit text, lifetime_days integer, editing_days integer, percent_editing_days real, days_since_last_edit integer, seconds_between_last_two_edits integer, PRIMARY KEY (user_id, user_name))")
cursor.execute(query)
table_name = languagecode+'wiki_editor_metrics'
try:
cursor.execute("DROP TABLE "+table_name+";")
except:
pass
query = ("CREATE TABLE IF NOT EXISTS "+table_name+" (user_id integer, user_name text, abs_value real, rel_value real, metric_name text, year_month text, timestamp text, PRIMARY KEY (user_id, metric_name, year_month, timestamp))")
cursor.execute(query)
table_name = languagecode+'wiki_community_metrics'
try:
cursor.execute("DROP TABLE "+table_name+";")
except:
pass
query = ("CREATE TABLE IF NOT EXISTS "+table_name+" (year_month text, topic text, m1 text, m1_calculation text, m1_value text, m2 text, m2_calculation text, m2_value text, m1_count float, m2_count float, PRIMARY KEY (topic, m1, m1_calculation, m1_value, m2, m2_calculation, m2_value))")
cursor.execute(query)
table_name = languagecode+'wiki_article_metrics'
try:
cursor.execute("DROP TABLE "+table_name+";")
except:
pass
query = ("CREATE TABLE IF NOT EXISTS "+table_name+" (qitem text, page_id integer, page_title text, abs_value real, rel_value, metric_name text, year_month text, PRIMARY KEY (metric_name, year_month))")
cursor.execute(query)
### ---- ###
table_name = languagecode+'wiki_editor_content_metrics'
try:
cursor.execute("DROP TABLE "+table_name+";")
except:
pass
query = ("CREATE TABLE IF NOT EXISTS "+table_name+" (user_id integer, user_name text, content_type text, abs_value real, rel_value real, year_month text, PRIMARY KEY (user_name, user_id, content_type))")
cursor.execute(query)
conn.commit()
def get_mediawiki_paths(languagecode):
cym = cycle_year_month
d_paths = []
print ('/public/dumps/public/other/mediawiki_history/'+cym)
if os.path.isdir('/public/dumps/public/other/mediawiki_history/'+cym)==False:
cym = datetime.datetime.strptime(cym,'%Y-%m') - relativedelta.relativedelta(months=1) # use the relativedelta module imported at the top, as elsewhere in this file
cym = cym.strftime('%Y-%m')
print ('/public/dumps/public/other/mediawiki_history/'+cym)
dumps_path = '/public/dumps/public/other/mediawiki_history/'+cym+'/'+languagecode+'wiki/'+cym+'.'+languagecode+'wiki.all-time.tsv.bz2'
if os.path.isfile(dumps_path):
print ('one all-time file.')
d_paths.append(dumps_path)
else:
print ('multiple files.')
for year in range(1999, 2025):
dumps_path = '/public/dumps/public/other/mediawiki_history/'+cym+'/'+languagecode+'wiki/'+cym+'.'+languagecode+'wiki.'+str(year)+'.tsv.bz2'
if os.path.isfile(dumps_path):
d_paths.append(dumps_path)
if len(d_paths) == 0:
for year in range(1999, 2025): # months
for month in range(1, 13):
if month > 9:
dumps_path = '/public/dumps/public/other/mediawiki_history/'+cym+'/'+languagecode+'wiki/'+cym+'.'+languagecode+'wiki.'+str(year)+'-'+str(month)+'.tsv.bz2'
else:
dumps_path = '/public/dumps/public/other/mediawiki_history/'+cym+'/'+languagecode+'wiki/'+cym+'.'+languagecode+'wiki.'+str(year)+'-0'+str(month)+'.tsv.bz2'
if os.path.isfile(dumps_path) == True:
d_paths.append(dumps_path)
print(len(d_paths))
print (d_paths)
return d_paths, cym
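# Sketch of the dump layouts this resolver expects (hypothetical month
# '2021-03' and language 'ca'):
# all-time file -> .../mediawiki_history/2021-03/cawiki/2021-03.cawiki.all-time.tsv.bz2
# yearly split -> .../2021-03/cawiki/2021-03.cawiki.2004.tsv.bz2 (one file per year)
# monthly split -> .../2021-03/cawiki/2021-03.cawiki.2020-07.tsv.bz2 (one file per month)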
def editor_metrics_dump_iterator(languagecode):
functionstartTime = time.time()
function_name = 'editor_metrics_dump_iterator '+languagecode
print (function_name)
d_paths, cym = get_mediawiki_paths(languagecode)
if (len(d_paths)==0):
print ('dump error. this language has no mediawiki_history dump: '+languagecode)
# wikilanguages_utils.send_email_toolaccount('dump error at script '+script_name, dumps_path)
# quit()
conn = sqlite3.connect(databases_path + community_health_metrics_db); cursor = conn.cursor()
user_id_user_name_dict = {}
user_id_bot_dict = {}
user_id_user_groups_dict = {}
editor_first_edit_timestamp = {}
editor_registration_date = {}
editor_last_edit_timestamp = {}
editor_seconds_since_last_edit = {}
editor_user_group_dict = {}
editor_user_group_dict_timestamp = {}
# for the survival part
survived_dict = {}
survival_measures = []
user_id_edit_count = {}
editor_user_page_edit_count = {}
editor_user_page_talk_page_edit_count = {}
# for the monthly part
editor_monthly_namespace0_edits = {}
editor_monthly_namespace1_edits = {}
editor_monthly_namespace2_edits = {}
editor_monthly_namespace3_edits = {}
editor_monthly_namespace4_edits = {}
editor_monthly_namespace5_edits = {}
editor_monthly_namespace6_edits = {}
editor_monthly_namespace7_edits = {}
editor_monthly_namespace8_edits = {}
editor_monthly_namespace9_edits = {}
editor_monthly_namespace10_edits = {}
editor_monthly_namespace11_edits = {}
editor_monthly_namespace12_edits = {}
editor_monthly_namespace13_edits = {}
editor_monthly_namespace14_edits = {}
editor_monthly_namespace15_edits = {}
editor_monthly_user_page_edit_count = {}
editor_monthly_user_page_talk_page_edit_count = {}
editor_monthly_edits = {}
editor_monthly_seconds_between_edits = {}
editor_monthly_editing_days = {}
editor_monthly_created_articles = {}
editor_monthly_deleted_articles = {}
editor_monthly_moved_articles = {}
editor_monthly_undeleted_articles = {}
editor_monthly_accounts_created = {}
editor_monthly_users_renamed = {}
editor_monthly_autoblocks = {}
editor_monthly_edits_reverted = {}
editor_monthly_reverts_made = {}
last_year_month = 0
first_date = datetime.datetime.strptime('2001-01-01 01:15:15','%Y-%m-%d %H:%M:%S')
for dump_path in d_paths:
print('\n'+dump_path)
iterTime = time.time()
dump_in = bz2.open(dump_path, 'r')
line = 'something'
line = dump_in.readline()
while line != '':
# print ('*')
# print (line)
# print (seconds_since_last_edit)
# print ('*')
# input('')
line = dump_in.readline()
line = line.rstrip().decode('utf-8')[:-1]
values = line.split('\t')
if len(values)==1: continue
event_entity = values[1]
event_type = values[2]
event_user_id = values[5]
try: int(event_user_id)
except:
continue
event_user_text = values[7]
if event_user_text != '': user_id_user_name_dict[event_user_id] = event_user_text
else:
continue
try:
editor_last_edit = editor_last_edit_timestamp[event_user_id]
last_edit_date_dt = datetime.datetime.strptime(editor_last_edit[:len(editor_last_edit)-2],'%Y-%m-%d %H:%M:%S')
last_edit_year_month_day = datetime.datetime.strptime(last_edit_date_dt.strftime('%Y-%m-%d'),'%Y-%m-%d')
except:
last_edit_year_month_day = ''
event_timestamp = values[3]
event_timestamp_dt = datetime.datetime.strptime(event_timestamp[:len(event_timestamp)-2],'%Y-%m-%d %H:%M:%S')
editor_last_edit_timestamp[event_user_id] = event_timestamp
event_user_groups = values[11]
if event_user_groups != '':
user_id_user_groups_dict[event_user_id] = event_user_groups
page_namespace = values[28]
if event_entity == 'revision':
revision_is_identity_reverted = values[64]
# these are edits that will be reverted later on.
if revision_is_identity_reverted == 'true':
try: editor_monthly_edits_reverted[event_user_id] = editor_monthly_edits_reverted[event_user_id]+1
except: editor_monthly_edits_reverted[event_user_id] = 1
# print ('made',revision_is_identity_reverted, values)
# input('')
revision_is_identity_revert = values[67]
# these are edits that revert another edit.
if revision_is_identity_revert == 'true':
try: editor_monthly_reverts_made[event_user_id] = editor_monthly_reverts_made[event_user_id]+1
except: editor_monthly_reverts_made[event_user_id] = 1
# print ('received',revision_is_identity_revert, values)
# input('')
elif event_entity == 'page' and page_namespace == '0':
if event_type == 'create':
try: editor_monthly_created_articles[event_user_id] = editor_monthly_created_articles[event_user_id]+1
except: editor_monthly_created_articles[event_user_id] = 1
elif event_type == 'delete':
try: editor_monthly_deleted_articles[event_user_id] = editor_monthly_deleted_articles[event_user_id]+1
except: editor_monthly_deleted_articles[event_user_id] = 1
elif event_type == 'move':
try: editor_monthly_moved_articles[event_user_id] = editor_monthly_moved_articles[event_user_id]+1
except: editor_monthly_moved_articles[event_user_id] = 1
elif event_type == 'restore':
try: editor_monthly_undeleted_articles[event_user_id] = editor_monthly_undeleted_articles[event_user_id]+1
except: editor_monthly_undeleted_articles[event_user_id] = 1
elif event_entity == 'user':
user_text = str(values[38]) # this is target of the event
if event_type == 'create' and event_user_text != user_text:
try: editor_monthly_accounts_created[event_user_id] = editor_monthly_accounts_created[event_user_id]+1
except: editor_monthly_accounts_created[event_user_id] = 1
elif event_type == 'rename':
try: editor_monthly_users_renamed[event_user_id] = editor_monthly_users_renamed[event_user_id]+1
except: editor_monthly_users_renamed[event_user_id] = 1
elif event_type == 'altergroups':
user_id = values[36]
user_group = values[41]
cur_ug = ''
if user_group != '' and user_group != None:
try:
cur_ug = editor_user_group_dict[user_id]
if len(cur_ug) < len(user_group):
change = user_group.replace(cur_ug,'').strip(',')
metric_name = 'granted_flag'
else:
change = cur_ug.replace(user_group,'').strip(',')
metric_name = 'removed_flag' # this is only for the case that one flag is removed by another editor. when an editor removes him/herself the flag, it does not appear here.
except:
change = user_group
metric_name = 'granted_flag'
# change (what is new + o -);
# user_group (what is he has after the change);
# cur_ug (what he had right before);
# values[42] (what he'll have in the future and in the end)
# input('')
editor_user_group_dict[user_id] = user_group
if change != '':
# user_text = values[38]
# print (user_id, user_text, ' - ', change, ' - ', user_group, ' - ', cur_ug ,' - ', values[42], ' - ', metric_name, event_timestamp)
# print ('\n',event_type, event_entity, event_user_text, cur_ug, event_user_groups,'\n',line)
if ',' in change:
change_ = change.split(',')
event_timestamp2 = event_timestamp[:len(event_timestamp)-2]
editor_user_group_dict_timestamp[user_id,event_timestamp] = [metric_name, change_[0], cur_ug]
editor_user_group_dict_timestamp[user_id,event_timestamp2] = [metric_name, change_[1], cur_ug]
else:
editor_user_group_dict_timestamp[user_id,event_timestamp] = [metric_name, change, cur_ug]
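# Worked example (hypothetical values): if cur_ug == 'rollbacker' and the new
# user_group == 'rollbacker,sysop', len(cur_ug) < len(user_group), so change
# becomes 'sysop' and the event is stored as 'granted_flag'; with the lengths
# reversed the stripped flag would be stored as 'removed_flag'.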
elif event_type == 'alterblocks':
try: editor_monthly_autoblocks[event_user_id] = editor_monthly_autoblocks[event_user_id]+1
except: editor_monthly_autoblocks[event_user_id] = 1
event_is_bot_by = values[13]
if event_is_bot_by != '':
user_id_bot_dict[event_user_id] = event_is_bot_by
# print (event_user_text, event_is_bot_by)
event_user_is_anonymous = values[17]
if event_user_is_anonymous == 'true' or event_user_id == '': continue # the dump stores booleans as the strings 'true'/'false'
event_user_registration_date = values[18]
if event_user_id not in editor_registration_date:
if event_user_registration_date != '':
editor_registration_date[event_user_id] = event_user_registration_date
####### ---------
# MONTHLY EDITS COUNTER
try: editor_monthly_edits[event_user_id] = editor_monthly_edits[event_user_id]+1
except: editor_monthly_edits[event_user_id] = 1
# MONTHLY NAMESPACES EDIT COUNTER
if page_namespace == '0':
try: editor_monthly_namespace0_edits[event_user_id] = editor_monthly_namespace0_edits[event_user_id]+1
except: editor_monthly_namespace0_edits[event_user_id] = 1
elif page_namespace == '1':
try: editor_monthly_namespace1_edits[event_user_id] = editor_monthly_namespace1_edits[event_user_id]+1
except: editor_monthly_namespace1_edits[event_user_id] = 1
elif page_namespace == '2':
try: editor_monthly_namespace2_edits[event_user_id] = editor_monthly_namespace2_edits[event_user_id]+1
except: editor_monthly_namespace2_edits[event_user_id] = 1
elif page_namespace == '3':
try: editor_monthly_namespace3_edits[event_user_id] = editor_monthly_namespace3_edits[event_user_id]+1
except: editor_monthly_namespace3_edits[event_user_id] = 1
elif page_namespace == '4':
try: editor_monthly_namespace4_edits[event_user_id] = editor_monthly_namespace4_edits[event_user_id]+1
except: editor_monthly_namespace4_edits[event_user_id] = 1
elif page_namespace == '5':
try: editor_monthly_namespace5_edits[event_user_id] = editor_monthly_namespace5_edits[event_user_id]+1
except: editor_monthly_namespace5_edits[event_user_id] = 1
elif page_namespace == '6':
try: editor_monthly_namespace6_edits[event_user_id] = editor_monthly_namespace6_edits[event_user_id]+1
except: editor_monthly_namespace6_edits[event_user_id] = 1
elif page_namespace == '7':
try: editor_monthly_namespace7_edits[event_user_id] = editor_monthly_namespace7_edits[event_user_id]+1
except: editor_monthly_namespace7_edits[event_user_id] = 1
elif page_namespace == '8':
try: editor_monthly_namespace8_edits[event_user_id] = editor_monthly_namespace8_edits[event_user_id]+1
except: editor_monthly_namespace8_edits[event_user_id] = 1
elif page_namespace == '9':
try: editor_monthly_namespace9_edits[event_user_id] = editor_monthly_namespace9_edits[event_user_id]+1
except: editor_monthly_namespace9_edits[event_user_id] = 1
elif page_namespace == '10':
try: editor_monthly_namespace10_edits[event_user_id] = editor_monthly_namespace10_edits[event_user_id]+1
except: editor_monthly_namespace10_edits[event_user_id] = 1
elif page_namespace == '11':
try: editor_monthly_namespace11_edits[event_user_id] = editor_monthly_namespace11_edits[event_user_id]+1
except: editor_monthly_namespace11_edits[event_user_id] = 1
elif page_namespace == '12':
try: editor_monthly_namespace12_edits[event_user_id] = editor_monthly_namespace12_edits[event_user_id]+1
except: editor_monthly_namespace12_edits[event_user_id] = 1
elif page_namespace == '13':
try: editor_monthly_namespace13_edits[event_user_id] = editor_monthly_namespace13_edits[event_user_id]+1
except: editor_monthly_namespace13_edits[event_user_id] = 1
elif page_namespace == '14':
try: editor_monthly_namespace14_edits[event_user_id] = editor_monthly_namespace14_edits[event_user_id]+1
except: editor_monthly_namespace14_edits[event_user_id] = 1
elif page_namespace == '15':
try: editor_monthly_namespace15_edits[event_user_id] = editor_monthly_namespace15_edits[event_user_id]+1
except: editor_monthly_namespace15_edits[event_user_id] = 1
# MONTHLY USER PAGE/USER PAGE TALK PAGE EDIT COUNTER
page_title = values[25]
if event_user_text == page_title and page_namespace == '2':
try:
editor_monthly_user_page_edit_count[event_user_id] = editor_monthly_user_page_edit_count[event_user_id]+1
except:
editor_monthly_user_page_edit_count[event_user_id] = 1
if event_user_text == page_title and page_namespace == '3':
try:
editor_monthly_user_page_talk_page_edit_count[event_user_id] = editor_monthly_user_page_talk_page_edit_count[event_user_id]+1
except:
editor_monthly_user_page_talk_page_edit_count[event_user_id] = 1
# MONTHLY AVERAGE SECONDS BETWEEN EDITS COUNTER
seconds_since_last_edit = values[22]
if seconds_since_last_edit != None and seconds_since_last_edit != '':
seconds_since_last_edit = int(seconds_since_last_edit)
editor_seconds_since_last_edit[event_user_id] = seconds_since_last_edit
if seconds_since_last_edit != None and seconds_since_last_edit != '':
if event_user_id != '' and event_user_id != 0:
try:
editor_monthly_seconds_between_edits[event_user_id].append(seconds_since_last_edit)
except:
editor_monthly_seconds_between_edits[event_user_id] = [seconds_since_last_edit]
# COUNTING DAYS
current_year_month_day = datetime.datetime.strptime(event_timestamp_dt.strftime('%Y-%m-%d'),'%Y-%m-%d')
if current_year_month_day != last_edit_year_month_day:
try: editor_monthly_editing_days[event_user_id]+=1
except: editor_monthly_editing_days[event_user_id]=1
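# Example (hypothetical timestamps): edits at '2021-03-05 10:00:00' and
# '2021-03-05 23:59:00' share one year-month-day, so only the first of the
# two increments editor_monthly_editing_days; a '2021-03-06' edit adds a day.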
#######--------- --------- --------- --------- --------- ---------
# CHECK MONTH CHANGE AND INSERT MONTHLY EDITS/NAMESPACES EDITS/SECONDS
current_year_month = datetime.datetime.strptime(event_timestamp_dt.strftime('%Y-%m'),'%Y-%m')
if last_year_month != current_year_month and last_year_month != 0:
lym = last_year_month.strftime('%Y-%m')
print (current_year_month, lym, cym)
lym_sp = lym.split('-')
ly = lym_sp[0]
lm = lym_sp[1]
lym_days = calendar.monthrange(int(ly),int(lm))[1]
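# calendar.monthrange(year, month) returns (weekday_of_day_1, days_in_month);
# e.g. calendar.monthrange(2021, 2)[1] == 28. lym_days feeds the
# 100*(days/lym_days) percentage stored with 'monthly_editing_days' below.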
monthly_articles = []
monthly_user_actions = []
monthly_reverts = []
monthly_edits = []
monthly_seconds = []
namespaces = []
for user_id, edits in editor_monthly_created_articles.items():
monthly_articles.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_created_articles', lym, ''))
for user_id, edits in editor_monthly_deleted_articles.items():
monthly_articles.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_deleted_articles', lym, ''))
for user_id, edits in editor_monthly_moved_articles.items():
monthly_articles.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_moved_articles', lym, ''))
for user_id, edits in editor_monthly_undeleted_articles.items():
monthly_articles.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_undeleted_articles', lym, ''))
for user_id, edits in editor_monthly_accounts_created.items():
monthly_user_actions.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_accounts_created', lym, ''))
for user_id, edits in editor_monthly_users_renamed.items():
monthly_user_actions.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_users_renamed', lym, ''))
for user_id, edits in editor_monthly_autoblocks.items():
monthly_user_actions.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_autoblocks', lym, ''))
for user_id, edits in editor_monthly_edits_reverted.items():
monthly_user_actions.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_reverted', lym, ''))
for user_id, edits in editor_monthly_reverts_made.items():
monthly_user_actions.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_reverts_made', lym, ''))
for user_id, edits in editor_monthly_edits.items():
monthly_edits.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits', lym, ''))
for user_id, seconds_list in editor_monthly_seconds_between_edits.items():
if seconds_list == None: continue
elif len(seconds_list) > 1:
average_seconds = np.mean(seconds_list)
monthly_seconds.append((user_id, user_id_user_name_dict[user_id], average_seconds, None, 'monthly_average_seconds_between_edits', lym, ''))
for user_id, edits in editor_monthly_namespace0_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns0_main', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace1_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns1_talk', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace2_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns2_user', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace3_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns3_user_talk', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace4_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns4_project', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace5_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns5_project_talk', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace6_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns6_file', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace7_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns7_file_talk', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace8_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns8_mediawiki', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace9_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns9_mediawiki_talk', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace10_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns10_template', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace11_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns11_template_talk', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace12_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns12_help', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace13_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns13_help_talk', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace14_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns14_category', lym, ''))
except: pass
for user_id, edits in editor_monthly_namespace15_edits.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_edits_ns15_category_talk', lym, ''))
except: pass
for user_id, edits in editor_monthly_user_page_edit_count.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_user_page_edit_count', lym, ''))
except: pass
for user_id, edits in editor_monthly_user_page_talk_page_edit_count.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], edits, None, 'monthly_user_page_talk_page_edit_count', lym, ''))
except: pass
for user_id, days in editor_monthly_editing_days.items():
try: namespaces.append((user_id, user_id_user_name_dict[user_id], days, 100*(days/lym_days), 'monthly_editing_days', lym, ''))
except: pass
for key, data in editor_user_group_dict_timestamp.items():
user_id = key[0]
timestamp = key[1]
metric_name = data[0]
flags = data[1]
old_flags = data[2]
try:
if metric_name == 'removed_flag':
namespaces.append((user_id, user_id_user_name_dict[user_id], old_flags, None, metric_name, lym, timestamp))
# print ((user_id, user_id_user_name_dict[user_id], old_flags, None, metric_name, lym, timestamp))
else:
namespaces.append((user_id, user_id_user_name_dict[user_id], flags, None, metric_name, lym, timestamp))
# print ((user_id, user_id_user_name_dict[user_id], flags, None, metric_name, lym, timestamp))
except: pass
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, rel_value, metric_name, year_month, timestamp) VALUES (?,?,?,?,?,?,?);'
cursor.executemany(query,monthly_articles)
cursor.executemany(query,monthly_edits)
cursor.executemany(query,monthly_reverts)
cursor.executemany(query,monthly_user_actions)
cursor.executemany(query,namespaces)
cursor.executemany(query,monthly_seconds)
conn.commit()
monthly_articles = []
monthly_user_actions = []
monthly_reverts = []
monthly_edits = []
monthly_seconds = []
namespaces = []
editor_monthly_created_articles = {}
editor_monthly_deleted_articles = {}
editor_monthly_moved_articles = {}
editor_monthly_undeleted_articles = {}
editor_monthly_accounts_created = {}
editor_monthly_users_renamed = {}
editor_monthly_autoblocks = {}
editor_monthly_edits_reverted = {}
editor_monthly_reverts_made = {}
editor_monthly_namespace0_edits = {}
editor_monthly_namespace1_edits = {}
editor_monthly_namespace2_edits = {}
editor_monthly_namespace3_edits = {}
editor_monthly_namespace4_edits = {}
editor_monthly_namespace5_edits = {}
editor_monthly_namespace6_edits = {}
editor_monthly_namespace7_edits = {}
editor_monthly_namespace8_edits = {}
editor_monthly_namespace9_edits = {}
editor_monthly_namespace10_edits = {}
editor_monthly_namespace11_edits = {}
editor_monthly_namespace12_edits = {}
editor_monthly_namespace13_edits = {}
editor_monthly_namespace14_edits = {}
editor_monthly_namespace15_edits = {}
editor_monthly_edits = {}
editor_monthly_seconds_between_edits = {}
editor_monthly_user_page_edit_count = {}
editor_monthly_user_page_talk_page_edit_count = {}
editor_monthly_editing_days = {}
editor_user_group_dict_timestamp = {}
last_year_month = current_year_month
####### ---------
# SURVIVAL MEASURES
event_user_first_edit_timestamp = values[20]
if event_user_id not in editor_first_edit_timestamp:
editor_first_edit_timestamp[event_user_id] = event_user_first_edit_timestamp
if event_user_first_edit_timestamp == '' or event_user_first_edit_timestamp == None:
event_user_first_edit_timestamp = editor_first_edit_timestamp[event_user_id]
if event_user_first_edit_timestamp != '' and event_user_id not in survived_dict:
event_user_first_edit_timestamp_dt = datetime.datetime.strptime(event_user_first_edit_timestamp[:len(event_user_first_edit_timestamp)-2],'%Y-%m-%d %H:%M:%S')
# thresholds
first_edit_timestamp_1day_dt = (event_user_first_edit_timestamp_dt + relativedelta.relativedelta(days=1))
first_edit_timestamp_7days_dt = (event_user_first_edit_timestamp_dt + relativedelta.relativedelta(days=7))
first_edit_timestamp_1months_dt = (event_user_first_edit_timestamp_dt + relativedelta.relativedelta(months=1))
first_edit_timestamp_2months_dt = (event_user_first_edit_timestamp_dt + relativedelta.relativedelta(months=2))
try: ec = user_id_edit_count[event_user_id]
except: ec = 1
# at 1 day
if event_timestamp_dt >= first_edit_timestamp_1day_dt:
survival_measures.append((event_user_id, event_user_text, ec, None, 'edit_count_24h', first_edit_timestamp_1day_dt.strftime('%Y-%m'),first_edit_timestamp_1day_dt.strftime('%Y-%m-%d %H:%M:%S')))
if event_user_id in editor_user_page_edit_count:
survival_measures.append((event_user_id, event_user_text, editor_user_page_edit_count[event_user_id], None, 'user_page_edit_count_24h', first_edit_timestamp_1day_dt.strftime('%Y-%m'),first_edit_timestamp_1day_dt.strftime('%Y-%m-%d %H:%M:%S')))
if event_user_id in editor_user_page_talk_page_edit_count:
survival_measures.append((event_user_id, event_user_text, editor_user_page_talk_page_edit_count[event_user_id], None, 'user_page_talk_page_edit_count_24h', first_edit_timestamp_1day_dt.strftime('%Y-%m'),first_edit_timestamp_1day_dt.strftime('%Y-%m-%d %H:%M:%S')))
# at 7 days
if event_timestamp_dt >= first_edit_timestamp_7days_dt:
survival_measures.append((event_user_id, event_user_text, ec, None, 'edit_count_7d', first_edit_timestamp_7days_dt.strftime('%Y-%m'),first_edit_timestamp_7days_dt.strftime('%Y-%m-%d %H:%M:%S')))
# at 1 month
if event_timestamp_dt >= first_edit_timestamp_1months_dt:
survival_measures.append((event_user_id, event_user_text, ec, None, 'edit_count_30d', first_edit_timestamp_1months_dt.strftime('%Y-%m'),first_edit_timestamp_1months_dt.strftime('%Y-%m-%d %H:%M:%S')))
if event_user_id in editor_user_page_edit_count:
survival_measures.append((event_user_id, event_user_text, editor_user_page_edit_count[event_user_id], None, 'user_page_edit_count_1month', first_edit_timestamp_1day_dt.strftime('%Y-%m'),first_edit_timestamp_1day_dt.strftime('%Y-%m-%d %H:%M:%S')))
if event_user_id in editor_user_page_talk_page_edit_count:
survival_measures.append((event_user_id, event_user_text, editor_user_page_talk_page_edit_count[event_user_id], None, 'user_page_talk_page_edit_count_1month', first_edit_timestamp_1day_dt.strftime('%Y-%m'),first_edit_timestamp_1day_dt.strftime('%Y-%m-%d %H:%M:%S')))
# at 2 months
if event_timestamp_dt >= first_edit_timestamp_2months_dt:
survival_measures.append((event_user_id, event_user_text, ec, None, 'edit_count_60d', first_edit_timestamp_2months_dt.strftime('%Y-%m'),first_edit_timestamp_2months_dt.strftime('%Y-%m-%d %H:%M:%S')))
survived_dict[event_user_id]=event_user_text
try: del user_id_edit_count[event_user_id]
except: pass
try: del editor_user_page_talk_page_edit_count[event_user_id]
except: pass
try: del editor_user_page_edit_count[event_user_id]
except: pass
# USER PAGE EDIT COUNT, ADD ONE MORE EDIT.
if event_user_id not in survived_dict:
if event_user_text == page_title and page_namespace == '2':
try:
editor_user_page_edit_count[event_user_id] = editor_user_page_edit_count[event_user_id]+1
except:
editor_user_page_edit_count[event_user_id] = 1
if event_user_text == page_title and page_namespace == '3':
try:
editor_user_page_talk_page_edit_count[event_user_id] = editor_user_page_talk_page_edit_count[event_user_id]+1
except:
editor_user_page_talk_page_edit_count[event_user_id] = 1
# EDIT COUNT, ADD ONE MORE EDIT.
event_user_revision_count = values[21]
if event_user_revision_count != '':
user_id_edit_count[event_user_id] = event_user_revision_count
elif event_user_id in user_id_edit_count:
user_id_edit_count[event_user_id] = int(user_id_edit_count[event_user_id]) + 1
else:
user_id_edit_count[event_user_id] = 1
####### ---------
# SURVIVAL MEASURES INSERT
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, rel_value, metric_name, year_month, timestamp) VALUES (?,?,?,?,?,?,?);'
cursor.executemany(query,survival_measures)
conn.commit()
survival_measures = []
# MONTHLY EDITS/SECONDS INSERT (LAST ROUND)
lym = last_year_month.strftime('%Y-%m')
if lym != cym:
monthly_edits = []
for event_user_id, edits in editor_monthly_edits.items():
monthly_edits.append((event_user_id, user_id_user_name_dict[event_user_id], edits, None, 'monthly_edits', lym, ''))
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, rel_value, metric_name, year_month, timestamp) VALUES (?,?,?,?,?,?,?);'
cursor.executemany(query,monthly_edits)
conn.commit()
editor_monthly_edits = {}
monthly_edits = []
monthly_seconds = []
for event_user_id, seconds_list in editor_monthly_seconds_between_edits.items():
if seconds_list == None: continue
elif len(seconds_list) > 1:
average_seconds = np.mean(seconds_list)
monthly_seconds.append((event_user_id, user_id_user_name_dict[event_user_id], average_seconds, None, 'monthly_average_seconds_between_edits', lym, ''))
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, rel_value, metric_name, year_month, timestamp) VALUES (?,?,?,?,?,?,?);'
cursor.executemany(query,monthly_seconds)
conn.commit()
editor_monthly_seconds_between_edits = {}
monthly_seconds = []
# USER CHARACTERISTICS INSERT
user_characteristics1 = []
user_characteristics2 = []
for user_id, user_name in user_id_user_name_dict.items():
try: user_flags = user_id_user_groups_dict[user_id]
except: user_flags = ''
try: bot = user_id_bot_dict[user_id]
except: bot = 'editor'
if user_id in survived_dict: survived60d = '1'
else: survived60d = '0'
try: registration_date = editor_registration_date[user_id]
except: registration_date = ''
if registration_date == '': # THIS IS SOMETHING WE "ASSUME" BECAUSE THERE ARE MANY ACCOUNTS WITHOUT A REGISTRATION DATE.
try: registration_date = editor_first_edit_timestamp[user_id]
except: registration_date = ''
if registration_date != '': year_month_registration = datetime.datetime.strptime(registration_date[:len(registration_date)-2],'%Y-%m-%d %H:%M:%S').strftime('%Y-%m')
else: year_month_registration = ''
try: fe = editor_first_edit_timestamp[user_id]
except: fe = ''
try:
le = editor_last_edit_timestamp[user_id]
year_last_edit = datetime.datetime.strptime(le[:len(le)-2],'%Y-%m-%d %H:%M:%S').strftime('%Y')
except:
le = ''
year_last_edit = ''
if fe != '':
year_month = datetime.datetime.strptime(fe[:len(fe)-2],'%Y-%m-%d %H:%M:%S').strftime('%Y-%m')
year_first_edit = datetime.datetime.strptime(fe[:len(fe)-2],'%Y-%m-%d %H:%M:%S').strftime('%Y')
if 2001 <= int(year_first_edit) < 2006: lustrum_first_edit = '2001-2005'
if 2006 <= int(year_first_edit) < 2011: lustrum_first_edit = '2006-2010'
if 2011 <= int(year_first_edit) < 2016: lustrum_first_edit = '2011-2015'
if 2016 <= int(year_first_edit) < 2021: lustrum_first_edit = '2016-2020'
if 2021 <= int(year_first_edit) < 2026: lustrum_first_edit = '2021-2025'
fe_d = datetime.datetime.strptime(fe[:len(fe)-2],'%Y-%m-%d %H:%M:%S')
else:
year_month = ''
year_first_edit = ''
lustrum_first_edit = ''
fe_d = ''
if le != '':
le_d = datetime.datetime.strptime(le[:len(le)-2],'%Y-%m-%d %H:%M:%S')
days_since_last_edit = (event_timestamp_dt - le_d).days
else:
le_d = ''
days_since_last_edit = ''
if fe != '' and le != '': lifetime_days = (le_d - fe_d).days
else: lifetime_days = 0
try: se = editor_seconds_since_last_edit[user_id]
except: se = ''
user_characteristics1.append((user_id, user_name, registration_date, year_month_registration, fe, year_month, year_first_edit, lustrum_first_edit, survived60d))
user_characteristics2.append((bot, user_flags, le, year_last_edit, lifetime_days, days_since_last_edit, se, user_id, user_name))
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editors (user_id, user_name, registration_date, year_month_registration, first_edit_timestamp, year_month_first_edit, year_first_edit, lustrum_first_edit, survived60d) VALUES (?,?,?,?,?,?,?,?,?);'
cursor.executemany(query,user_characteristics1)
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editors (bot, user_flags, last_edit_timestamp, year_last_edit, lifetime_days, days_since_last_edit, seconds_between_last_two_edits, user_id, user_name) VALUES (?,?,?,?,?,?,?,?,?);'
cursor.executemany(query,user_characteristics2)
query = 'UPDATE '+languagecode+'wiki_editors SET bot = ?, user_flags = ?, last_edit_timestamp = ?, year_last_edit = ?, lifetime_days = ?, days_since_last_edit = ?, seconds_between_last_two_edits = ? WHERE user_id = ? AND user_name = ?;'
cursor.executemany(query,user_characteristics2)
conn.commit()
print (len(user_characteristics1),len(user_characteristics2))
user_characteristics1 = []
user_characteristics2 = []
# insert or ignore + update
user_id_bot_dict = {}
user_id_user_groups_dict = {}
editor_last_edit_timestamp = {}
editor_seconds_since_last_edit = {}
# insert or ignore
editor_first_edit_timestamp = {}
editor_registration_date = {}
# END OF THE DUMP!!!!
print ('end of the dump.')
print ('*')
print (str(datetime.timedelta(seconds=time.time() - iterTime)))
# AGGREGATED METRICS (EDIT COUNTS)
monthly_aggregated_metrics = {
'monthly_edits': 'edit_count',
'monthly_user_page_edit_count': 'edit_count_editor_user_page',
'monthly_user_page_talk_page_edit_count': 'edit_count_editor_user_page_talk_page',
'monthly_edits_ns0_main': 'edit_count_ns0_main',
'monthly_edits_ns1_talk': 'edit_count_ns1_talk',
'monthly_edits_ns2_user': 'edit_count_ns2_user',
'monthly_edits_ns3_user_talk': 'edit_count_ns3_user_talk',
'monthly_edits_ns4_project': 'edit_count_ns4_project',
'monthly_edits_ns5_project_talk': 'edit_count_ns5_project_talk',
'monthly_edits_ns6_file': 'edit_count_edits_ns6_file',
'monthly_edits_ns7_file_talk': 'edit_count_ns7_file_talk',
'monthly_edits_ns8_mediawiki': 'edit_count_ns8_mediawiki',
'monthly_edits_ns9_mediawiki_talk': 'edit_count_ns9_mediawiki_talk',
'monthly_edits_ns10_template': 'edit_count_ns10_template',
'monthly_edits_ns11_template_talk': 'edit_count_ns11_template_talk',
'monthly_edits_ns12_help': 'edit_count_ns12_help',
'monthly_edits_ns13_help_talk': 'edit_count_ns13_help_talk',
'monthly_edits_ns14_category': 'edit_count_ns14_category',
'monthly_edits_ns15_category_talk': 'edit_count_ns15_category_talk',
'monthly_created_articles': 'created_articles_count',
'monthly_deleted_articles': 'deleted_articles_count',
'monthly_moved_articles': 'moved_articles_count',
'monthly_undeleted_articles': 'undeleted_articles_count',
'monthly_accounts_created': 'created_accounts_count',
'monthly_users_renamed': 'users_renamed_count',
'monthly_autoblocks': 'autoblocks_count',
'monthly_edits_reverted': 'edits_reverted_count',
'monthly_reverts_made': 'reverts_made_count'
}
conn2 = sqlite3.connect(databases_path + community_health_metrics_db); cursor2 = conn2.cursor()
for monthly_metric_name, metric_name in monthly_aggregated_metrics.items():
edit_counts = []
query = 'SELECT user_id, user_name, SUM(abs_value) FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "'+monthly_metric_name+'" GROUP BY 2;'
for row in cursor.execute(query):
edit_counts.append((row[0],row[1],row[2],metric_name,lym))
if metric_name == 'edit_count':
ec = row[2]
bin_v = ''
if ec > 1 and ec <= 100: bin_v = '1-100'
if ec > 100 and ec <= 500: bin_v = '101-500'
if ec > 500 and ec <= 1000: bin_v = '501-1000'
if ec > 1000 and ec <= 5000: bin_v = '1001-5000'
if ec > 5000 and ec <= 10000: bin_v = '5001-10000'
if ec > 10000 and ec <= 50000: bin_v = '10001-50000'
if ec > 50000 and ec <= 100000: bin_v = '50001-100000'
if ec > 100000 and ec <= 500000: bin_v = '100001-500000'
if ec > 500000 and ec <= 1000000: bin_v = '500001-1000000'
if ec > 1000000: bin_v = '1000001+'
if bin_v != '':
edit_counts.append((row[0],row[1],bin_v,'edit_count_bin',lym))
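# Binning example: a lifetime edit_count of 750 lands in '501-1000' and one of
# 12500 in '10001-50000'; as written, the bins are left-exclusive and
# right-inclusive.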
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, metric_name, year_month) VALUES (?,?,?,?,?);';
cursor2.executemany(query,edit_counts)
conn2.commit()
edit_counts = []
query = 'SELECT user_id, user_name, AVG(abs_value) FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "monthly_edits" GROUP BY 2;'
for row in cursor.execute(query):
ec = row[2]
bin_v = ''
if ec > 1 and ec <= 5: bin_v = '1-5'
if ec > 5 and ec <= 10: bin_v = '6-10'
if ec > 10 and ec <= 100: bin_v = '11-100'
if ec > 100 and ec <= 500: bin_v = '101-500'
if ec > 500 and ec <= 1000: bin_v = '501-1000'
if ec > 1000 and ec <= 5000: bin_v = '1001-5000'
if ec > 5000: bin_v = '5001+'
if bin_v != '':
edit_counts.append((row[0],row[1],bin_v,'monthly_edit_count_bin',lym))
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, metric_name, year_month) VALUES (?,?,?,?,?);';
cursor2.executemany(query,edit_counts)
conn2.commit()
edit_counts = []
# print ('stop monthly edit count'); input('stop');
# FLAGS UPDATE
# Getting the highest flag
conn = sqlite3.connect(databases_path + community_health_metrics_db); cursor = conn.cursor()
query = 'SELECT user_flags, count(user_id) FROM '+languagecode+'wiki_editors WHERE user_flags != "" GROUP BY 1;'
flags_count_dict = {}
for row in cursor.execute(query):
flags = row[0]
count = row[1]
if ',' in flags:
fs = flags.split(',')
for x in fs:
try:
flags_count_dict[x]+=count
except:
flags_count_dict[x]=count
else:
try:
flags_count_dict[flags]+=count
except:
flags_count_dict[flags]=count
print ('Number of editors for each flag')
print (flags_count_dict)
# print ('in')
# input('')
flag_ranks = {
'confirmed':1,'ipblock-exempt':1,
'filemover':2,'accountcreator':2,'autopatrolled':2,'reviewer':2,'autoreviewer':2,'rollbacker':2,'abusefilter':2,'abusefilter-helper':2,'interface-admin':2,'eventcoordinator':2,'extendedconfirmed':2,'extendedmover':2, 'massmessage-sender':2, 'patroller':2, 'researcher':2, 'templateeditor':2,
'sysop':3,'bureaucrat':3.5,
'checkuser':4,'oversight':4.5,
'steward':5.5, 'import':5,
'founder':6
}
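# Tie-breaking sketch (hypothetical editor): for user_flags 'sysop,bureaucrat'
# the rank dict keeps 'bureaucrat' (3.5 > 3); if several flags share the top
# rank, the loop below falls back to flags_count_dict and keeps the flag held
# by the most editors in this community.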
query = 'SELECT user_id, user_flags, user_name FROM '+languagecode+'wiki_editors WHERE user_flags != "";'
params = []
user_id_flag = {}
for row in cursor.execute(query):
user_id = row[0]
user_flags = row[1]
user_name = row[2]
highest_rank = {}
highest_count = {}
if ',' in user_flags:
uf = user_flags.split(',')
for x in uf:
if x in flag_ranks and 'bot' not in x:
val = flag_ranks[x]
highest_rank[x] = val
if len(highest_rank) > 1:
maxval = max(highest_rank.values())
highest_rank = {key:val for key, val in highest_rank.items() if val == maxval} # we are choosing the flag of highest rank.
if len(highest_rank)>1:
for x in highest_rank.keys():
val = flags_count_dict[x]
highest_count[x] = val
maxval = max(highest_count.values())
highest_count = {key:val for key, val in highest_count.items() if val == maxval} # we are choosing the flag that exists more in the community.
f = list(highest_count.keys())[0]
params.append((f, user_id, user_name))
user_id_flag[user_id]=f
else:
f = list(highest_rank.keys())[0]
params.append((f, user_id, user_name))
user_id_flag[user_id]=f
else:
if user_flags in flag_ranks and 'bot' not in user_flags:
params.append((user_flags, user_id, user_name))
user_id_flag[user_id]=user_flags
query = 'UPDATE '+languagecode+'wiki_editors SET highest_flag = ? WHERE user_id = ? AND user_name = ?;'
cursor.executemany(query,params)
conn.commit()
print ('Updated the editors table with highest flag')
# let's update the highest_flag_year_month
query = 'SELECT year_month, user_id, user_name, abs_value FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "granted_flag";'
params2 = []
conn = sqlite3.connect(databases_path + community_health_metrics_db); cursor = conn.cursor()
for row in cursor.execute(query):
year_month=row[0]
user_id=row[1]
user_name=row[2]
flag = row[3]
try:
ex_flag = user_id_flag[user_id]
except:
continue
# print ((ex_flag, flag,year_month,user_id,user_name))
if ex_flag in flag:
# print ((ex_flag, flag,year_month,user_id,user_name))
params2.append((year_month,user_id,user_name))
# print (params2)
query = 'UPDATE '+languagecode+'wiki_editors SET highest_flag_year_month = ? WHERE user_id = ? AND user_name = ?;'
cursor.executemany(query,params2)
conn.commit()
print ('Updated the editors table with the year month they obtained the highest flag.')
# print(list(highest_flag.values()).count('bureaucrat'))
# print ('stop highest flag year month'); input('stop');
# If an editor has been granted the 'bot' flag, even if it has been taken away, it must be a flag.
query = 'SELECT user_id, user_name FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "granted_flag" AND abs_value LIKE "%bot";'
params = []
for row in cursor.execute(query):
username = row[1]
if 'bot' in username:
bottype = 'name,group'
else:
bottype = 'group'
params.append((bottype,row[0],username))
query = 'UPDATE '+languagecode+'wiki_editors SET bot = ? WHERE user_id = ? AND user_name = ?;'
cursor.executemany(query,params)
conn.commit()
print ('Updated the table with the bots from flag.')
def gender(languagecode):
functionstartTime = time.time()
function_name = 'gender '+languagecode
print (function_name)
conn = sqlite3.connect(databases_path + community_health_metrics_db); cursor = conn.cursor()
gender_params = []
query = 'SELECT up_value, user_name, up_user FROM user INNER JOIN user_properties ON user_id = up_user WHERE up_property = "gender";'
mysql_con_read = wikilanguages_utils.establish_mysql_connection_read(languagecode); mysql_cur_read = mysql_con_read.cursor()
mysql_cur_read.execute(query)
rows = mysql_cur_read.fetchall()
for row in rows:
gender_params.append((row[0], row[2], row[1])) # reorder to match the UPDATE below: gender, user_id, user_name
if len(gender_params) % 10000 == 0:
query = 'UPDATE '+languagecode+'wiki_editors SET gender = ? WHERE user_id = ? AND user_name = ?;'
cursor.executemany(query,gender_params)
conn.commit()
gender_params = []
duration = str(datetime.timedelta(seconds=time.time() - functionstartTime))
print(languagecode+' '+ function_name+' '+ duration)
# gender(languagecode)
duration = str(datetime.timedelta(seconds=time.time() - functionstartTime))
print(languagecode+' '+ function_name+' '+ duration)
def editor_metrics_db_iterator(languagecode):
functionstartTime = time.time()
function_name = 'editor_metrics_db_iterator '+languagecode
print (function_name)
d_paths, cym = get_mediawiki_paths(languagecode)
cycle_year_month = cym
print (cycle_year_month)
conn = sqlite3.connect(databases_path + community_health_metrics_db); cursor = conn.cursor()
# MONTHLY EDITS LOOP
query = 'SELECT abs_value, year_month, user_id, user_name FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "monthly_edits" ORDER BY user_name, year_month'
# AND user_name in ("Toniher","Marcmiquel","Barcelona","TaronjaSatsuma","Kippelboy")
# print (query)
user_count = 0
old_user_id = ''
old_edits = None
expected_year_month_dt = ''
# parameters = []
# editors_edits_baseline_parameters = []
active_months = 0
active_months_row = 0
total_months = 0
max_active_months_row = 0
inactivity_periods = 0
inactive_months = 0
max_inactive_months_row = 0
total_edits = []
edits_increase_decrease = 0
try: os.remove(databases_path +'temporary_editor_metrics.txt')
except: pass
edfile2 = open(databases_path+'temporary_editor_metrics.txt', "w")
for row in cursor.execute(query):
edits=row[0]
current_year_month = row[1]
cur_user_id = row[2]
cur_user_name = row[3]
if cur_user_id != old_user_id and old_user_id != '':
user_count += 1
cycle_year_month_dt = datetime.datetime.strptime(cycle_year_month,'%Y-%m')
months_since_last_edit = (cycle_year_month_dt.year - current_year_month_dt.year) * 12 + cycle_year_month_dt.month - current_year_month_dt.month
if months_since_last_edit < 0: months_since_last_edit = 0
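# Worked example (hypothetical months): a cycle of 2021-03 with the last
# monthly_edits row in 2020-11 gives (2021-2020)*12 + 3 - 11 = 4 months.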
if months_since_last_edit > 0:
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(months_since_last_edit)+'\t'+" "+'\t'+"months_since_last_edit"+'\t'+old_year_month+'\t'+" "+'\n')
if months_since_last_edit > max_inactive_months_row:
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(months_since_last_edit)+'\t'+" "+'\t'+"max_inactive_months_row"+'\t'+old_year_month+'\t'+" "+'\n')
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(1)+'\t'+" "+'\t'+"over_past_max_inactive_months_row"+'\t'+cycle_year_month+'\t'+" "+'\n')
else:
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(max_inactive_months_row)+'\t'+" "+'\t'+"max_inactive_months_row"+'\t'+old_year_month+'\t'+" "+'\n')
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(inactivity_periods)+'\t'+" "+'\t'+"inactivity_periods"+'\t'+old_year_month+'\t'+" "+'\n')
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(active_months)+'\t'+" "+'\t'+"active_months"+'\t'+old_year_month+'\t'+" "+'\n')
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(max_active_months_row)+'\t'+" "+'\t'+"max_active_months_row"+'\t'+old_year_month+'\t'+" "+'\n')
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(total_months)+'\t'+" "+'\t'+"total_months"+'\t'+old_year_month+'\t'+" "+'\n')
active_months = 0
total_months = 0
active_months_row = 0
max_active_months_row = 0
inactivity_periods = 0
inactive_months = 0
max_inactive_months_row = 0
total_edits = []
old_edits = None
current_year_month_dt = datetime.datetime.strptime(current_year_month,'%Y-%m')
# here there is a change of month
# if the month is not the expected one
if expected_year_month_dt != current_year_month_dt and expected_year_month_dt != '' and old_user_id == cur_user_id:
inactivity_periods += 1
while expected_year_month_dt < current_year_month_dt:
# print (expected_year_month_dt, current_year_month_dt)
inactive_months = inactive_months + 1
expected_year_month_dt = (expected_year_month_dt + relativedelta.relativedelta(months=1))
total_months = total_months + 1
if inactive_months > max_inactive_months_row:
max_inactive_months_row = inactive_months
if active_months_row > max_active_months_row:
max_active_months_row = active_months_row
edfile2.write(str(cur_user_id)+'\t'+cur_user_name+'\t'+str(inactive_months)+'\t'+" "+'\t'+"inactive_months_row"+'\t'+current_year_month+'\t'+" "+'\n')
active_months_row = 1
inactive_months = 0
edits_increase_decrease = 1
edfile2.write(str(cur_user_id)+'\t'+cur_user_name+'\t'+str(edits_increase_decrease)+'\t'+" "+'\t'+"monthly_edits_increasing_decreasing"+'\t'+current_year_month+'\t'+" "+'\n')
else:
active_months_row = active_months_row + 1
if active_months_row > 1:
edfile2.write(str(cur_user_id)+'\t'+cur_user_name+'\t'+str(active_months_row)+'\t'+" "+'\t'+"active_months_row"+'\t'+current_year_month+'\t'+" "+'\n')
if active_months_row > max_active_months_row:
max_active_months_row = active_months_row
if inactive_months == 0 and total_months == 0:
edfile2.write(str(cur_user_id)+'\t'+cur_user_name+'\t'+str(-1)+'\t'+" "+'\t'+"inactive_months_row"+'\t'+current_year_month+'\t'+" "+'\n')
else:
edfile2.write(str(cur_user_id)+'\t'+cur_user_name+'\t'+str(inactive_months)+'\t'+" "+'\t'+"inactive_months_row"+'\t'+current_year_month+'\t'+" "+'\n')
if old_edits != None:
if old_edits > edits:
if edits_increase_decrease <= 0: edits_increase_decrease = edits_increase_decrease - 1
else: edits_increase_decrease = -1
elif old_edits < edits:
if edits_increase_decrease >= 0: edits_increase_decrease = edits_increase_decrease + 1
else: edits_increase_decrease = 1
else:
edits_increase_decrease = 0
edfile2.write(str(cur_user_id)+'\t'+cur_user_name+'\t'+str(edits_increase_decrease)+'\t'+" "+'\t'+"monthly_edits_increasing_decreasing"+'\t'+current_year_month+'\t'+" "+'\n')
else:
edits_increase_decrease = 1
if total_edits != []:
median_total_edits = np.median(total_edits)
edfile2.write(str(cur_user_id)+'\t'+cur_user_name+'\t'+str((100*edits/median_total_edits - 100))+'\t'+" "+'\t'+"monthly_edits_to_baseline"+'\t'+current_year_month+'\t'+" "+'\n')
# if cur_user_name == '-Erick-':
# print (str(cur_user_id)+'\t'+cur_user_name+','+str((100*edits/median_total_edits - 100))+'\t'+" "+'\t'+"monthly_edits_to_baseline"+'\t'+current_year_month+'\n')
total_edits.append(edits)
old_edits = edits
total_months = total_months + 1
active_months = active_months + 1
old_year_month = current_year_month
expected_year_month_dt = (datetime.datetime.strptime(old_year_month,'%Y-%m') + relativedelta.relativedelta(months=1))
old_user_id = cur_user_id
old_user_name = cur_user_name
# print ('# update: ',old_user_id, old_user_name, active_months, max_active_months_row, max_inactive_months_row, total_months)
# input('')
cycle_year_month_dt = datetime.datetime.strptime(cycle_year_month,'%Y-%m')
if current_year_month_dt == None:
print ('The table is empty. ERROR.')
months_since_last_edit = (cycle_year_month_dt.year - current_year_month_dt.year) * 12 + cycle_year_month_dt.month - current_year_month_dt.month
if months_since_last_edit < 0: months_since_last_edit = 0
if months_since_last_edit > 0:
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(months_since_last_edit)+'\t'+" "+'\t'+"months_since_last_edit"+'\t'+old_year_month+'\t'+" "+'\n')
if months_since_last_edit > max_inactive_months_row:
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(months_since_last_edit)+'\t'+" "+'\t'+"max_inactive_months_row"+'\t'+old_year_month+'\t'+" "+'\n')
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(1)+'\t'+" "+'\t'+"over_past_max_inactive_months_row"+'\t'+cycle_year_month+'\t'+" "+'\n')
else:
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(max_inactive_months_row)+'\t'+" "+'\t'+"max_inactive_months_row"+'\t'+old_year_month+'\t'+" "+'\n')
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(inactivity_periods)+'\t'+" "+'\t'+"inactivity_periods"+'\t'+old_year_month+'\t'+" "+'\n')
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(active_months)+'\t'+" "+'\t'+"active_months"+'\t'+old_year_month+'\t'+" "+'\n')
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(max_active_months_row)+'\t'+" "+'\t'+"max_active_months_row"+'\t'+old_year_month+'\t'+" "+'\n')
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(total_months)+'\t'+" "+'\t'+"total_months"+'\t'+old_year_month+'\t'+" "+'\n')
conn = sqlite3.connect(databases_path + community_health_metrics_db); cursor = conn.cursor()
edfile2.close() # flush the buffered writes before reading the file back
a_file = open(databases_path+"temporary_editor_metrics.txt")
editors_metrics_parameters = csv.reader(a_file, delimiter="\t", quotechar = '|')
# edfile2 = open(databases_path+'temporary_editor_metrics.txt', "r")
# editors_metrics_parameters = []
# while True:
# user_count+=1
# line = edfile2.readline()
# char = line.strip().split('\t')
# # print (char)
# try:
# metric_name = char[4]
# # print (metric_name)
# if metric_name != '': editors_metrics_parameters.append((char[0],char[1],char[2],char[3],metric_name,char[5]))
# except:
# pass
# if user_count % 100000 == 0:
# query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, rel_value, metric_name, year_month) VALUES (?,?,?,?,?,?);'
# cursor.executemany(query,editors_metrics_parameters)
# # print (len(editors_metrics_parameters))
# conn.commit()
# editors_metrics_parameters = []
# if not line: break
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, rel_value, metric_name, year_month, timestamp) VALUES (?,?,?,?,?,?,?);'
cursor.executemany(query,editors_metrics_parameters)
conn.commit()
os.remove(databases_path +'temporary_editor_metrics.txt')
editors_metrics_parameters = []
print ('done with the monthly edits.')
conn = sqlite3.connect(databases_path + community_health_metrics_db); cursor = conn.cursor()
conn2 = sqlite3.connect(databases_path + community_health_metrics_db); cursor2 = conn2.cursor()
# MONTHLY EDITING DAYS LOOP
query = 'SELECT abs_value, year_month, user_id, user_name FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "monthly_editing_days" ORDER BY user_id, year_month'
# print (query)
user_count = 0
old_user_id = ''
expected_year_month_dt = ''
editing_days = []
sum_editing_days = 0
try: os.remove(databases_path +'temporary_editors.txt')
except: pass
try: os.remove(databases_path +'temporary_editor_metrics.txt')
except: pass
edfile = open(databases_path+'temporary_editors.txt', "w")
edfile2 = open(databases_path+'temporary_editor_metrics.txt', "w")
for row in cursor.execute(query):
monthly_editing_days=row[0]
current_year_month = row[1]
cur_user_id = row[2]
cur_user_name = row[3]
# print (row)
if cur_user_id != old_user_id and old_user_id != '':
user_count += 1
sum_editing_days = int(sum(editing_days))
edfile.write(str(sum_editing_days)+'\t'+str(old_user_id)+'\t'+old_user_name+'\n')
if editing_days != []:
    median_editing_days = np.median(editing_days)
    if median_editing_days == 0:
        value = 0
    else:
        # compare the previous user's own last month (editing_days[-1])
        # against that user's median, not the row just read for the next user
        value = (100*editing_days[-1]/median_editing_days - 100)
    edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(value)+'\t'+" "+'\t'+"monthly_editing_days_to_baseline"+'\t'+old_year_month+'\n')
sum_editing_days = 0
editing_days = []
current_year_month_dt = datetime.datetime.strptime(current_year_month,'%Y-%m')
if expected_year_month_dt != current_year_month_dt and expected_year_month_dt != '' and old_user_id == cur_user_id:
while expected_year_month_dt < current_year_month_dt:
editing_days.append(0)
expected_year_month_dt = (expected_year_month_dt + relativedelta.relativedelta(months=1))
editing_days.append(monthly_editing_days)
old_year_month = current_year_month
expected_year_month_dt = (datetime.datetime.strptime(old_year_month,'%Y-%m') + relativedelta.relativedelta(months=1))
old_user_id = cur_user_id
old_user_name = cur_user_name
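# The gap-filling block above zero-pads months with no activity so the median
# baseline is computed over a contiguous month series. A minimal standalone
# sketch of the same idea (the dates are illustrative assumptions):
# expected = datetime.datetime(2020, 2, 1)
# current = datetime.datetime(2020, 4, 1)
# while expected < current:
#     editing_days.append(0)  # 2020-02 and 2020-03 had no editing days
#     expected = expected + relativedelta.relativedelta(months=1)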
# print ('out of the loop')
# print (user_count)
# last row percent baseline
if editing_days != []:
median_editing_days = np.median(editing_days)
if median_editing_days == 0:
value = 0
else:
value = (100*monthly_editing_days/median_editing_days - 100)
edfile2.write(str(old_user_id)+'\t'+old_user_name+'\t'+str(value)+'\t'+" "+'\t'+"monthly_editing_days_to_baseline"+'\t'+current_year_month+'\n')
sum_editing_days = sum(editing_days)
edfile.write(str(sum_editing_days)+'\t'+str(old_user_id)+'\t'+old_user_name+'\n')
edfile.close()  # flush both temporary files before reading them back
edfile2.close()
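# "monthly_editing_days_to_baseline" is the percent deviation of the last month
# from the user's own median: value = 100*last/median - 100. For example, with
# editing_days = [2, 4, 6] the median is 4, so a 6-day month gives
# 100*6/4 - 100 = +50, i.e. 50% above the user's baseline.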
# BASELINE MEASURES
# edfile = open(databases_path+'temporary_editor_metrics.txt', "r")
# editors_metrics_parameters = []
a_file = open(databases_path+"temporary_editor_metrics.txt")
editors_metrics_parameters = csv.reader(a_file, delimiter="\t", quotechar = '|')
# while True:
# user_count+=1
# line = edfile.readline()
# char = line.strip().split('\t')
# try:
# metric_name = char[4]
# if metric_name != '': editors_metrics_parameters.append((char[0],char[1],char[2],char[3],metric_name,char[5]))
# except:
# pass
# if user_count % 100000 == 0:
# query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, rel_value, metric_name, year_month) VALUES (?,?,?,?,?,?);'
# cursor2.executemany(query,editors_metrics_parameters)
# conn2.commit()
# editors_metrics_parameters = []
# if not line: break
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, rel_value, metric_name, year_month) VALUES (?,?,?,?,?,?);'
cursor2.executemany(query,editors_metrics_parameters)
conn2.commit()
a_file.close()
os.remove(databases_path +'temporary_editor_metrics.txt')
# EDITING DAYS
# sum
# edfile = open(databases_path+'temporary_editors.txt', "r")
# editors_characteristics_parameters = []
a_file = open(databases_path+"temporary_editors.txt")
editors_characteristics_parameters = csv.reader(a_file, delimiter="\t", quotechar = '|')
# while True:
# user_count+=1
# line = edfile.readline()
# char = line.strip().split('\t')
# try:
# editors_characteristics_parameters.append((char[0],char[1],char[2]))
# except:
# pass
# if user_count % 100000 == 0:
# query = 'UPDATE '+languagecode+'wiki_editors SET editing_days = ? WHERE user_id = ? AND user_name = ?;'
# cursor2.executemany(query,editors_characteristics_parameters)
# conn2.commit()
# editors_characteristics_parameters = []
# if not line: break
query = 'UPDATE '+languagecode+'wiki_editors SET editing_days = ? WHERE user_id = ? AND user_name = ?;'
cursor2.executemany(query,editors_characteristics_parameters)
conn2.commit()
a_file.close()
os.remove(databases_path +'temporary_editors.txt')
editors_characteristics_parameters = []
# percent
query = 'UPDATE '+languagecode+'wiki_editors SET percent_editing_days = (100.0*editing_days/lifetime_days);'
cursor.execute(query)
conn.commit()
print ('done with the monthly editing days.')
duration = str(datetime.timedelta(seconds=time.time() - functionstartTime))
#### --------- --------- --------- --------- --------- --------- --------- --------- ---------
# # OVER PAST MAX INACTIVE MONTHS ROW
# query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, rel_value, metric_name, year_month, timestamp) SELECT i1.user_id, i1.user_name, (i1.abs_value - i2.abs_value), i1.rel_value, "over_past_max_inactive_months_row", i2.year_month, i2.timestamp FROM '+languagecode+'wiki_editor_metrics i1 INNER JOIN '+languagecode+'wiki_editor_metrics i2 ON i1.user_id = i2.user_id WHERE i1.metric_name = "max_inactive_months_row" AND i2.metric_name = "months_since_last_edit";'
# cursor.execute(query)
# conn.commit()
# OVER EDIT BIN AVERAGE PAST MAX INACTIVE MONTHS ROW
edit_bin_average_past_max_inactive_months_row = {}
query = 'SELECT i2.abs_value, AVG(i1.abs_value) FROM '+languagecode+'wiki_editor_metrics i1 INNER JOIN '+languagecode+'wiki_editor_metrics i2 ON i1.user_id = i2.user_id WHERE i1.metric_name = "max_inactive_months_row" AND i2.metric_name = "edit_count_bin" GROUP BY i2.abs_value;';
for row in cursor.execute(query):
edit_bin_average_past_max_inactive_months_row[row[0]]=row[1]
for edit_bin, average in edit_bin_average_past_max_inactive_months_row.items():
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, rel_value, metric_name, year_month, timestamp) SELECT i1.user_id, i1.user_name, (? - i2.abs_value), i1.rel_value, "over_edit_bin_average_past_max_inactive_months_row", i2.year_month, i2.timestamp FROM '+languagecode+'wiki_editor_metrics i1 INNER JOIN '+languagecode+'wiki_editor_metrics i2 ON i1.user_id = i2.user_id WHERE i1.metric_name = "edit_count_bin" AND i1.abs_value = ? AND i2.metric_name = "months_since_last_edit";'
cursor.execute(query,(average, edit_bin))
conn.commit()
# OVER MONTHLY EDIT BIN AVERAGE PAST MAX INACTIVE MONTHS ROW
edit_bin_average_past_max_inactive_months_row = {}
query = 'SELECT i2.abs_value, AVG(i1.abs_value) FROM '+languagecode+'wiki_editor_metrics i1 INNER JOIN '+languagecode+'wiki_editor_metrics i2 ON i1.user_id = i2.user_id WHERE i1.metric_name = "max_inactive_months_row" AND i2.metric_name = "monthly_edit_count_bin" GROUP BY i2.abs_value;';
for row in cursor.execute(query):
edit_bin_average_past_max_inactive_months_row[row[0]]=row[1]
for edit_bin, average in edit_bin_average_past_max_inactive_months_row.items():
query = 'INSERT OR IGNORE INTO '+languagecode+'wiki_editor_metrics (user_id, user_name, abs_value, rel_value, metric_name, year_month, timestamp) SELECT i1.user_id, i1.user_name, (? - i2.abs_value), i1.rel_value, "over_monthly_edit_bin_average_past_max_inactive_months_row", i2.year_month, i2.timestamp FROM '+languagecode+'wiki_editor_metrics i1 INNER JOIN '+languagecode+'wiki_editor_metrics i2 ON i1.user_id = i2.user_id WHERE i1.metric_name = "monthly_edit_count_bin" AND i1.abs_value = ? AND i2.metric_name = "months_since_last_edit";'
cursor.execute(query,(average, edit_bin))
conn.commit()
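# The two blocks above score each editor against the average
# max_inactive_months_row of their (monthly) edit-count bin: with ? bound to a
# bin average of e.g. 4.2 months, an editor whose months_since_last_edit is 6
# gets abs_value = 4.2 - 6 = -1.8, so negative values flag editors already past
# their bin's typical inactivity streak.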
duration = str(datetime.timedelta(seconds=time.time() - functionstartTime))
print(languagecode+' '+ function_name+' '+ duration)
def community_metrics_db_iterator(languagecode):
functionstartTime = time.time()
function_name = 'community_metrics_db_iterator '+languagecode
print (function_name)
conn = sqlite3.connect(databases_path + community_health_metrics_db); cursor = conn.cursor()
d_paths, cym = get_mediawiki_paths(languagecode)
cycle_year_month = cym
print (cycle_year_month)
query_cm = 'INSERT OR IGNORE INTO '+languagecode+'wiki_community_metrics (year_month, topic, m1, m1_calculation, m1_value, m2, m2_calculation, m2_value, m1_count, m2_count) VALUES (?,?,?,?,?,?,?,?,?,?);'
def participation():
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000
parameters = []
edit_bins_count = {}
query = 'SELECT count(user_id), abs_value, year_month FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "edit_count_bin" GROUP by abs_value;'
for row in cursor.execute(query):
m1_count = row[0]
m1_value = row[1]
year_month = row[2]
edit_bins_count[m1_value] = m1_count
parameters.append((year_month, 'editor_participation', 'total_edits', 'bin', m1_value, None, None, None, m1_count, None))
cursor.executemany(query_cm,parameters)
conn.commit()
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000 monthly_edits threshold 5
parameters = []
query = 'SELECT count(e1.user_id), e1.abs_value, e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "edit_count_bin" AND e2.metric_name = "monthly_edits" AND e2.abs_value >= 5 GROUP BY e1.abs_value ORDER BY e1.abs_value DESC;'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
year_month = row[2]
m1_count = edit_bins_count[m1_value]
parameters.append((year_month, 'editor_participation', 'total_edits', 'bin', m1_value, 'monthly_edits', 'threshold', 5, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000 year_first_edit bin 2001-2021
parameters = []
query = 'SELECT count(e1.user_id), e1.abs_value, e2.year_first_edit, e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "edit_count_bin" GROUP by e1.abs_value, e2.year_first_edit;'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
m2_value = row[2]
m1_count = edit_bins_count[m1_value]
year_month = row[3]
parameters.append((year_month, 'editor_participation', 'total_edits', 'bin', m1_value, 'year_first_edit', 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000 lustrum_first_edit bin 2001, 2006, 2011, 2016, 2021
parameters = []
query = 'SELECT count(e1.user_id), e1.abs_value, e2.lustrum_first_edit, e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "edit_count_bin" GROUP by e1.abs_value, e2.lustrum_first_edit;'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
m2_value = row[2]
m1_count = edit_bins_count[m1_value]
year_month = row[3]
parameters.append((year_month, 'editor_participation', 'total_edits', 'bin', m1_value, 'lustrum_first_edit', 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
editing_days = {(1,100):'1-100',(101,500):'101-500', (501,1000):'501-1000', (1001,1500):'1001-1500', (1501,2500):'1501-2500', (2501,3500):'2501-3500', (3501,4500):'3501-4500', (4501,5500):'4501-5500', (5501,6500):'5501-6500', (6501,7500):'6501-7500'}
percent_editing_days = {(0,10):'0-10',(11,20):'11-20',(21,30):'21-30',(31,40):'31-40',(41,50):'41-50',(51,60):'51-60',(61,70):'61-70',(71,80):'71-80',(81,90):'81-90',(91,100):'91-100'}
active_months = {(0,0):'0', (217, 228): '217-228', (301, 312): '301-312', (277, 288): '277-288', (25, 36): '25-36', (241, 252): '241-252', (109, 120): '109-120', (85, 96): '85-96', (61, 72): '61-72', (205, 216): '205-216', (289, 300): '289-300', (193, 204): '193-204', (73, 84): '73-84', (49, 60): '49-60', (37, 48): '37-48', (265, 276): '265-276', (181, 192): '181-192', (145, 156): '145-156', (13, 24): '13-24', (253, 264): '253-264', (133, 144): '133-144', (1, 12): '1-12', (121, 132): '121-132', (169, 180): '169-180', (157, 168): '157-168', (229, 240): '229-240', (97, 108): '97-108'}
bin_dicts = {'editing_days':editing_days, 'percent_editing_days':percent_editing_days, 'active_months':active_months}
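# A minimal sketch of how a raw value would be mapped onto one of the interval
# labels above (the helper name is an illustrative assumption):
# def bin_label(value, bin_dict):
#     for (low, high), label in bin_dict.items():
#         if low <= value <= high:
#             return label
#     return None  # value falls outside every interval
# e.g. bin_label(42, percent_editing_days) would return '41-50'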
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000 editing_days bin 0_100, 100_500, 500_1000, 1000-1500, 1500-2500, 2500-3500, 3500-5000, 5000-6500, 6500-7500…
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000 percent_editing_days bin 1-10 to 100
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000 active_months bin 1-10, 10-20, 30-40,... to 150
parameters = []
for variable_name, bin_dict in bin_dicts.items():
for interval, label in bin_dict.items():
query = 'SELECT count(e1.user_id), e1.abs_value, e1.year_month, "'+label+'" FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "edit_count_bin" AND e2.'+variable_name+' BETWEEN '+str(interval[0])+' AND '+str(interval[1])+' GROUP by e1.abs_value;'
for row in cursor.execute(query):
    m2_count = row[0]
    m1_value = row[1]
    m2_value = label
    year_month = row[2]
    m1_count = edit_bins_count[m1_value]
    parameters.append((year_month, 'editor_participation', 'total_edits', 'bin', m1_value, variable_name, 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000 flag name sysop, autopatrolled, bureaucrat, etc.
parameters = []
query = 'SELECT count(e1.user_id), e1.abs_value, e2.highest_flag, e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "edit_count_bin" GROUP by e1.abs_value, e2.highest_flag;'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
m2_value = row[2]
year_month = row[3]
m1_count = edit_bins_count[m1_value]
parameters.append((year_month, 'editor_participation', 'total_edits', 'bin', m1_value, 'highest_flag', 'name', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
print ('editor_participation')
def flags():
# flags granted_flag name sysop, autopatrolled, bureaucrat, etc.
# flags removed_flag name sysop, autopatrolled, bureaucrat, etc.
for variablef in ['granted_flag','removed_flag']:
parameters = []
year_month = cycle_year_month
query = 'SELECT count(user_id), abs_value, year_month FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "'+variablef+'" AND abs_value != "bot" GROUP BY year_month, abs_value;'
for row in cursor.execute(query):
m1_count = row[0]
m1_value = row[1]
year_month = row[2]
parameters.append((year_month, 'editor_flags', 'highest_flag', 'name', m1_value, None, None, None, m1_count, None))
cursor.executemany(query_cm,parameters)
conn.commit()
# flags highest_flag name sysop, autopatrolled, bureaucrat, etc.
parameters = []
highest_flag_count = {}
year_month = cycle_year_month
query = 'SELECT count(user_id), highest_flag FROM '+languagecode+'wiki_editors GROUP by highest_flag;'
for row in cursor.execute(query):
m1_count = row[0]
m1_value = row[1]
highest_flag_count[m1_value] = m1_count
parameters.append((year_month, 'editor_flags', 'highest_flag', 'name', m1_value, None, None, None, m1_count, None))
cursor.executemany(query_cm,parameters)
conn.commit()
# flags highest_flag name sysop, autopatrolled, bureaucrat, etc. monthly_edits threshold 5
parameters = []
query = 'SELECT count(e1.user_id), e1.highest_flag, e2.year_month FROM '+languagecode+'wiki_editors e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e2.metric_name = "monthly_edits" AND e2.abs_value >= 5 GROUP BY e1.highest_flag, e2.year_month ORDER BY e1.highest_flag, e2.year_month ASC;'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
year_month = row[2]
m1_count = highest_flag_count[m1_value]
parameters.append((year_month, 'editor_flags', 'highest_flag', 'name', m1_value, 'monthly_edits', 'threshold', 5, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# flags highest_flag name sysop, autopatrolled, bureaucrat, etc. highest_flag_year_month bin 2001-2021 x: g1, y: g2 (last year_month)
# flags highest_flag name sysop, autopatrolled, bureaucrat, etc. year_first_edit bin 2001-2021
# flags highest_flag name sysop, autopatrolled, bureaucrat, etc. lustrum_first_edit bin 2001, 2006, 2011, 2016, 2021
m2s = ['highest_flag_year_month', 'year_first_edit','lustrum_first_edit']
for g2 in m2s:
parameters = []
query = 'SELECT count(user_id), highest_flag, '+g2+' FROM '+languagecode+'wiki_editors GROUP BY highest_flag, '+g2
year_month = cycle_year_month
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
m2_value = row[2]
m1_count = highest_flag_count[m1_value]
parameters.append((year_month, 'editor_flags', 'highest_flag', 'name', m1_value, g2, 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
editing_days = {(1,100):'1-100',(101,500):'101-500', (501,1000):'501-1000', (1001,1500):'1001-1500', (1501,2500):'1501-2500', (2501,3500):'2501-3500', (3501,4500):'3501-4500', (4501,5500):'4501-5500', (5501,6500):'5501-6500', (6501,7500):'6501-7500'}
percent_editing_days = {(0,10):'0-10',(11,20):'11-20',(21,30):'21-30',(31,40):'31-40',(41,50):'41-50',(51,60):'51-60',(61,70):'61-70',(71,80):'71-80',(81,90):'81-90',(91,100):'91-100'}
active_months = {(0,0):'0', (217, 228): '217-228', (301, 312): '301-312', (277, 288): '277-288', (25, 36): '25-36', (241, 252): '241-252', (109, 120): '109-120', (85, 96): '85-96', (61, 72): '61-72', (205, 216): '205-216', (289, 300): '289-300', (193, 204): '193-204', (73, 84): '73-84', (49, 60): '49-60', (37, 48): '37-48', (265, 276): '265-276', (181, 192): '181-192', (145, 156): '145-156', (13, 24): '13-24', (253, 264): '253-264', (133, 144): '133-144', (1, 12): '1-12', (121, 132): '121-132', (169, 180): '169-180', (157, 168): '157-168', (229, 240): '229-240', (97, 108): '97-108'}
bin_dicts = {'editing_days':editing_days, 'percent_editing_days':percent_editing_days}
# flags highest_flag name sysop, autopatrolled, bureaucrat, etc. editing_days bin 1-100, 100-200, etc.
# flags highest_flag name sysop, autopatrolled, bureaucrat, etc. percent_editing_days bin 1-10 to 100
for variable_name, bin_dict in bin_dicts.items():
parameters = []
for interval, label in bin_dict.items():
query = 'SELECT count(e1.user_id), e1.abs_value, e1.year_month, "'+label+'" FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "edit_count_bin" AND e2.'+variable_name+' BETWEEN '+str(interval[0])+' AND '+str(interval[1])+' GROUP by e1.abs_value;'
for row in cursor.execute(query):
    m2_count = row[0]
    m1_value = row[1]
    m2_value = label
    year_month = row[2]
    m1_count = highest_flag_count[m1_value]
    parameters.append((year_month, 'editor_flags', 'highest_flag', 'name', m1_value, variable_name, 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# flags highest_flag name sysop, autopatrolled, bureaucrat, etc. active_months bin 1-10, 10-20, 30-40,... to 150
parameters = []
for interval, label in active_months.items():
    query = 'SELECT count(e1.user_id), e1.abs_value, e1.year_month, "'+label+'" FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "edit_count_bin" AND e2.metric_name = "active_months" AND e2.abs_value BETWEEN '+str(interval[0])+' AND '+str(interval[1])+' GROUP by e1.abs_value;'
    for row in cursor.execute(query):
        m2_count = row[0]
        m1_value = row[1]
        m2_value = label
        year_month = row[2]
        m1_count = highest_flag_count[m1_value]
        parameters.append((year_month, 'editor_flags', 'highest_flag', 'name', m1_value, "active_months", 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# flags highest_flag name sysop, autopatrolled, bureaucrat, etc. total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000
parameters = []
query = 'SELECT count(e1.user_id), e1.highest_flag, e2.abs_value, e2.year_month FROM '+languagecode+'wiki_editors e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e2.metric_name = "edit_count_bin" GROUP BY e1.highest_flag, e2.abs_value;'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
m2_value = row[2]
year_month = row[3]
m1_count = highest_flag_count[m1_value]
parameters.append((year_month, 'editor_flags', 'highest_flag', 'name', m1_value, 'total_edits', 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
print ('editor_flags')
# ACTIVE CONTRIBUTORS (THRESHOLD, NO BINS)
# active_editors, active_editors_5, active_editors_10, active_editors_50, active_editors_100, active_editors_500, active_editors_1000
def active_editors():
# active_editors monthly_edits threshold 1, 5, 10, 50, 100, 500, 1000
active_editors_5_year_month = {}
values = [1,5,10,50,100,500,1000,5000]
parameters = []
for v in values:
query = 'SELECT count(distinct user_id), year_month FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "monthly_edits" AND abs_value >= '+str(v)+' GROUP BY year_month ORDER BY year_month'
for row in cursor.execute(query):
# print (row)
m1_count=row[0];
year_month=row[1]
if year_month == '': continue
parameters.append((year_month, 'active_editors', 'monthly_edits', 'threshold', v, None, None, None, m1_count, None))
if v == 5: active_editors_5_year_month[year_month] = m1_count
cursor.executemany(query_cm,parameters)
conn.commit()
# active_editors monthly_edits bin 1, 5, 10, 50, 100, 500, 1000
parameters = []
values = [1,5,10,50,100,500,1000,5000]
for x in range(0,len(values)):
v = values[x]
if x < len(values)-1:
w = values[x+1]
query = 'SELECT count(distinct user_id), year_month FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "monthly_edits" AND abs_value >= '+str(v)+' AND abs_value < '+str(w)+' GROUP BY year_month ORDER BY year_month'
w = w - 1
else:
w = 'inf'
query = 'SELECT count(distinct user_id), year_month FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "monthly_edits" AND abs_value >= '+str(v)+' GROUP BY year_month ORDER BY year_month'
# print (query)
for row in cursor.execute(query):
# print (row)
m1_count=row[0];
year_month=row[1]
if year_month == '': continue
parameters.append((year_month, 'active_editors', 'monthly_edits', 'bin', str(v)+'_'+str(w) , None, None, None, m1_count, None))
cursor.executemany(query_cm,parameters)
conn.commit()
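# A sketch of the consecutive-threshold binning above, where each bin runs
# from one threshold up to the next minus one and the last bin is open-ended
# (a zip-based illustration, not the original code):
# edges = [1, 5, 10, 50, 100, 500, 1000, 5000]
# bins = [(v, w - 1) for v, w in zip(edges, edges[1:])] + [(edges[-1], 'inf')]
# # -> [(1, 4), (5, 9), ..., (1000, 4999), (5000, 'inf')]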
# active_editors monthly_edits threshold 5 year_first_edit bin 2001-2021
query = 'SELECT count(e1.user_id), e1.year_month, e2.year_first_edit FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 on e1.user_id = e2.user_id WHERE e1.metric_name = "monthly_edits" AND e1.abs_value >= 5 GROUP BY e1.year_month, e2.year_first_edit;'
parameters = []
for row in cursor.execute(query):
# print (row)
m2_count=row[0];
year_month=row[1]
year_first_edit=row[2]
if year_month == '': continue
parameters.append((year_month, 'active_editors', 'monthly_edits', 'threshold', 5, 'year_first_edit', 'bin', year_first_edit, active_editors_5_year_month[year_month], m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# active_editors monthly_edits threshold 5 lustrum_first_edit bin 2001, 2006, 2011, 2016, 2021
query = 'SELECT count(e1.user_id), e1.year_month, e2.lustrum_first_edit FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 on e1.user_id = e2.user_id WHERE e1.metric_name = "monthly_edits" AND e1.abs_value >= 5 GROUP BY e1.year_month, e2.lustrum_first_edit;'
parameters = []
for row in cursor.execute(query):
# print (row)
m2_count=row[0];
year_month=row[1]
lustrum_first_edit=row[2]
if year_month == '': continue
parameters.append((year_month, 'active_editors', 'monthly_edits', 'threshold', 5, 'lustrum_first_edit', 'bin', lustrum_first_edit, active_editors_5_year_month[year_month], m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# active_editors monthly_edits threshold 5 active_months bin 1-10, 10-20, 30-40,... to 150
active_months = {(0,0):'0', (217, 228): '217-228', (301, 312): '301-312', (277, 288): '277-288', (25, 36): '25-36', (241, 252): '241-252', (109, 120): '109-120', (85, 96): '85-96', (61, 72): '61-72', (205, 216): '205-216', (289, 300): '289-300', (193, 204): '193-204', (73, 84): '73-84', (49, 60): '49-60', (37, 48): '37-48', (265, 276): '265-276', (181, 192): '181-192', (145, 156): '145-156', (13, 24): '13-24', (253, 264): '253-264', (133, 144): '133-144', (1, 12): '1-12', (121, 132): '121-132', (169, 180): '169-180', (157, 168): '157-168', (229, 240): '229-240', (97, 108): '97-108'}
parameters = []
for interval, label in active_months.items():
query = 'SELECT count(e1.user_id), e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "monthly_edits" AND e1.abs_value >= 5 AND e2.metric_name = "active_months" AND e2.abs_value BETWEEN '+str(interval[0])+' AND '+str(interval[1])+' GROUP by e1.year_month, e1.abs_value;'
for row in cursor.execute(query):
m2_count = row[0]
year_month = row[1]
m2_value = label
m1_count = active_editors_5_year_month[year_month]
parameters.append((year_month, 'active_editors', 'monthly_edits', 'threshold', 5, "active_months", 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# active_editors monthly_edits threshold 5 active_months_row bin 2, 3, 4, 5, …
# active_editors monthly_edits threshold 5 max_active_months_row bin 2, 3, 4, 5, …
# active_editors monthly_edits threshold 5 inactive_months_row bin -1, 0, 1, 2, 3, 4, 5, … 12, …
# active_editors monthly_edits threshold 5 max_inactive_months_row bin 2, 3, 4, 5, …
m2s = ['inactivity_periods','active_months_row', 'inactive_months_row','max_active_months_row','max_inactive_months_row', 'monthly_edits_increasing_decreasing']
for m2 in m2s:
parameters = []
query = 'SELECT count(e1.user_id), e2.abs_value, e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "monthly_edits" AND e1.abs_value >= 5 AND e2.metric_name = "'+m2+'" GROUP BY e1.year_month, e2.abs_value;'
for row in cursor.execute(query):
m2_count = row[0]
m2_value = row[1]
year_month = row[2]
m1_count = active_editors_5_year_month[year_month]
parameters.append((year_month, 'active_editors', 'monthly_edits', 'threshold', 5, m2, 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# active_editors monthly_edits threshold 5 total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000
parameters = []
query = 'SELECT count(e1.user_id), e2.abs_value, e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "monthly_edits" AND e1.abs_value >= 5 AND e2.metric_name = "edit_count_bin" GROUP BY e2.abs_value;'
for row in cursor.execute(query):
m2_count = row[0]
m2_value = row[1]
year_month = row[2]
m1_count = active_editors_5_year_month[year_month]
parameters.append((year_month, 'active_editors', 'monthly_edits', 'threshold', 5, 'total_edits', 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# active_editors monthly_edits threshold 5 flag name sysop, autopatrolled, bureaucrat, etc.
parameters = []
query = 'SELECT count(e1.user_id), e2.highest_flag, e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "monthly_edits" AND e1.abs_value >= 5 GROUP by e1.abs_value, e2.highest_flag;'
for row in cursor.execute(query):
m2_count = row[0]
m2_value = row[1]
year_month = row[2]
m1_count = active_editors_5_year_month[year_month]
parameters.append((year_month, 'active_editors', 'monthly_edits', 'threshold', 5, 'highest_flag', 'name', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# active_editors monthly_edits threshold 5 monthly_editing_days bin 1-10 to 100
parameters = []
query = 'SELECT count(e1.user_id), e2.abs_value, e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "monthly_edits" AND e1.abs_value >= 5 AND e2.metric_name = "monthly_editing_days" GROUP BY e1.year_month, e2.abs_value;'
for row in cursor.execute(query):
m2_count = row[0]
m2_value = row[1]
year_month = row[2]
m1_count = active_editors_5_year_month[year_month]
parameters.append((year_month, 'active_editors', 'monthly_edits', 'threshold', 5, 'monthly_editing_days', 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
print ('active_editors')
def retention():
# monthly_registered_first_edit
parameters = []
registered_baseline = {}
query = 'SELECT count(distinct user_id), year_month_registration FROM '+languagecode+'wiki_editors GROUP BY 2 ORDER BY 2 ASC;'
for row in cursor.execute(query):
value=row[0];
year_month=row[1]
if year_month == '': continue
try: registered_baseline[year_month] = int(value)
except: pass
parameters.append((year_month, 'editor_retention', 'register', 'threshold', 1, None, None, None, value, None))
retention_baseline = {}
query = 'SELECT count(distinct user_id), year_month_first_edit FROM '+languagecode+'wiki_editors GROUP BY 2 ORDER BY 2 ASC;'
for row in cursor.execute(query):
value=row[0];
year_month=row[1]
if year_month == '': continue
try: retention_baseline[year_month] = int(value)
except: pass
parameters.append((year_month, 'editor_retention', 'first_edit', 'threshold', 1, None, None, None, value, None))
try:
m1_count = registered_baseline[year_month]
except:
m1_count = 0
parameters.append((year_month, 'editor_retention', 'register', 'threshold', 1, 'first_edit', 'threshold', 1, m1_count, value))
cursor.executemany(query_cm,parameters)
conn.commit()
parameters = []
queries_retention_dict = {}
# RETENTION
# number of editors who edited at least once 24h after the first edit
queries_retention_dict['24h'] = 'SELECT count(distinct ch.user_id), ch.year_month_first_edit FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "edit_count_24h" AND ce.abs_value > 0 GROUP BY 2 ORDER BY 2 ASC;'
# number of editors who edited at least once 7 days after the first edit
queries_retention_dict['7d'] = 'SELECT count(distinct ch.user_id), ch.year_month_first_edit FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "edit_count_7d" AND ce.abs_value > 0 GROUP BY 2 ORDER BY 2 ASC;'
# number of editors who edited at least once 30 days after the first edit
queries_retention_dict['30d'] = 'SELECT count(distinct ch.user_id), ch.year_month_first_edit FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "edit_count_30d" AND ce.abs_value > 0 GROUP BY 2 ORDER BY 2 ASC;'
# number of editors who edited at least once 60 days after the first edit
queries_retention_dict['60d'] = 'SELECT count(distinct ch.user_id), ch.year_month_first_edit FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "edit_count_60d" AND ce.abs_value > 0 GROUP BY 2 ORDER BY 2 ASC;'
# number of editors who edited at least once 365 days after the first edit
queries_retention_dict['365d'] = 'SELECT count(distinct user_id), year_month_first_edit FROM '+languagecode+'wiki_editors WHERE lifetime_days >= 365 GROUP BY 2 ORDER BY 1;'
# number of editors who edited at least once 730 days after the first edit
queries_retention_dict['730d'] = 'SELECT count(distinct user_id), year_month_first_edit FROM '+languagecode+'wiki_editors WHERE lifetime_days >= 730 GROUP BY 2 ORDER BY 1;'
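# The four windowed queries above differ only in the metric-name suffix, so
# they could equally be generated in a loop (an illustrative alternative, not
# the original code):
# for window in ('24h', '7d', '30d', '60d'):
#     queries_retention_dict[window] = (
#         'SELECT count(distinct ch.user_id), ch.year_month_first_edit FROM '
#         + languagecode + 'wiki_editors ch INNER JOIN ' + languagecode
#         + 'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE '
#         'ce.metric_name = "edit_count_' + window + '" AND ce.abs_value > 0 '
#         'GROUP BY 2 ORDER BY 2 ASC;')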
for metric_name, query in queries_retention_dict.items():
for row in cursor.execute(query):
value=row[0];
year_month=row[1]
if year_month == '': continue
try: m1_count = retention_baseline[year_month]
except: m1_count = 0
parameters.append((year_month, 'editor_retention', 'first_edit', 'threshold', 1, 'edited_after_time', 'threshold', metric_name, m1_count, value))
try: m1_count = registered_baseline[year_month]
except: m1_count = 0
parameters.append((year_month, 'editor_retention', 'register', 'threshold', 1, 'edited_after_time', 'threshold', metric_name, m1_count, value))
cursor.executemany(query_cm,parameters)
conn.commit()
parameters = []
queries_retention_dict = {}
# USER PAGES
# number of editors who edited their user_page at least once during the first 24h after their first edit
queries_retention_dict['editors_edited_user_page_d24h_afe'] = 'SELECT count(distinct ch.user_id), ch.year_month_first_edit FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "user_page_edit_count_24h" AND ce.abs_value > 0 GROUP BY 2 ORDER BY 2 ASC;'
# number of editors who edited their user_page at least once during the first 30 days after their first edit
queries_retention_dict['editors_edited_user_page_d30d_afe'] = 'SELECT count(distinct ch.user_id), ch.year_month_first_edit FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "user_page_edit_count_1month" AND ce.abs_value > 0 GROUP BY 2 ORDER BY 2 ASC;'
# number of editors who edited their user_page at least once
queries_retention_dict['editors_edited_user_page_afe'] = 'SELECT count(distinct ch.user_id), ch.year_month_first_edit FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "monthly_edits_ns2_user" AND ce.abs_value > 0 GROUP BY 2 ORDER BY 2 ASC;'
for metric_name, query in queries_retention_dict.items():
for row in cursor.execute(query):
value=row[0];
year_month=row[1]
if year_month == '': continue
try: m1_count = retention_baseline[year_month]
except: m1_count = 0
parameters.append((year_month, 'editor_retention', 'first_edit', 'threshold', 1, 'edited_user_page_after_time', 'threshold', metric_name, m1_count, value))
try: m1_count = registered_baseline[year_month]
except: m1_count = 0
parameters.append((year_month, 'editor_retention', 'register', 'threshold', 1, 'edited_user_page_after_time', 'threshold', metric_name, m1_count, value))
cursor.executemany(query_cm,parameters)
conn.commit()
parameters = []
queries_retention_dict = {}
# USER PAGE TALK PAGE
# number of editors who edited their user_page_talk_page at least once during the first 24h after their first edit
queries_retention_dict['editors_edited_user_page_talk_page_d24h_afe'] = 'SELECT count(distinct ch.user_id), ch.year_month_first_edit FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "user_page_talk_page_edit_count_24h" AND ce.abs_value > 0 GROUP BY 2 ORDER BY 2 ASC;'
# number of editors who edited their user_page_talk_page at least once during the first 30 days after their first edit
queries_retention_dict['editors_edited_user_page_talk_page_d30d_afe'] = 'SELECT count(distinct ch.user_id), ch.year_month_first_edit FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "user_page_talk_page_edit_count_1month" AND ce.abs_value > 0 GROUP BY 2 ORDER BY 2 ASC;'
# number of editors who edited their user_page_talk_page at least once after the first edit
queries_retention_dict['editors_edited_user_page_talk_page_afe'] = 'SELECT count(distinct ch.user_id), ch.year_month_first_edit FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "monthly_edits_ns3_user_talk" AND ce.abs_value > 0 GROUP BY 2 ORDER BY 2 ASC;'
for metric_name, query in queries_retention_dict.items():
for row in cursor.execute(query):
value=row[0];
year_month=row[1]
if year_month == '': continue
try: m1_count = retention_baseline[year_month]
except: m1_count = 0
parameters.append((year_month, 'editor_retention', 'first_edit', 'threshold', 1, 'edited_user_page_talk_page_after_time', 'threshold', metric_name, m1_count, value))
try: m1_count = registered_baseline[year_month]
except: m1_count = 0
parameters.append((year_month, 'editor_retention', 'register', 'threshold', 1, 'edited_user_page_talk_page_after_time', 'threshold', metric_name, m1_count, value))
cursor.executemany(query_cm,parameters)
conn.commit()
print ('editor_retention')
def drop_off():
year_month = cycle_year_month
lustrum_first_edit_dict = {}
query = 'SELECT count(user_id), lustrum_first_edit FROM '+languagecode+'wiki_editors WHERE lustrum_first_edit != "" GROUP BY lustrum_first_edit;'
for row in cursor.execute(query):
lustrum_first_edit_dict[row[1]]=row[0]
year_first_edit_dict = {}
query = 'SELECT count(user_id), year_first_edit FROM '+languagecode+'wiki_editors WHERE year_first_edit != "" GROUP BY year_first_edit;'
for row in cursor.execute(query):
year_first_edit_dict[row[1]]=row[0]
edit_bins_count = {}
query = 'SELECT count(user_id), abs_value, year_month FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "edit_count_bin" GROUP by abs_value;'
for row in cursor.execute(query):
edit_bins_count[row[1]] = row[0]
highest_flag_dict = {}
query = 'SELECT count(user_id), highest_flag FROM '+languagecode+'wiki_editors WHERE highest_flag != "" GROUP BY highest_flag;'
for row in cursor.execute(query):
highest_flag_dict[row[1]]=row[0]
# registered_editors lustrum_first_edit bin 2001, 2006, 2011, 2016, 2020 year_last_edit bin 2001-2021 (180 days since last edit)
parameters = []
query = 'SELECT count(user_id), lustrum_first_edit, year_last_edit FROM '+languagecode+'wiki_editors WHERE lustrum_first_edit != "" AND days_since_last_edit >= 180 GROUP BY lustrum_first_edit, year_last_edit ORDER BY lustrum_first_edit, year_last_edit;'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
m2_value = row[2]
parameters.append((year_month, 'editor_drop_off', 'lustrum_first_edit', 'bin', m1_value, 'year_last_edit', 'bin', m2_value, lustrum_first_edit_dict[m1_value], m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# registered_editors year_first_edit bin 2001-2021 year_last_edit bin 2001-2021 (180 days since last edit)
parameters = []
query = 'SELECT count(user_id), year_first_edit, year_last_edit FROM '+languagecode+'wiki_editors WHERE year_first_edit != "" AND days_since_last_edit >= 180 GROUP BY year_first_edit, year_last_edit ORDER BY year_first_edit, year_last_edit;'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
m2_value = row[2]
parameters.append((year_month, 'editor_drop_off', 'year_first_edit', 'bin', m1_value, 'year_last_edit', 'bin', m2_value, year_first_edit_dict[m1_value], m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000 year_last_edit bin 2001-2021
parameters = []
query = 'SELECT count(e1.user_id), e1.abs_value, e2.year_last_edit, e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "edit_count_bin" GROUP by e1.abs_value, e2.year_last_edit;'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
m2_value = row[2]
m1_count = edit_bins_count[m1_value]
year_month = row[3]
parameters.append((year_month, 'editor_drop_off', 'total_edits', 'bin', m1_value, 'year_last_edit', 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000 over_past_max_inactive_months_row threshold
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000 over_edit_bin_average_past_max_inactive_months_row threshold
m2s = ['over_past_max_inactive_months_row','over_edit_bin_average_past_max_inactive_months_row','over_monthly_edit_bin_average_past_max_inactive_months_row']
for m2 in m2s:
parameters = []
query = 'SELECT count(e1.user_id), e1.abs_value FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "edit_count_bin" AND e2.metric_name = "'+m2+'" AND e2.abs_value > 0 GROUP by e1.abs_value;'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
m1_count = edit_bins_count[m1_value]
parameters.append((year_month, 'editor_drop_off', 'total_edits', 'bin', m1_value, m2, 'threshold', 0, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# registered_editors lustrum_first_edit bin 2001, 2006, 2011, 2016, 2020 over_past_max_inactive_months_row threshold > 0
# registered_editors year_first_edit bin 2001-2021 over_edit_bin_average_past_max_inactive_months_row threshold > 0
# registered_editors lustrum_first_edit bin 2001, 2006, 2011, 2016, 2020 over_edit_bin_average_past_max_inactive_months_row threshold > 0
# registered_editors year_first_edit bin 2001-2021 over_past_max_inactive_months_row threshold > 0
m1s = ['year_first_edit','lustrum_first_edit']
m2s = ['over_past_max_inactive_months_row','over_edit_bin_average_past_max_inactive_months_row','over_monthly_edit_bin_average_past_max_inactive_months_row']
for m1 in m1s:
for m2 in m2s:
parameters = []
query = 'SELECT count(e1.user_id), e1.'+m1+' FROM '+languagecode+'wiki_editors e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e2.metric_name = "'+m2+'" AND e2.abs_value > 0 GROUP by e1.'+m1+';'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
if m1 == 'year_first_edit':
try:
m1_count = year_first_edit_dict[m1_value]
except:
m1_count = 0
elif m1 == 'lustrum_first_edit':
try:
m1_count = lustrum_first_edit_dict[m1_value]
except:
m1_count = 0
parameters.append((year_month, 'editor_drop_off', m1, 'bin', m1_value, m2, 'threshold', 0, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# participative_editors total_edits bin 0_100, 100_500, 500_1000, 1000_5000, 5000_10000, 10000_50000, 50000_100000, 100000_500000, 500000_1000000, 1000000_1000000000000 days_since_last_edit bin 60, 120, 180.
days_since_last_edit = 60
while days_since_last_edit <= 1095: # 1095 days = 3 years
parameters = []
next_value_days_since_last_edit = days_since_last_edit + 60
if next_value_days_since_last_edit < 1095:
query = 'SELECT count(e1.user_id), e1.abs_value FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "edit_count_bin" AND days_since_last_edit BETWEEN '+str(days_since_last_edit)+' AND '+str(next_value_days_since_last_edit)+' GROUP by e1.abs_value;'
else:
query = 'SELECT count(e1.user_id), e1.abs_value FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "edit_count_bin" AND days_since_last_edit > '+str(days_since_last_edit)+' GROUP by e1.abs_value;'
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
m1_count = edit_bins_count[m1_value]
m2_value = days_since_last_edit
parameters.append((year_month, 'editor_drop_off', 'total_edits', 'bin', m1_value, 'days_since_last_edit', 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
days_since_last_edit = next_value_days_since_last_edit
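# The loop walks 60-day buckets (60-120, 120-180, ...) and switches to an
# open-ended bucket once the next edge would pass 1095 days (about 3 years);
# the same lower edges could be produced with range(60, 1096, 60)
# (an illustrative equivalence, not the original code).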
# flags highest_flag name sysop, autopatrolled, bureaucrat, etc. year_last_edit bin 2001-2021 (180 days inactive since calculation)
parameters = []
query = 'SELECT count(user_id), highest_flag, year_last_edit FROM '+languagecode+'wiki_editors WHERE days_since_last_edit >= 180 GROUP BY year_last_edit, highest_flag'
year_month = cycle_year_month
for row in cursor.execute(query):
m2_count = row[0]
m1_value = row[1]
m2_value = row[2]
try:
m1_count = highest_flag_dict[m1_value]
except:
m1_count = 0
parameters.append((year_month, 'editor_drop_off', 'highest_flag', 'name', m1_value, 'year_last_edit', 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
# active_editors monthly_edits threshold 5 monthly_edits_to_baseline bin 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
# active_editors monthly_edits threshold 5 monthly_editing_days_to_baseline bin 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
parameters = []
active_editors_5_year_month = {}
query = 'SELECT count(distinct user_id), year_month FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "monthly_edits" AND abs_value >= '+str(5)+' GROUP BY year_month ORDER BY year_month'
for row in cursor.execute(query):
active_editors_5_year_month[row[1]] = row[0]
m2s = ['monthly_edits_to_baseline','monthly_editing_days_to_baseline']
for m2 in m2s:
query = 'SELECT count(e1.user_id), e1.year_month, e2.abs_value FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "monthly_edits" AND e1.abs_value >= 5 AND e2.metric_name = "'+m2+'" GROUP by e1.year_month, e2.abs_value;'
for row in cursor.execute(query):
m2_count = row[0]
year_month = row[1]
m2_value = row[2]
m1_count = active_editors_5_year_month[year_month]
parameters.append((year_month, 'editor_drop_off', 'monthly_edits', 'bin', 5, m2, 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
print ('editor_drop_off')
def actions():
year_month = cycle_year_month
# monthly_edits monthly_edits sum main, monthly_edits_ns0_main, etc.
m1s = ['monthly_editing_days','monthly_edits','monthly_edits_ns0_main','monthly_edits_ns10_template','monthly_edits_ns11_template_talk','monthly_edits_ns12_help','monthly_edits_ns13_help_talk','monthly_edits_ns14_category','monthly_edits_ns15_category_talk','monthly_edits_ns1_talk','monthly_edits_ns2_user','monthly_edits_ns3_user_talk','monthly_edits_ns4_project','monthly_edits_ns5_project_talk','monthly_edits_ns6_file','monthly_edits_ns7_file_talk','monthly_edits_ns8_mediawiki','monthly_edits_ns9_mediawiki_talk']
parameters = []
sum_monthly_edits = {}
for m1 in m1s:
query = 'SELECT SUM(abs_value), year_month FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "'+m1+'" GROUP BY year_month ORDER BY year_month'
for row in cursor.execute(query):
m1_count = row[0]
sum_monthly_edits[m1,row[1]] = m1_count
parameters.append((year_month, 'editor_actions', 'monthly_edits', 'sum', m1, None, None, None, m1_count, None))
cursor.executemany(query_cm,parameters)
conn.commit()
# edits
m2s = ['lustrum_first_edit','year_first_edit','year_last_edit','highest_flag']
for m2 in m2s:
for m1 in m1s:
if m2 == 'year_last_edit':
query = 'SELECT SUM(e1.abs_value), e2.'+m2+', e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 ON e1.user_id = e2.user_id WHERE metric_name = "'+m1+'" AND days_since_last_edit >= 180 GROUP BY e1.year_month, e2.'+m2+' ORDER BY e1.year_month, e2.'+m2
else:
query = 'SELECT SUM(e1.abs_value), e2.'+m2+', e1.year_month FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editors e2 ON e1.user_id = e2.user_id WHERE metric_name = "'+m1+'" GROUP BY e1.year_month, e2.'+m2+' ORDER BY e1.year_month, e2.'+m2
# print (m1, m2)
# print (query)
# input('')
for row in cursor.execute(query):
m2_value = row[1]
m2_count = row[0]
year_month = row[2]
m1_count = sum_monthly_edits[m1,year_month]
parameters.append((year_month, 'editor_actions', 'monthly_edits', 'sum', m1, m2, 'bin', m2_value, m1_count, m2_count))
# print (len(parameters))
cursor.executemany(query_cm,parameters)
conn.commit()
# monthly_edits sum main, monthly_edits_ns0_main, etc. active_months bin 1-10, 10-20, 30-40,... to 150
active_months = {(0,0):'0', (217, 228): '217-228', (301, 312): '301-312', (277, 288): '277-288', (25, 36): '25-36', (241, 252): '241-252', (109, 120): '109-120', (85, 96): '85-96', (61, 72): '61-72', (205, 216): '205-216', (289, 300): '289-300', (193, 204): '193-204', (73, 84): '73-84', (49, 60): '49-60', (37, 48): '37-48', (265, 276): '265-276', (181, 192): '181-192', (145, 156): '145-156', (13, 24): '13-24', (253, 264): '253-264', (133, 144): '133-144', (1, 12): '1-12', (121, 132): '121-132', (169, 180): '169-180', (157, 168): '157-168', (229, 240): '229-240', (97, 108): '97-108'}
year_months = set()
parameters = []
for m1 in m1s:
for interval, label in active_months.items():
query = 'SELECT SUM(e1.abs_value), e1.year_month, "'+label+'" FROM '+languagecode+'wiki_editor_metrics e1 INNER JOIN '+languagecode+'wiki_editor_metrics e2 ON e1.user_id = e2.user_id WHERE e1.metric_name = "'+m1+'" AND e2.metric_name = "active_months" AND e2.abs_value BETWEEN '+str(interval[0])+' AND '+str(interval[1])+' GROUP by e1.year_month, e1.abs_value;'
for row in cursor.execute(query):
m2_value = row[2]
m2_count = row[0]
year_month = row[1]
m1_count = sum_monthly_edits[m1,year_month]
year_months.add(year_month)
parameters.append((year_month, 'editor_actions', 'monthly_edits', 'sum', m1, "active_months", 'bin', m2_value, m1_count, m2_count))
cursor.executemany(query_cm,parameters)
conn.commit()
"""
# GINI
def gini_calculation(x):
# (Warning: This is a concise implementation, but it is O(n**2)
# in time and memory, where n = len(x). *Don't* pass in huge
# samples!)
# Mean absolute difference
mad = np.abs(np.subtract.outer(x, x)).mean()
# Relative mean absolute difference
rmad = mad/np.mean(x)
# Gini coefficient
g = 0.5 * rmad
return g
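# Illustrative values (an assumption, computed from the formula above):
# a perfectly equal sample yields 0, while full concentration approaches 1,
# e.g. gini_calculation([5, 5, 5, 5]) -> 0.0
#      gini_calculation([0, 0, 0, 8]) -> 0.75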
parameters = []
ym = sorted(list(year_months))
for year_month in ym:
query = 'SELECT ce.abs_value FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "monthly_edits" AND year_month="'+year_month+'" AND ch.bot = "editor" AND ce.abs_value > 0;'
query = 'SELECT abs_value FROM '+languagecode+'wiki_editor_metrics WHERE metric_name = "monthly_edits" AND year_month="'+year_month+'"'
values = []
for row in cursor.execute(query): values.append(row[0]);
v = gini_calculation(values)
parameters.append((year_month, 'monthly_edits', 'monthly_edits', 'gini', 'monthly_edits', None, None, None, v, None))
# query = 'SELECT ce.abs_value FROM '+languagecode+'wiki_editors ch INNER JOIN '+languagecode+'wiki_editor_metrics ce ON ch.user_id = ce.user_id WHERE ce.metric_name = "edit_count" AND ch.bot = "editor" AND ce.abs_value > 0;'
# values = []
# for row in cursor.execute(query): values.append(row[0]);
# v = gini(values)
# print (v)
# parameters.append((v, 'gini_edits', year_month))
# parameters.append((year_month, 'monthly_edits', 'monthly_edits', 'gini', 'monthly_edits', None, None, None, v, None))
cursor.executemany(query_cm,parameters)
conn.commit()
"""
print ('editor_actions')
# participation()
# flags()
# active_editors()
# retention()
drop_off()
# actions()
duration = str(datetime.timedelta(seconds=time.time() - functionstartTime))
print(languagecode+' '+ function_name+' '+ duration)
"""
def editor_metrics_social(languagecode):
pass
Full iteration over the MediaWiki history.
Monthly metrics.
Iterate month by month.
For each month, query the users registered that month or one or two months earlier.
Check how many of them are interacted with, and update the counters.
The latest edits of all users must be stored in order to check whether there is interaction.
Done
Interactions newcomers_user_page_talk_page_edits
Interactions newcomers_article_talk_page_edits
Interactions newcomer_count
Interactions survivors_count (this one will always lag two months) -> how many of the newcomers you interacted with survive.
Interactions received
User talk pages
Article talk pages
Hypothesis: when editors are about to drop off... they first stop interacting with newcomers.
Hypothesis: when editors stop interacting with them... they are closer to dropping off.
def editor_metrics_multilingual(languagecode):
print('')
# * wiki_editors
# (user_id integer, user_name text, bot text, user_flags text, primarybinary, primarylang text, primarybinary_ecount, totallangs_ecount, numberlangs integer)
# FUNCTION
# multilingualism: this needs a function that goes through the different databases and builds this table
def editor_metrics_content_diversity(languagecode):
print('')
# https://stackoverflow.com/questions/28816330/sqlite-insert-if-not-exist-else-increase-integer-value
# TO AVOID KEEPING EVERYTHING IN MEMORY, DO THE INSERT OF THE EDITED CCC FOR EACH FILE.
# * wiki_editor_content_metrics
# (user_id integer, user_name text, content_type text, value real)
# FUNCTION
# this needs a function that runs over the mediawiki history for this purpose, having preselected editors as well.
functionstartTime = time.time()
function_name = 'editor_metrics_content_diversity '+languagecode
print (function_name)
print (languagecode)
d_paths = get_mediawiki_paths(languagecode)
if (len(d_paths)==0):
print ('dump error. this language has no mediawiki_history dump: '+languagecode)
# wikilanguages_utils.send_email_toolaccount('dump error at script '+script_name, dumps_path)
# quit()
for dump_path in d_paths:
print(dump_path)
iterTime = time.time()
dump_in = bz2.open(dump_path, 'r')
line = dump_in.readline()
line = line.rstrip().decode('utf-8')[:-1]
values = line.split(' ')
parameters = []
editors_params = []
iter = 0
while line != '':
# iter += 1
# if iter % 1000000 == 0: print (str(iter/1000000)+' million lines.')
line = dump_in.readline()
line = line.rstrip().decode('utf-8')[:-1]
values = line.split('\t')
if len(values)==1: continue
page_id = values[23]
page_title = values[25]
page_namespace = int(values[28])
edit_count = values[34]
For the edits-to-CCC part:
A dictionary of dictionaries tracking what each editor edits every month. Faster thanks to hashing.
dict_editors = {}
dict_CCC_per_editor = {}
The monthly edits to each CCC? We keep writing them to a database, which can be the same one or a different one.
Then sum the final accumulated total and that's it.
The monthly records are deleted... since it is too much content.
"""
#######################################################################################
class Logger_out(object): # tees stdout to both a log file and the terminal screen.
def __init__(self):
self.terminal = sys.stdout
self.log = open("community_health_metrics2.out", "w")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
class Logger_err(object): # tees stderr to both a log file and the terminal screen.
def __init__(self):
self.terminal = sys.stderr
self.log = open("community_health_metrics.err", "w")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
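# A minimal sketch of a single parameterized tee class that would cover both
# loggers above (the class name and usage are illustrative assumptions):
# class Tee(object):
#     def __init__(self, stream, path):
#         self.terminal = stream
#         self.log = open(path, "w")
#     def write(self, message):
#         self.terminal.write(message)
#         self.log.write(message)
#     def flush(self):
#         self.terminal.flush()
#         self.log.flush()
# # e.g. sys.stdout = Tee(sys.stdout, "community_health_metrics2.out")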
### MAIN:
if __name__ == '__main__':
sys.stdout = Logger_out()
sys.stderr = Logger_err()
startTime = time.time()
cycle_year_month = (datetime.date.today() - relativedelta.relativedelta(months=1)).strftime('%Y-%m')
territories = wikilanguages_utils.load_wikipedia_languages_territories_mapping()
languages = wikilanguages_utils.load_wiki_projects_information();
wikilanguagecodes = sorted(languages.index.tolist())
print ('checking languages Replicas databases and deleting those without one...')
# Verify/Remove all languages without a replica database
for a in list(wikilanguagecodes):  # iterate over a copy so removing items is safe
    if wikilanguages_utils.establish_mysql_connection_read(a) is None:
        wikilanguagecodes.remove(a)
print (wikilanguagecodes)
# wikilanguagecodes = ['eu','it']
# wikilanguagecodes = ['gl','eu','oc']
# wikilanguagecodes = ['ca']
# wikilanguagecodes = ['es','fr','it']
# wikilanguagecodes = ['ca','eu','es','fr','it']
wikilanguagecodes = ['oc','gl','is','ca','eu']
print ('* Starting the COMMUNITY HEALTH METRICS '+cycle_year_month+' at this exact time: ' + str(datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")))
main()
finishTime = time.time()
    print ('* Done with the COMMUNITY HEALTH METRICS, completed successfully after: ' + str(datetime.timedelta(seconds=finishTime - startTime)))
wikilanguages_utils.finish_email(startTime,'community_health_metrics.out', 'COMMUNITY HEALTH METRICS')
| 48.436254 | 1,569 | 0.642068 | 693 | 0.005094 | 0 | 0 | 0 | 0 | 0 | 0 | 52,294 | 0.384388 |
1dfd5d2386c40b6bce3490e841b7ecb270155298 | 1,051 | py | Python | tests/api/length_dist_api_test.py | himanshur-dev/ribopy | 78846e4140a7aa7b4dc995f39606577efaaf0831 | [
"MIT"
] | 4 | 2020-01-14T01:01:36.000Z | 2022-03-21T16:30:24.000Z | tests/api/length_dist_api_test.py | himanshur-dev/ribopy | 78846e4140a7aa7b4dc995f39606577efaaf0831 | [
"MIT"
] | 9 | 2019-12-17T20:45:08.000Z | 2021-12-15T22:34:06.000Z | tests/api/length_dist_api_test.py | himanshur-dev/ribopy | 78846e4140a7aa7b4dc995f39606577efaaf0831 | [
"MIT"
] | 3 | 2019-12-14T17:51:53.000Z | 2022-01-12T16:09:45.000Z | # -*- coding: utf-8 -*-
import unittest
import os
from io import StringIO, BytesIO
import h5py
import numpy as np  # used below via np.allclose
from ribopy import Ribo
from ribopy import create
from ribopy.merge import merge_ribos
from ribopy.settings import *
from ribopy.core.exceptions import *
import sys
test_dir_1 = os.path.dirname(os.path.realpath(__file__))
sys.path.append(test_dir_1)
test_dir_2 = os.path.dirname(os.path.realpath(test_dir_1))
sys.path.append(test_dir_2)
from multilength_test_data import *
from api_test_base import ApiTestBase
####################################################################
class TestGetLengthDist(ApiTestBase):
def test_get_length_dist(self):
length_dist = self.sample_ribo.get_length_dist(
region_name = "CDS",
experiments = ["merzifon", "ankara"])
self.assertTrue(np.allclose(length_dist["merzifon"], [3,6,11,6]))
self.assertTrue(np.allclose(length_dist["ankara"], [0,8,0,0]))
if __name__ == '__main__':
unittest.main()
| 28.405405 | 73 | 0.639391 | 404 | 0.384396 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.135109 |
1dfd962e97e4b42a0882e32bbba5a65998278c73 | 263 | py | Python | task_management/urls.py | AbdelrahmanRabiee/ayenapp | 43fc4f2b5f53ca308cf60c9f1d74cb2e3f4f4b25 | [
"MIT"
] | null | null | null | task_management/urls.py | AbdelrahmanRabiee/ayenapp | 43fc4f2b5f53ca308cf60c9f1d74cb2e3f4f4b25 | [
"MIT"
] | 5 | 2020-06-06T01:47:05.000Z | 2022-02-10T14:05:22.000Z | task_management/urls.py | AbdelrahmanRabiee/ayenapp | 43fc4f2b5f53ca308cf60c9f1d74cb2e3f4f4b25 | [
"MIT"
] | null | null | null | from django.urls import path
from rest_framework import routers
from task_management import viewsets
app_name = "task_management"
urlpatterns = [
]
task_router = routers.SimpleRouter()
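# Registering with SimpleRouter generates the standard DRF viewset routes,
# e.g. tasks/ (list/create) and tasks/<pk>/ (retrieve/update/destroy).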
task_router.register(r'tasks', viewsets.TasksViewSet, base_name='tasks') | 18.785714 | 72 | 0.802281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.121673 |
1dfdf1dc9d560dd375ad6fcdf3b6923ca0b72a86 | 36 | py | Python | core/parsers/__init__.py | andrewisakov/taximaster | b92f06894bbc3414086ec77f1c918a3c0f085241 | [
"MIT"
] | null | null | null | core/parsers/__init__.py | andrewisakov/taximaster | b92f06894bbc3414086ec77f1c918a3c0f085241 | [
"MIT"
] | null | null | null | core/parsers/__init__.py | andrewisakov/taximaster | b92f06894bbc3414086ec77f1c918a3c0f085241 | [
"MIT"
] | null | null | null | from .parsers import request_parser
| 18 | 35 | 0.861111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1dff8f34b88c1028d015cee284c4c5733bdd6297 | 4,698 | py | Python | pytils/clock.py | d33jiang/pytils | 6e44a05480abec6297b77730676bcb5fd5088f6f | [
"MIT"
] | null | null | null | pytils/clock.py | d33jiang/pytils | 6e44a05480abec6297b77730676bcb5fd5088f6f | [
"MIT"
] | null | null | null | pytils/clock.py | d33jiang/pytils | 6e44a05480abec6297b77730676bcb5fd5088f6f | [
"MIT"
] | null | null | null | import functools
import heapq
import logging
from collections import deque
from threading import Condition, RLock
from typing import Any, Callable, List, NamedTuple, Optional
from pytils.mixins import DaemonHandler
from ._config.time import DEFAULT_TIME_SUPPLIER, TimeSupplier, TimeType, ZERO_DURATION
__all__ = [
'Action',
'Clock',
'Handler',
'Schedule',
'ScheduleKey',
'SchedulingQueue',
'TimeSupplier',
'TimeType',
]
_DEFAULT_MAX_TASK_QUEUE_SIZE = 4096
_MAX_SLEEP_DURATION = 12.
#
# Convenience Function
wrap_action = functools.partial
#
# Data Definitions
class ScheduleKey(NamedTuple):
period: Optional[float]
action: 'Action'
class ScheduleEntry(NamedTuple):
next_run: float
key: 'ScheduleKey'
Action = Callable[[], Any]
Handler = Callable[[Action], Any]
#
# Clock
LOGGER = logging.getLogger('pytils.clock')
class Clock:
def __init__(
self,
max_queue_size: int = _DEFAULT_MAX_TASK_QUEUE_SIZE,
s_time: TimeSupplier = DEFAULT_TIME_SUPPLIER):
self._scheduling_queue = SchedulingQueue(max_queue_size, s_time)
@property
def schedule(self):
return self._scheduling_queue.schedule
def run_scheduler(self):
self.schedule.run()
def start_scheduler(self):
self.schedule.start()
def run_handler(self):
self._scheduling_queue.run()
def start_handler(self):
self._scheduling_queue.start()
class SchedulingQueue(DaemonHandler):
def __init__(
self,
max_queue_size: int = _DEFAULT_MAX_TASK_QUEUE_SIZE,
s_time: TimeSupplier = DEFAULT_TIME_SUPPLIER):
self._cv = Condition()
self._task_queue = deque(maxlen=max_queue_size)
self._schedule = Schedule(self._enqueue, s_time)
@property
def schedule(self):
return self._schedule
def handle_one(self):
with self._cv:
self._cv.wait_for(self._task_queue.__len__)
action = self._task_queue.popleft()
self._cv.notify()
action()
def _enqueue(self, action: Action):
with self._cv:
self._task_queue.append(action)
self._cv.notify()
class Schedule(DaemonHandler):
def __init__(self, handler: Handler, s_time: TimeSupplier = DEFAULT_TIME_SUPPLIER):
self.s_time = s_time
self._lock = RLock()
self._cv = Condition(self._lock)
self._schedule = [] # type: List[ScheduleEntry]
self._handler = handler
def register(
self,
action: Action,
period: Optional[TimeType],
delay: Optional[TimeType] = None) -> ScheduleKey:
        if period is not None and period <= ZERO_DURATION:
            raise ValueError('period must be positive or None')
        if delay is not None and delay < ZERO_DURATION:
            raise ValueError('delay must be non-negative or None')
if not delay:
delay = ZERO_DURATION
key = ScheduleKey(period, action)
entry = ScheduleEntry(self.s_time() + delay, key)
with self._cv:
self._enqueue(entry)
return key
def handle_one(self):
with self._cv:
while not self._cv.wait_for(self.has_expired, self._get_next_sleep_duration()):
pass
self._handle_entry(self._dequeue())
def _handle_entry(self, entry: ScheduleEntry):
self._handler(self._create_readmittence_action_from_key(entry.key))
def _create_readmittence_action_from_key(self, key: ScheduleKey) -> Action:
if key.period is None:
return key.action
def perform_action_and_readmit():
next_run = self.s_time() + key.period
key.action()
current_time = self.s_time()
if next_run < current_time:
LOGGER.warning('Scheduled task took longer than its period length to complete')
self._enqueue(ScheduleEntry(max(current_time, next_run), key))
return perform_action_and_readmit
def has_expired(self) -> bool:
with self._lock:
return bool(self._schedule) and self._schedule[0].next_run - self.s_time() <= 0
def _get_next_sleep_duration(self) -> TimeType:
if self._schedule:
return min(_MAX_SLEEP_DURATION, max(ZERO_DURATION, self._schedule[0].next_run - self.s_time()))
else:
return _MAX_SLEEP_DURATION
def _enqueue(self, entry: ScheduleEntry):
self._cv.notify()
heapq.heappush(self._schedule, entry)
def _dequeue(self) -> Optional[ScheduleEntry]:
self._cv.notify()
return heapq.heappop(self._schedule) if self._schedule else None
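# Illustrative usage sketch (not part of the original module):
#
#     clock = Clock()
#     key = clock.schedule.register(lambda: print('tick'), period=1.0, delay=0.5)
#     clock.start_scheduler()  # daemon that moves due entries onto the task queue
#     clock.start_handler()    # daemon that pops the queue and runs the actions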
| 25.813187 | 107 | 0.645807 | 3,967 | 0.844402 | 0 | 0 | 143 | 0.030438 | 0 | 0 | 332 | 0.070668 |
38005601c897e3e947cdabb62140e650a9ec4e7f | 1,473 | py | Python | audtool/__init__.py | Guaxinim5573/audacious-player | 7bcd2afdd91bb18a41fb70500aaf76eaa17da837 | [
"MIT"
] | null | null | null | audtool/__init__.py | Guaxinim5573/audacious-player | 7bcd2afdd91bb18a41fb70500aaf76eaa17da837 | [
"MIT"
] | 1 | 2021-11-29T16:25:22.000Z | 2021-11-29T16:25:22.000Z | audtool/__init__.py | Guaxinim5573/audacious-player | 7bcd2afdd91bb18a41fb70500aaf76eaa17da837 | [
"MIT"
] | null | null | null | import subprocess
import logging
logger = logging.getLogger(__name__)
# Run a command-line command and return its stdout
def _run(command):
result = subprocess.run(command, check=True, stdout=subprocess.PIPE, text=True)
result.stdout = result.stdout[:-1]
return result.stdout
def is_playing():
result = subprocess.run(["audtool", "playback-status"], stdout=subprocess.PIPE, text=True)
logger.debug(result.stdout)
if result.returncode == 0 and result.stdout is not None and result.stdout != "stopped":
return True
return False
def status():
return _run(["audtool", "playback-status"])
# Get current song
def get_current_song():
return _run(["audtool", "current-song"])
# Skip to next song
def next():
_run(["audtool", "playlist-advance"])
_run(["audtool", "playback-play"])
def prev():
_run(["audtool", "playlist-reverse"])
_run(["audtool", "playback-play"])
def volume(amount):
_run(["audtool", "set-volume", amount])
def playpause():
_run(["audtool", "playback-playpause"])
# Display all songs in current playlist
def display_songs():
lines = _run(["audtool", "playlist-display"]).splitlines()
    lines.pop() # Removes last item, we don't need that
lines.pop(0) # We also don't need the first item
songs = []
for line in lines:
[pos, name, length] = line.split(" | ")
pos = pos.lstrip()
name = name.rstrip()
songs.append({"name": name, "pos": pos, "length": length})
return songs
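# Illustrative usage sketch (not part of the original module):
# if is_playing():
#     print(get_current_song())
#     for song in display_songs():
#         print(song["pos"], song["name"], song["length"])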
def jump(pos):
_run(["audtool", "playlist-jump", pos]) | 26.781818 | 91 | 0.697895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 509 | 0.345553 |
3800e9b86f70276c90dfe1568a7d1e6b8f6d4743 | 5,157 | py | Python | hyperglass/execution/drivers/agent.py | blkmajik/hyperglass | c52a6f609843177671d38bcad59b8bd658f46b64 | [
"BSD-3-Clause-Clear"
] | 298 | 2019-06-17T13:51:46.000Z | 2021-06-23T18:09:51.000Z | hyperglass/execution/drivers/agent.py | blkmajik/hyperglass | c52a6f609843177671d38bcad59b8bd658f46b64 | [
"BSD-3-Clause-Clear"
] | 137 | 2019-06-18T12:59:37.000Z | 2021-06-19T05:50:58.000Z | hyperglass/execution/drivers/agent.py | blkmajik/hyperglass | c52a6f609843177671d38bcad59b8bd658f46b64 | [
"BSD-3-Clause-Clear"
] | 42 | 2019-06-18T07:25:23.000Z | 2021-06-18T17:40:20.000Z | """Execute validated & constructed query on device.
Accepts input from front end application, validates the input and
returns errors if input is invalid. Passes validated parameters to
construct.py, which is used to build & run the Netmiko connections or
hyperglass-frr API calls, returns the output back to the front end.
"""
# Standard Library
from ssl import CertificateError
from typing import Iterable
# Third Party
import httpx
# Project
from hyperglass.log import log
from hyperglass.util import parse_exception
from hyperglass.encode import jwt_decode, jwt_encode
from hyperglass.exceptions import RestError, ResponseEmpty
from hyperglass.configuration import params
# Local
from ._common import Connection
class AgentConnection(Connection):
"""Connect to target device via hyperglass-agent."""
async def collect(self) -> Iterable: # noqa: C901
"""Connect to a device running hyperglass-agent via HTTP."""
log.debug("Query parameters: {}", self.query)
client_params = {
"headers": {"Content-Type": "application/json"},
"timeout": params.request_timeout,
}
if self.device.ssl is not None and self.device.ssl.enable:
with self.device.ssl.cert.open("r") as file:
cert = file.read()
if not cert:
raise RestError(
"SSL Certificate for device {d} has not been imported",
level="danger",
d=self.device.name,
)
http_protocol = "https"
client_params.update({"verify": str(self.device.ssl.cert)})
log.debug(
(
f"Using {str(self.device.ssl.cert)} to validate connection "
f"to {self.device.name}"
)
)
else:
http_protocol = "http"
endpoint = "{protocol}://{address}:{port}/query/".format(
protocol=http_protocol, address=self.device._target, port=self.device.port
)
log.debug("URL endpoint: {}", endpoint)
try:
async with httpx.AsyncClient(**client_params) as http_client:
responses = ()
for query in self.query:
encoded_query = await jwt_encode(
payload=query,
secret=self.device.credential.password.get_secret_value(),
duration=params.request_timeout,
)
log.debug("Encoded JWT: {}", encoded_query)
raw_response = await http_client.post(
endpoint, json={"encoded": encoded_query}
)
log.debug("HTTP status code: {}", raw_response.status_code)
raw = raw_response.text
log.debug("Raw Response:\n{}", raw)
if raw_response.status_code == 200:
decoded = await jwt_decode(
payload=raw_response.json()["encoded"],
secret=self.device.credential.password.get_secret_value(),
)
log.debug("Decoded Response:\n{}", decoded)
responses += (decoded,)
elif raw_response.status_code == 204:
raise ResponseEmpty(
params.messages.no_output, device_name=self.device.name,
)
else:
log.error(raw_response.text)
except httpx.exceptions.HTTPError as rest_error:
msg = parse_exception(rest_error)
log.error("Error connecting to device {}: {}", self.device.name, msg)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=msg,
)
except OSError as ose:
log.critical(str(ose))
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error="System error",
)
except CertificateError as cert_error:
log.critical(str(cert_error))
msg = parse_exception(cert_error)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=f"{msg}: {cert_error}",
)
if raw_response.status_code != 200:
log.error("Response code is {}", raw_response.status_code)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=params.messages.general,
)
if not responses:
log.error("No response from device {}", self.device.name)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=params.messages.no_response,
)
return responses
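# Sketch of the JWT round trip performed in collect() above (function names and
# keyword arguments are the ones used in this module; the values are illustrative):
#
#     encoded = await jwt_encode(payload=query, secret=password, duration=params.request_timeout)
#     decoded = await jwt_decode(payload=response_json["encoded"], secret=password)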
| 36.835714 | 86 | 0.54683 | 4,434 | 0.859802 | 0 | 0 | 0 | 0 | 4,337 | 0.840993 | 1,015 | 0.19682 |
3800ef2480e2344c284a3ea135e2ec8a35e84cfa | 3,377 | py | Python | GiftUi.py | DrPleaseRespect/GiftBox | d39b3b644aa579297315ee3b3cb38c79556682f9 | [
"MIT"
] | null | null | null | GiftUi.py | DrPleaseRespect/GiftBox | d39b3b644aa579297315ee3b3cb38c79556682f9 | [
"MIT"
] | null | null | null | GiftUi.py | DrPleaseRespect/GiftBox | d39b3b644aa579297315ee3b3cb38c79556682f9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'GiftUi.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
import Resources_rc
class Ui_Form(object):
def setupUi(self, Form):
if not Form.objectName():
Form.setObjectName(u"Form")
Form.resize(373, 261)
sizePolicy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
Form.setMinimumSize(QSize(373, 261))
Form.setMaximumSize(QSize(373, 261))
self.gridLayout = QGridLayout(Form)
self.gridLayout.setObjectName(u"gridLayout")
self.verticalLayout_2 = QVBoxLayout()
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.label = QLabel(Form)
self.label.setObjectName(u"label")
sizePolicy1 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy1.setHorizontalStretch(0)
sizePolicy1.setVerticalStretch(0)
sizePolicy1.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy1)
self.label.setMinimumSize(QSize(200, 200))
self.label.setMaximumSize(QSize(200, 200))
self.label.setSizeIncrement(QSize(100, 100))
self.label.setBaseSize(QSize(100, 100))
self.label.setTextFormat(Qt.RichText)
self.label.setPixmap(QPixmap(u":/GiftIcon/gifticon.png"))
self.label.setScaledContents(True)
self.label.setAlignment(Qt.AlignCenter)
self.label.setMargin(23)
self.label.setIndent(-1)
self.horizontalLayout.addWidget(self.label)
self.label_2 = QLabel(Form)
self.label_2.setObjectName(u"label_2")
self.label_2.setScaledContents(False)
self.label_2.setAlignment(Qt.AlignCenter)
self.horizontalLayout.addWidget(self.label_2)
self.horizontalSpacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout.addItem(self.horizontalSpacer)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.pushButton = QPushButton(Form)
self.pushButton.setObjectName(u"pushButton")
self.verticalLayout_2.addWidget(self.pushButton)
self.gridLayout.addLayout(self.verticalLayout_2, 0, 0, 1, 1)
self.retranslateUi(Form)
QMetaObject.connectSlotsByName(Form)
# setupUi
def retranslateUi(self, Form):
Form.setWindowTitle(QCoreApplication.translate("Form", u"Form", None))
self.label.setText("")
self.label_2.setText(QCoreApplication.translate("Form", u"Happy Birthday!!", None))
self.pushButton.setText(QCoreApplication.translate("Form", u"Open Gift!", None))
# retranslateUi
| 37.522222 | 95 | 0.665976 | 2,881 | 0.853124 | 0 | 0 | 0 | 0 | 0 | 0 | 571 | 0.169085 |
3801139345c9a4ae91e8d3ae8ff7f84beeaa380a | 251 | py | Python | telegram_coin_bot/__init__.py | sigmaister/telegram-coin-bot | b14329de8fc86cb135f05d7207f64a00f349cabf | [
"MIT"
] | 17 | 2020-07-17T18:55:27.000Z | 2021-11-20T03:54:01.000Z | telegram_coin_bot/__init__.py | sigmaister/telegram-coin-bot | b14329de8fc86cb135f05d7207f64a00f349cabf | [
"MIT"
] | 5 | 2020-07-17T19:23:06.000Z | 2020-08-11T12:45:14.000Z | telegram_coin_bot/__init__.py | sigmaister/telegram-coin-bot | b14329de8fc86cb135f05d7207f64a00f349cabf | [
"MIT"
] | 10 | 2020-07-17T19:01:40.000Z | 2021-12-18T13:21:55.000Z | __author__ = "Wild Print"
__maintainer__ = __author__
__email__ = "telegram_coin_bot@rambler.ru"
__license__ = "MIT"
__version__ = "0.0.1"
__all__ = (
"__author__",
"__email__",
"__license__",
"__maintainer__",
"__version__",
)
| 15.6875 | 42 | 0.669323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.474104 |
38028042957e1e98ecf41fea1079a9cdbc4aaca9 | 3,200 | py | Python | tests/backend/message.py | IronMeerkat/genesis | 4a053ae4639a12295be9951905ca69383c1da860 | [
"MIT"
] | null | null | null | tests/backend/message.py | IronMeerkat/genesis | 4a053ae4639a12295be9951905ca69383c1da860 | [
"MIT"
] | null | null | null | tests/backend/message.py | IronMeerkat/genesis | 4a053ae4639a12295be9951905ca69383c1da860 | [
"MIT"
] | null | null | null | import test_agent
print('Logging in')
Meerkat = test_agent.TestAgent(username='meerkat', password='12345678', endpoint='/messages/')
Pangolin = test_agent.TestAgent(username='pangolin', password='12345678', endpoint='/messages/')
Badger = test_agent.TestAgent(username='badger', password='12345678', endpoint='/messages/')
Anon = test_agent.TestAgent(endpoint='/messages/')
print("Meerkat sending message to Pangolin")
meerkat_sent_message = Meerkat.post(recepient='pangolin', title="Hello Pangolin", body="It's me, Meerkat")
assert meerkat_sent_message.status_code == 201, f'Failed to send message. code: {meerkat_sent_message.status_code}'
print("Checking meerkat's mailboxes")
meerkat_mailbox = Meerkat.get()
assert meerkat_mailbox.json() == [], "Meerkat can see a message"
meerkat_outgoing = Meerkat.get('sender')
assert len(meerkat_outgoing.json()) == 1, "Meerkat can't see an outgoing message"
msg_id = meerkat_outgoing.json()[0]['id']
print("Meerkat's mailboxes passed the initial message test")
print("Checking Pangolin's mailboxes")
pangolin_read_mailbox = Pangolin.get('read')
assert pangolin_read_mailbox.json() == [], "read messages showed up in Pangolin's mailbox"
pangolin_outgoing = Pangolin.get('sender')
assert pangolin_outgoing.json() == [], "Pangolin appears to have an outgoing message"
pangolin_mailbox = Pangolin.get()
assert int(pangolin_mailbox.json()[0]['id']) == msg_id, "No matching message in pangolin's inbox"
message = Pangolin.get(f'{msg_id}').json()
assert message['title'] == "Hello Pangolin" and message['body'] == "It's me, Meerkat", 'Wrong message found'
pangolin_read_mailbox = Pangolin.get('read')
assert len(pangolin_read_mailbox.json()) == 1, "Message not found in read"
print("Pangolin succesfully passed the initial message test")
print("Ensuring Badger is unable to access message")
badger_mail = Badger.get('all')
assert badger_mail.json() == [], "Badger has mail"
badger_message = Badger.get(f'{msg_id}')
assert badger_message.status_code >= 400, 'Badger gained unauthorized access'
print("Badger succesfully passed the initial message test")
print("Ensuring Anon is unable to access message")
anon_mail = Anon.get()
assert anon_mail.status_code >= 400, "Anon can see a mailbox"
anon_message = Anon.get(f'{msg_id}')
assert anon_message.status_code >= 400, "Anon can see meerkat's message"
print("Anon passed test")
print('Pangolin deleting message')
deletion = Pangolin.delete(f'{msg_id}')
assert deletion.status_code == 204, "failed to delete"
pangolin_read_mailbox = Pangolin.get('read')
print(pangolin_read_mailbox.json())
assert pangolin_read_mailbox.json() == [], "read messages showed up in Pangolin's mailbox"
pangolin_deleted = Pangolin.get('deleted')
assert len(pangolin_deleted.json()) == 1, "Deleted message does not show up"
print('Pangolin successfully deleted messages')
print("Ensuring meerkat can still see message")
meerkat_outgoing = Meerkat.get('sender')
assert len(meerkat_outgoing.json()) == 1, "Meerkat can't see an outgoing message"
meerkat_outgoing = Meerkat.get('deleted')
assert meerkat_outgoing.json() == [], "Meerkat sees deleted message"
print('Meerkat can successfully see message pangolin deleted as non-deleted') | 50 | 115 | 0.763438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,456 | 0.455 |
380600494572f949b77592c439a21526f70f73b6 | 123 | py | Python | contact/urls.py | BobsProgrammingAcademy/Portfolio-Website-Deployment | e3122f6fa0376f64b1580e03b8cb43da6dc85a1a | [
"MIT"
] | null | null | null | contact/urls.py | BobsProgrammingAcademy/Portfolio-Website-Deployment | e3122f6fa0376f64b1580e03b8cb43da6dc85a1a | [
"MIT"
] | null | null | null | contact/urls.py | BobsProgrammingAcademy/Portfolio-Website-Deployment | e3122f6fa0376f64b1580e03b8cb43da6dc85a1a | [
"MIT"
] | null | null | null | from django.urls import path
from .views import ContactListView
urlpatterns = [
path('', ContactListView.as_view()),
] | 20.5 | 40 | 0.739837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0.01626 |
38070ca03e30ff02df559993d8efdc9637c42c89 | 4,610 | py | Python | src/sentry/integrations/github/client.py | detouched/sentry | 1d3cc332c9ee1c2cf5ddaf1e850e14386c3684dd | [
"BSD-3-Clause"
] | 1 | 2018-12-04T12:57:00.000Z | 2018-12-04T12:57:00.000Z | src/sentry/integrations/github/client.py | detouched/sentry | 1d3cc332c9ee1c2cf5ddaf1e850e14386c3684dd | [
"BSD-3-Clause"
] | 1 | 2021-05-09T11:43:43.000Z | 2021-05-09T11:43:43.000Z | src/sentry/integrations/github/client.py | detouched/sentry | 1d3cc332c9ee1c2cf5ddaf1e850e14386c3684dd | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from datetime import datetime
from sentry.integrations.github.utils import get_jwt
from sentry.integrations.client import ApiClient
class GitHubClientMixin(ApiClient):
allow_redirects = True
base_url = 'https://api.github.com'
def get_jwt(self):
return get_jwt()
def get_last_commits(self, repo, end_sha):
# return api request that fetches last ~30 commits
# see https://developer.github.com/v3/repos/commits/#list-commits-on-a-repository
# using end_sha as parameter
return self.get(
u'/repos/{}/commits'.format(
repo,
),
params={'sha': end_sha},
)
def compare_commits(self, repo, start_sha, end_sha):
# see https://developer.github.com/v3/repos/commits/#compare-two-commits
# where start sha is oldest and end is most recent
return self.get(u'/repos/{}/compare/{}...{}'.format(
repo,
start_sha,
end_sha,
))
def repo_hooks(self, repo):
return self.get(u'/repos/{}/hooks'.format(repo))
def get_commits(self, repo):
return self.get(u'/repos/{}/commits'.format(repo))
def get_commit(self, repo, sha):
return self.get(u'/repos/{}/commits/{}'.format(repo, sha))
def get_repo(self, repo):
return self.get(u'/repos/{}'.format(repo))
def get_repositories(self):
repositories = self.get(
'/installation/repositories',
params={'per_page': 100},
)
return repositories['repositories']
def search_repositories(self, query):
return self.get(
'/search/repositories',
params={'q': query},
)
def get_assignees(self, repo):
return self.get(u'/repos/{}/assignees'.format(repo))
def get_issues(self, repo):
return self.get(u'/repos/{}/issues'.format(repo))
def search_issues(self, query):
return self.get(
'/search/issues',
params={'q': query},
)
def get_issue(self, repo, number):
return self.get(u'/repos/{}/issues/{}'.format(repo, number))
def create_issue(self, repo, data):
endpoint = u'/repos/{}/issues'.format(repo)
return self.post(endpoint, data=data)
def create_comment(self, repo, issue_id, data):
endpoint = u'/repos/{}/issues/{}/comments'.format(repo, issue_id)
return self.post(endpoint, data=data)
def get_user(self, gh_username):
return self.get(u'/users/{}'.format(gh_username))
def request(self, method, path, headers=None, data=None, params=None):
if headers is None:
headers = {
'Authorization': 'token %s' % self.get_token(),
# TODO(jess): remove this whenever it's out of preview
'Accept': 'application/vnd.github.machine-man-preview+json',
}
return self._request(method, path, headers=headers, data=data, params=params)
def get_token(self):
"""
Get token retrieves the active access token from the integration model.
        Should the token have expired, a new token will be generated and
        automatically persisted into the integration.
"""
token = self.integration.metadata.get('access_token')
expires_at = self.integration.metadata.get('expires_at')
if expires_at is not None:
expires_at = datetime.strptime(expires_at, '%Y-%m-%dT%H:%M:%S')
if not token or expires_at < datetime.utcnow():
res = self.create_token()
token = res['token']
expires_at = datetime.strptime(
res['expires_at'],
'%Y-%m-%dT%H:%M:%SZ',
)
self.integration.metadata.update({
'access_token': token,
'expires_at': expires_at.isoformat(),
})
self.integration.save()
return token
def create_token(self):
return self.post(
u'/installations/{}/access_tokens'.format(
self.integration.external_id,
),
headers={
'Authorization': 'Bearer %s' % self.get_jwt(),
# TODO(jess): remove this whenever it's out of preview
'Accept': 'application/vnd.github.machine-man-preview+json',
},
)
class GitHubAppsClient(GitHubClientMixin):
def __init__(self, integration):
self.integration = integration
super(GitHubAppsClient, self).__init__()
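# Illustrative usage sketch (not part of the original file); `integration` is
# assumed to be a saved sentry Integration model whose metadata holds the
# GitHub App credentials:
#
#     client = GitHubAppsClient(integration)
#     repos = client.get_repositories()
#     issue = client.create_issue('getsentry/sentry', {'title': 'Bug report'})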
| 32.237762 | 89 | 0.58872 | 4,431 | 0.961171 | 0 | 0 | 0 | 0 | 0 | 0 | 1,291 | 0.280043 |
38075a559a8aab3be200b9d2861d53ab27a363b2 | 1,504 | py | Python | data/tokens.py | account77/mendax_py | 2fa6fdf0ebb835a269665a3b64adf43b8a6e4e20 | [
"MIT"
] | 1 | 2022-02-08T06:50:33.000Z | 2022-02-08T06:50:33.000Z | data/tokens.py | account77/mendax_py | 2fa6fdf0ebb835a269665a3b64adf43b8a6e4e20 | [
"MIT"
] | null | null | null | data/tokens.py | account77/mendax_py | 2fa6fdf0ebb835a269665a3b64adf43b8a6e4e20 | [
"MIT"
] | null | null | null | TT_INT = 'INT' # int
TT_FLOAT = 'FLOAT' # float
TT_STRING = 'STRING' # string
TT_IDENTIFIER = 'IDENTIFIER' # variable name
TT_KEYWORD = 'KEYWORD' # keyword
TT_PLUS = 'PLUS' # +
TT_MINUS = 'MINUS' # -
TT_MUL = 'MUL' # *
TT_DIV = 'DIV' # /
TT_POW = 'POW' # ^
TT_EQ = 'EQ' # =
TT_LPAREN = 'LPAREN' # (
TT_RPAREN = 'RPAREN' # )
TT_LSQUARE = 'LSQUARE'
TT_RSQUARE = 'RSQUARE'
TT_EE = 'EE' # ==
TT_NE = 'NE' # !=
TT_LT = 'LT' # >
TT_GT = 'GT' # <
TT_LTE = 'LTE' # >=
TT_GTE = 'GTE' # <=
TT_COMMA = 'COMMA'
TT_ARROW = 'ARROW'
TT_NEWLINE = 'NEWLINE'
TT_EOF = 'EOF'
KEYWORDS = [
'VAR',
'AND',
'OR',
'NOT',
'IF',
'ELIF',
'ELSE',
'FOR',
'TO',
'STEP',
'WHILE',
'FUN',
'THEN',
'END',
    'RETURN', # lets a function exit early, even "empty-handed"
'CONTINUE',
'BREAK',
]
class Token:
def __init__(self, type_, value=None, pos_start=None, pos_end=None):
        # optional parameters: value, pos_start, pos_end
self.type = type_
self.value = value
        # if pos_end is not passed in (e.g. for a single token like 1), then pos_start == 1 and pos_end == 1
if pos_start:
            self.pos_start = pos_start.copy() # call the copy method
self.pos_end = pos_start.copy()
self.pos_end.advance()
if pos_end:
self.pos_end = pos_end.copy()
    # check whether both the type and the value match
def matches(self, type_, value):
return self.type == type_ and self.value == value
def __repr__(self):
if self.value:
return f'{self.type}:{self.value}'
return f'{self.type}'
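# Illustrative sketch (not part of the original file):
#
#     tok = Token(TT_KEYWORD, 'IF')
#     tok.matches(TT_KEYWORD, 'IF')  # -> True
#     repr(tok)                      # -> 'KEYWORD:IF'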
| 21.183099 | 72 | 0.535904 | 764 | 0.482323 | 0 | 0 | 0 | 0 | 0 | 0 | 583 | 0.368056 |
3808406b3ee798849fbc2399203409d6b196a582 | 1,503 | py | Python | server/tree_gen/tree_gen.py | patrickferner/Lo-Finity | 406531106277c2ad3422518e8beb39b1c3fa53f3 | [
"MIT"
] | 1 | 2021-06-14T14:41:36.000Z | 2021-06-14T14:41:36.000Z | server/tree_gen/tree_gen.py | Jorbeatz/Lo-Finity | 406531106277c2ad3422518e8beb39b1c3fa53f3 | [
"MIT"
] | null | null | null | server/tree_gen/tree_gen.py | Jorbeatz/Lo-Finity | 406531106277c2ad3422518e8beb39b1c3fa53f3 | [
"MIT"
] | 1 | 2019-07-22T21:45:10.000Z | 2019-07-22T21:45:10.000Z | import requests
import json
import pdb
import time
#url_string = "https://api.hooktheory.com/v1/trends/nodes?cp=1"
response = requests.get("https://api.hooktheory.com/v1/trends/nodes?cp=1", headers={'Authorization': "Bearer 0449bff346d2609ac119bfb7d290e9bb"})
hook_result = json.loads(response.text)
json_result = {}
chord_ID = "1"
json_result[1] = {"prob": [], "child": []}
d_limit = 4
def build_json(current, depth, url_string):
# if depth is d_limit:
# return
global chord_ID
global hook_result
global response
index = 0
if depth != 0:
url_string = url_string + "," + chord_ID
print(url_string)
response = requests.get(url_string, headers={'Authorization': "Bearer 0449bff346d2609ac119bfb7d290e9bb"})
hook_result = json.loads(response.text)
time.sleep(2)
print("Called API Depth " + str(depth))
for obj in hook_result[:4]:
probability = obj["probability"]
chord_ID = obj["chord_ID"].encode("ascii")
current["prob"].append(probability)
current["child"].append({chord_ID: {}})
        if chord_ID == '1' or depth == d_limit:
return
current["child"][index][chord_ID] = {"prob": [], "child": []}
build_json(current["child"][index][chord_ID], depth+1, url_string)
index += 1
current = json_result[1]
build_json(current, 0, 'https://api.hooktheory.com/v1/trends/nodes?cp=1')
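# Resulting shape (illustrative): json_result[1] is a nested tree in which each
# node holds parallel lists, {"prob": [p1, ...], "child": [{chord_ID: node}, ...]},
# built from the top-4 next-chord probabilities at each level, up to d_limit deep.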
print(json_result)
with open('chord_tree.json', 'w') as outfile:
json.dump(json_result, outfile) | 28.903846 | 144 | 0.667332 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 434 | 0.288756 |
3809288c999b3cc7a796e932763fb145aa047d6c | 24,456 | py | Python | rapt/GuidingCenter.py | mkozturk/rapt | cb293ac98d2d7707baf822b4e0efe18b2355f35c | [
"MIT"
] | 1 | 2021-04-12T09:44:56.000Z | 2021-04-12T09:44:56.000Z | rapt/GuidingCenter.py | mkozturk/rapt | cb293ac98d2d7707baf822b4e0efe18b2355f35c | [
"MIT"
] | null | null | null | rapt/GuidingCenter.py | mkozturk/rapt | cb293ac98d2d7707baf822b4e0efe18b2355f35c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
GuidingCenter class definition
AUTHOR:
Kaan Ozturk <mkozturk@yahoo.com>
2016, Rice University
"""
import numpy as np
from scipy.integrate import ode
import pickle
import rapt.utils as ru
import rapt.flutils as rfu
from rapt import c, params, NonAdiabatic
class GuidingCenter:
"""
A GuidingCenter of a charged particle, moving under the influence of given
electric and magnetic fields.
Parameters
----------
pos: list or array
The initial position (x,y,z) of the particle, in meters.
v: float
The initial speed of the particle, in m/s.
pa: float
The initial pitch angle, in degrees (not needed if ppar is given)
ppar: float
The initial parallel momentum, in kg m/s (not needed if pa is given)
t0: float
The time of simulation at the beginning (seconds), ignored for fields that do not depend on time.
mass: float
The mass of the particle, in kg.
charge: float
The charge of the particle, in Coulombs.
field: Field object
The field object that provides electric and magnetic field vectors and related quantities.
Attributes
----------
tcur : float
The current time value (seconds), updated after every integration step.
trajectory : n-by-5 array
The trajectory array, each row in the form (t, x, y, z, ppar).
check_adiabaticity : bool
If True, checks for the adiabaticity condition after every integration step.
See Also
--------
rapt.Particle
rapt.Adaptive
Notes
-----
Examples
--------
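    A minimal sketch (illustrative, not from the original source): assuming
    `fld` is a rapt field object and using the proton mass and charge,

    >>> gc = GuidingCenter(pos=[4.0e7, 0, 0], v=1.0e7, pa=30, t0=0,
    ...                    mass=1.672e-27, charge=1.602e-19, field=fld)
    >>> gc.advance(1.0)              # follow the guiding center for 1 s
    >>> times, xs = gc.gett(), gc.getx()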
"""
def __init__(self, pos=[], v=0, pa=None, ppar=None, t0=0, mass=None, charge=None, field=None):
"""
Object constructor.
Parameters
----------
pos: list or array
The initial position (x,y,z) of the particle, in meters.
v: float
The initial speed of the particle, in m/s.
pa: float
The initial pitch angle, in degrees (not needed if ppar is given)
ppar: float
The initial parallel momentum, in kg m/s (not needed if pa is given)
t0: float
The time of simulation at the beginning (seconds), ignored for fields that do not depend on time.
mass: float
The mass of the particle, in kg.
charge: float
The charge of the particle, in Coulombs.
field: Field object
The field object that provides electric and magnetic field vectors and related quantities.
See Also
--------
init
Notes
-----
All parameters above are optional. The object can be initialized with
an empty parameter set if it is going to be initialized differently
(e.g. with `init` method)
"""
# pos: initial position (array of 3)
# v: initial speed (scalar)
# pa: initial pitch angle
# ppar = initial parallel momentum
# (N.B. Either pa or ppar should be provided. If both given, pa is used.)
# t0: initial time
# mass: particle mass in kg
# charge: particle charge in C
# field: The field object
self.pos = pos # initial position array
self.v = v
self.t0 = t0 # initial time
self.tcur = t0 # current time
self.mass = mass # mass of the particle
self.charge = charge # charge of the particle
self.field = field
self.trajectory = np.zeros((1,5))
self.check_adiabaticity = False
# The object can be initialized two ways:
# Either by specifying the initial conditions,
# or by an empty call, to be initialized later by another object.
# We consider the call empty when pos and v are not specified.
if not (pos==[] or v==0): # if initial state is given explicitly
gamma = 1/np.sqrt(1-(v/c)**2)
if pa != None:
vpar = 0 if pa==90 else v * np.cos(pa*np.pi/180)
ppar = gamma*mass*vpar
self.mu = ru.magnetic_moment(self.tcur, self.pos, ppar/(mass*gamma),
self.v, self.field, self.mass)
self.trajectory[0,0] = t0
self.trajectory[0,1:4] = pos[:]
self.trajectory[0,4] = ppar
def init(self, p):
"""
Initialize a Particle using the state of another Particle or GuidingCenter object.
Parameters
-----------
p : Particle or GuidingCenter object.
The object that is used to initialize the current GuidingCenter.
Notes
-----
Takes the last state (position and momentum) of the given `Particle`
or `GuidingCenter`, sets the initial conditions of self to match them,
and runs `__init__` again. Therefore, all existing data is erased.
"""
from rapt import Particle # Import here to avoid circular imports.
if isinstance(p, GuidingCenter):
B = p.field.magB(p.trajectory[-1,:4])
gamma = np.sqrt(1 + 2*p.mu*B/(p.mass*c*c) + (p.trajectory[-1,4]/p.mass/c)**2)
if gamma-1 < 1e-6: # nonrelativistic
v = np.sqrt(2*p.mu*B/p.mass + (p.trajectory[-1,4]/p.mass)**2)
else:
v = c * np.sqrt(1-1/gamma**2)
self.__init__(pos = p.trajectory[-1,1:4],
v = v,
ppar = p.trajectory[-1,4],
t0 = p.trajectory[-1,0],
mass = p.mass,
charge = p.charge,
field = p.field)
self.check_adiabaticity = p.check_adiabaticity
elif isinstance(p, Particle):
mom = p.trajectory[-1,4:]
gm = np.sqrt(p.mass**2 + np.dot(mom,mom)/c**2) # gamma * m
vel = mom/gm # velocity
pos, vp, v = ru.guidingcenter(p.trajectory[-1,0],
p.trajectory[-1,1:4],
vel,
p.field,
p.mass,
p.charge)
gamma = 1 / np.sqrt(1-(v/c)**2)
self.__init__(pos = pos,
v = v,
ppar = p.mass*gamma*vp,
t0 = p.trajectory[-1,0],
mass = p.mass,
charge = p.charge,
field = p.field)
self.check_adiabaticity = p.check_adiabaticity
else:
            raise ValueError("Particle or GuidingCenter objects required.")
def save(self,filename):
"""
Save the object on disk.
Uses the built-in pickle module.
Parameters
----------
filename : str
The name of the file to store. If file exists, it will be overwritten.
"""
f = open(filename,"wb")
pickle.dump(self,f)
f.close()
def load(self,filename):
"""
Load the object from disk.
Uses the built-in pickle module. All existing data is replaced
with the stored data.
Parameters
----------
filename : str
The name of the file where the object is stored.
"""
f = open(filename, "rb")
p = pickle.load(f)
f.close()
for k in p.__dict__.keys():
self.__dict__[k] = p.__dict__[k]
def setke(self, ke, unit="ev"):
"""
Scale the velocity vector with the speed corresponding to the given kinetic energy.
Reinitializes the object.
Parameters
-----------
ke : float
The kinetic energy of the particle (eV by default). Can be relativistic.
unit : str, optional
The unit of the energy. If "ev", electron volts, otherwise Joule.
"""
# Calculate the current pitch angle
mc = self.mass*c
t,x,y,z,ppar = self.trajectory[-1]
B = self.field.magB(self.trajectory[-1,:4])
gammasq_minus_1 = 2*self.mu*B/(mc*c) + (ppar/mc)**2
if np.sqrt(gammasq_minus_1 + 1) - 1 < 1e-6: # nonrelativistic
ptot = np.sqrt(2*self.mass*self.mu*B + ppar**2) # total momentum
else: # relativistic
ptot = np.sqrt(gammasq_minus_1)*mc # total momentum
pa_old = np.arccos(ppar/ptot)
# Find the new speed corresponding to the given relativistic energy
v_new = ru.speedfromKE(ke, self.mass, unit) # new speed
# Reinitialize the guiding center
self.__init__(pos=[x,y,z], v=v_new, pa=pa_old, t0=t,
mass=self.mass, charge=self.charge, field=self.field )
def setpa(self, pa):
"""
Reinitialize the object with the given pitch angle (in degrees).
Modifies the velocity vector while keeping the energy constant so that
the particle's pitch angle (angle between the velocity and magnetic field
vectors) is `pa` degrees. Runs the `__init__` method, so any existing data
will be lost.
Parameters
-----------
pa : float
The new pitch angle in degrees.
"""
# Calculate the current speed
mc = self.mass*c
t,x,y,z,ppar = self.trajectory[-1]
B = self.field.magB(self.trajectory[-1,:4])
gammasq = 1 + 2*self.mu*B/(mc*c) + (ppar/mc)**2
if np.sqrt(gammasq)-1 < 1e-6: # nonrelativistic
v = np.sqrt(2*self.mass*self.mu*B + ppar**2) / self.mass
else: # relativistic
v = c * np.sqrt(1-1/gammasq)
# Reinitialize the guiding center
self.__init__(pos = [x,y,z], v=v, pa=pa, t0=t,
mass=self.mass, charge=self.charge, field=self.field)
def isadiabatic(self):
"""
Check if the motion is adiabatic at the current location.
The adiabaticity condition is defined as
.. math::
\rho_c / L < \epsilon_s
\tau_c / T < \epsilon_t
where :math: `\rho_c` is the cyclotron radius, :math: `\tau_c` is the
cyclotron period, L is the field length scale, T is the field time scale,
and :math: `\epsilon_s, \epsilon_t` are thresholds for adiabaticity.
The length scales are provided by the field object, and threshold
parameters set with `rapt.params["epss"]` and `rapt.params["epst"]`,
respectively.
Returns
-------
bool
True if the particle's motion satisfies the adiabaticity conditions
at the present location, False otherwise.
"""
# Needs to be rewritten to avoid repeated calculations of the field vector.
# Spatial and temporal adiabaticity thresholds
eps_sp = params['epss']
eps_t = params['epst']
# Conditions:
# gyroradius / lengthscale < eps_sp
# and
# gyroperiod / timescale < eps_t
if self.field.static:
return self.cycrad() / self.field.lengthscale(self.trajectory[-1,:4]) < eps_sp
else:
return self.cycrad() / self.field.lengthscale(self.trajectory[-1,:4]) < eps_sp \
and self.cycper() / self.field.timescale(self.trajectory[-1,:4]) < eps_t
def _TaoChanBrizardEOM(self, t, Y):
# Phase-space preserving guiding-center EOM,
# valid with nonzero electric fields and and/or time-varying fields.
# Also suitable for use with static, purely magnetic fields.
# Reference: Tao, X., A. A. Chan, and A. J. Brizard (2007),
# Hamiltonian theory of adiabatic motion of relativistic charged particles,
# Phys. Plasmas, 14, 092107, doi:10.1063/1.2773702
tpos = np.concatenate(([t],Y[:3]))
ppar = Y[3]
B = self.field.B(tpos)
Bmag = np.sqrt(np.dot(B,B))
unitb = B / Bmag
gamma = np.sqrt(1 + 2*self.mu*Bmag/(self.mass*c*c) + (ppar/(self.mass*c))**2)
cb = self.field.curlb(tpos)
Bstar = B + ppar * cb / self.charge
Bstarpar = np.dot(Bstar,unitb)
E = self.field.E(tpos)
dbdt = self.field.dbdt(tpos)
gB = self.field.gradB(tpos)
Estar = E - (ppar*dbdt + self.mu * gB / gamma)/self.charge
retval = np.ones(4)
retval[:3] = (ppar * Bstar / (gamma*self.mass) + np.cross(Estar,unitb) ) / Bstarpar
retval[3] = self.charge*np.dot(Estar,Bstar) / Bstarpar
if params["enforce equatorial"]:
retval[2] = retval[3] = 0
return retval
def _BrizardChanEOM(self, t, Y):
# Phase-space preserving guiding-center EOM.
# Valid only under static magnetic fields.
# Special case of TaoChanBrizardEOM when E=0, dB/dt=0.
# Reference: A. J. Brizard and A. A. Chan,
# Nonlinear relativistic gyrokinetic Vlasov-Maxwell equations,
# Phys. Plasmas 6, 4548 (1999)
gamma = 1.0/np.sqrt(1 - (self.v/c)**2)
tpos = np.concatenate(([t],Y[:3]))
ppar = Y[3]
B = self.field.B(tpos)
Bmag = np.sqrt(np.dot(B,B))
unitb = B / Bmag
gB = self.field.gradB(tpos)
cb = self.field.curlb(tpos)
Bstar = B + ppar * cb / self.charge
Bstarpar = np.dot(Bstar, unitb)
retval = np.ones(4)
retval[:3] = (ppar * Bstar / (gamma*self.mass) + self.mu * np.cross(unitb, gB) / (self.charge * gamma) ) / Bstarpar
retval[3] = -self.mu * np.dot(Bstar, gB) / (gamma * Bstarpar)
if params["enforce equatorial"]:
retval[2] = retval[3] = 0
return retval
def _NorthropTellerEOM(self,t,Y):
gamma = 1.0/np.sqrt(1 - (self.v/c)**2)
gm = gamma * self.mass
tpos = np.concatenate(([t],Y[:3]))
ppar = Y[3]
Bvec = self.field.B(tpos)
B = np.sqrt(np.dot(Bvec,Bvec))
bdir = Bvec / B
gB = self.field.gradB(tpos)
retval = np.ones(4)
retval[:3] = (gm*self.v**2 + ppar**2/gm)/(2*self.charge*B**2) * np.cross(bdir,gB) + ppar*bdir/gm
retval[3] = -self.mu * np.dot(bdir, gB) / gamma
if params["enforce equatorial"]:
retval[2] = retval[3] = 0
return retval
def advance(self, delta, eom="TaoChanBrizardEOM"):
"""
Advance the particle position and parallel momentum for a given duration.
The trajectory is initialized at the latest state of the `GuidingCenter`
and integrated for an additional `delta` seconds. Uses the
`scipy.integrate.ode` class with `"dopri5"` solver.
This method can be called many times.
Parameters
----------
delta : float
The number of seconds to advance the trajectory.
eom : {'TaoChanBrizardEOM', 'BrizardChanEOM, 'NorthropTellerEOM'}
The equation of motion that is solved.
Raises
-------
NonAdiabatic
Only if the `check_adiabaticity` attribute is set to True.
Notes
-----
Assuming that the magnetic moment is a constant of motion, and averaging
out the gyrophase, reduces the number of dynamic variables to 4.
Despite their prevalence in textbooks, Northrop-Teller equations are
not accurate enough. The equations by Tao, Chan & Brizard are designed
to preserve the phase-space volume, and give more accurate results than
Northrop-Teller equations.
Neither Northrop-Teller (as implemented here), nor the Brizard-Chan
equations are suitable to use with electric fields or with time-varying
magnetic fields. Tao-Chan-Brizard equations must be used.
Tao-Chan-Brizard EOM (the default setting) reduces to Brizard-Chan EOM
under static conditions, with little computational overhead.
The explicit runge-kutta method of order 4(5) due to Dormand & Prince
with stepsize control is used to solve for the motion. The relative
tolerance `rtol` and the absolute tolerance `atol` of the solver can be
set with `rapt.params["solvertolerances"] = (rtol, atol)`
"""
# Set resolution of the trajectory
if params["GCtimestep"] != 0:
dt = params["GCtimestep"]
else:
dt = self.bounceperiod()/params["bounceresolution"]
t0 = self.trajectory[-1,0]
rtol, atol = params["solvertolerances"]
deriv = eval("self._"+eom)
r = ode(deriv).set_integrator("dopri5",rtol=rtol, atol=atol)
r.set_initial_value(self.trajectory[-1,1:], self.trajectory[-1,0])
while r.successful() and r.t < t0+delta:
r.integrate(r.t+dt)
nextpt = np.hstack(([r.t],r.y))
self.trajectory = np.vstack((self.trajectory,nextpt))
self.tcur = r.t
if self.check_adiabaticity and not self.isadiabatic():
raise NonAdiabatic
def gett(self):
"""Return a 1-d array of time values along the trajectory."""
return self.trajectory[:,0]
def getx(self):
"""Return a 1-d array of x-coordinate values along the trajectory."""
return self.trajectory[:,1]
def gety(self):
"""Return a 1-d array of y-coordinate values along the trajectory."""
return self.trajectory[:,2]
def getz(self):
"""Return a 1-d array of z-coordinate values along the trajectory."""
return self.trajectory[:,3]
def getpp(self):
"""Return a 1-d array of parallel momentum values along the trajectory."""
return self.trajectory[:,4]
def getr(self):
"""Return a 1-d array of radial distance along the trajectory."""
return np.sqrt( self.getx()**2 + self.gety()**2 + self.getz()**2 )
def gettheta(self):
"""Return a 1-d array of azimuthal angle coordinate (radians) along the
trajectory."""
return np.arctan2( self.gety(), self.getx() )
    def getphi(self): # Polar angle
"""Return a 1-d array of polar angle coordinate (radians) along the
trajectory."""
return np.arccos( self.getz() / self.getr() )
def getB(self):
"""Return a 1-d array of total magnetic field strength values along the
trajectory."""
out = np.zeros(len(self.trajectory))
for i, row in enumerate(self.trajectory):
out[i] = self.field.magB(row[:4])
return out
def getp(self):
"""Return a 1-d array of total momentum values along the trajectory."""
two_m_mu = 2*self.mass*self.mu
mc = self.mass * c
gamma = self.getgamma()
B = self.getB()
pp = self.trajectory[:,4]
assert B.shape[0] == pp.shape[0] == gamma.shape[0]
res = np.zeros(gamma.shape[0])
# Try to vectorize this later.
for i,g in enumerate(gamma):
if g-1 < 1e-6: #nonrelativistic
res[i] = np.sqrt(two_m_mu*B[i] + pp[i]**2)
else: # relativistic
                res[i] = mc*np.sqrt((g-1)*(g+1))
return res
def getv(self):
"""Return a 1-d array of particle speed values along the guiding-center trajectories."""
mom = self.getp()
        gamma = self.getgamma()
assert mom.shape[0] == gamma.shape[0]
return mom/gamma
def cycrad(self):
"""Return the cyclotron radius value at the final position."""
t, r, pp = self.trajectory[-1, 0], self.trajectory[-1, 1:4], self.trajectory[-1, 4]
# The advancer stores parallel momentum at last element. Convert to parallel speed.
Bmag = self.field.magB(self.trajectory[-1,:4])
gamma = np.sqrt(1 + 2*self.mu*Bmag/(self.mass*c*c) + (pp/self.mass/c)**2)
if gamma-1 < 1e-6: # nonrelativistic
vp = pp/self.mass
v = np.sqrt(2*self.mu*Bmag/self.mass + vp**2)
else:
vp = pp / self.mass / gamma # parallel speed
v = c * np.sqrt(1-1/gamma**2)
return ru.cyclotron_radius2(t, r, vp, v, self.field, self.mass, self.charge)
def cycper(self):
"""Return the cyclotron period value at the final position."""
t, r, pp = self.trajectory[-1, 0], self.trajectory[-1, 1:4], self.trajectory[-1, 4]
Bmag = self.field.magB(self.trajectory[-1,:4])
        gamma = np.sqrt(1 + 2*self.mu*Bmag/(self.mass*c*c) + (pp/(self.mass*c))**2)
if gamma - 1 < 1e-6: # nonrelativistic
vp = pp/self.mass
v = np.sqrt(2*self.mu*Bmag/self.mass + vp**2)
else:
v = c * np.sqrt(1-1/gamma**2)
return ru.cyclotron_period2(t, r, v, self.field, self.mass, self.charge)
def getgamma(self):
"""Return a 1-d array of relativistic factor values along the trajectory.
The relativistic factor is defined as :math:`\gamma = \sqrt{1 + 2\mu B / (mc^2) + (p_{||}/mc)^2}`
"""
mc = self.mass*c
mc2 = mc*c
pp = self.trajectory[:,4]
B = self.getB()
return np.sqrt(1 + 2*self.mu*B/mc2 + (pp/mc)**2)
def getBm(self):
"""Returns an array of mirror field values at each trajectory point.
Defined using the conservation of magnetic moment.
"""
mc = self.mass*c
n = self.trajectory.shape[0]
gamma = self.getgamma()
B = self.getB()
pp = self.trajectory[:,4]
assert B.shape[0] == pp.shape[0] == gamma.shape[0] == n
res = np.zeros(n)
# Try to vectorize this later.
for i,g in enumerate(gamma):
if g-1 < 1e-6: #nonrelativistic
res[i] = (B[i] + 0.5*pp[i]**2/(self.mu*self.mass))
else: # relativistic
res[i] = B[i]/(1 - (pp[i]/mc)**2/((g-1)*(g+1)))
return res
def getke(self):
"""Return a 1-d array of kinetic energy values (Joule) along the
trajectory."""
mc = self.mass*c
mc2 = mc*c
n = self.trajectory.shape[0]
gamma = self.getgamma()
B = self.getB()
pp = self.trajectory[:,4]
assert gamma.shape[0] == B.shape[0] == pp.shape[0] == n
res = np.zeros(n)
# Try to vectorize this later.
for i,g in enumerate(gamma):
if g-1 < 1e-6: #nonrelativistic
res[i] = self.mu*B[i] + 0.5*pp[i]**2/self.mass
else: # relativistic
res[i] = (g-1)*mc2
return res
def bounceperiod(self):
"""Return the bounce period at the current position."""
tpos, ppar = self.trajectory[-1, 0:4], self.trajectory[-1, 4]
Bmag = self.field.magB(tpos)
gamma = np.sqrt(1 + 2*self.mu*Bmag/(self.mass*c*c) + (ppar/(self.mass*c))**2)
if gamma-1 < 1e-6: #nonrelativistic
p = np.sqrt(2*self.mass*self.mu*Bmag + ppar**2) # total momentum
v = p/self.mass
Bmirror = (p**2) / (2*self.mass*self.mu)
else: # relativistic
p = self.mass * c *np.sqrt((gamma+1)*(gamma-1)) # momentum
Bmirror = p**2 / ((p-ppar)*(p+ppar)) * Bmag
v = p/self.mass/gamma
return rfu.bounceperiod(tpos,self.field,Bmirror,v)
def geteye(self,step=1):
"""Return a 1-d array of (time,second invariant) pairs along the
guiding-center trajectory.
Parameters
----------
step : int, optional
Take every `step` values in the trajectory.
"""
Bmirror = self.getBm()
assert Bmirror.shape[0] == self.trajectory.shape[0]
res = np.zeros(self.trajectory[::step].shape[0])
for i,(row,Bm) in enumerate(zip(self.trajectory[::step], Bmirror[::step])):
res[i] = rfu.eye(row[:4], self.field, Bm)
# zip together with time values
return np.array(list(zip(self.trajectory[::step,0], res)))
| 39.1296 | 124 | 0.550785 | 24,156 | 0.987733 | 0 | 0 | 0 | 0 | 0 | 0 | 11,667 | 0.477061 |
3809e399ed92b8e34acaef2788a9f1182ad1acca | 5,835 | py | Python | tests/hospital_resident/test_solver.py | drvinceknight/matching | da18fc12c880a1292a04d06824b5c17e68349e83 | [
"MIT"
] | null | null | null | tests/hospital_resident/test_solver.py | drvinceknight/matching | da18fc12c880a1292a04d06824b5c17e68349e83 | [
"MIT"
] | null | null | null | tests/hospital_resident/test_solver.py | drvinceknight/matching | da18fc12c880a1292a04d06824b5c17e68349e83 | [
"MIT"
] | null | null | null | """ Unit tests for the HR solver. """
import pytest
from matching import Matching
from matching import Player as Resident
from matching.games import HospitalResident
from matching.players import Hospital
from .params import HOSPITAL_RESIDENT, make_game, make_prefs
@HOSPITAL_RESIDENT
def test_init(resident_names, hospital_names, capacities, seed):
""" Test that an instance of HospitalResident is created correctly when
passed a set of players. """
residents, hospitals, game = make_game(
resident_names, hospital_names, capacities, seed
)
assert game.residents == residents
assert game.hospitals == hospitals
assert all([resident.matching is None for resident in game.residents])
assert all([hospital.matching == [] for hospital in game.hospitals])
assert game.matching is None
@HOSPITAL_RESIDENT
def test_create_from_dictionaries(
resident_names, hospital_names, capacities, seed
):
""" Test that HospitalResident is created correctly when passed a set of
dictionaries for each party. """
resident_prefs, hospital_prefs = make_prefs(
resident_names, hospital_names, seed
)
capacities_ = dict(zip(hospital_names, capacities))
game = HospitalResident.create_from_dictionaries(
resident_prefs, hospital_prefs, capacities_
)
for resident in game.residents:
assert resident.pref_names == resident_prefs[resident.name]
assert resident.matching is None
for hospital in game.hospitals:
assert hospital.pref_names == hospital_prefs[hospital.name]
assert hospital.capacity == capacities_[hospital.name]
assert hospital.matching == []
assert game.matching is None
@HOSPITAL_RESIDENT
def test_inputs_resident_prefs(
resident_names, hospital_names, capacities, seed
):
""" Test that each resident's preference list is a subset of the available
hospitals, and check that an Exception is raised if not. """
_, _, game = make_game(resident_names, hospital_names, capacities, seed)
assert game._check_resident_prefs()
game.residents[0].prefs = [Resident("foo")]
with pytest.raises(Exception):
game._check_resident_prefs()
@HOSPITAL_RESIDENT
def test_inputs_hospital_prefs(
resident_names, hospital_names, capacities, seed
):
""" Test that each hospital has ranked all and only those residents that
have ranked it, and check that an Exception is raised if not. """
_, _, game = make_game(resident_names, hospital_names, capacities, seed)
assert game._check_hospital_prefs()
game.hospitals[0].prefs.pop()
with pytest.raises(Exception):
game._check_hospital_prefs()
@HOSPITAL_RESIDENT
def test_solve(resident_names, hospital_names, capacities, seed):
""" Test that HospitalResident can solve games correctly when passed
players. """
for optimal in ["resident", "hospital"]:
residents, hospitals, game = make_game(
resident_names, hospital_names, capacities, seed
)
matching = game.solve(optimal)
assert isinstance(matching, Matching)
assert set(matching.keys()) == set(hospitals)
matched_residents = [
res for match in matching.values() for res in match
]
assert matched_residents != [] and set(matched_residents).issubset(
set(residents)
)
for resident in set(residents) - set(matched_residents):
assert resident.matching is None
@HOSPITAL_RESIDENT
def test_check_validity(resident_names, hospital_names, capacities, seed):
""" Test that HospitalResident finds a valid matching when the game is
solved. """
_, _, game = make_game(resident_names, hospital_names, capacities, seed)
game.solve()
assert game.check_validity()
@HOSPITAL_RESIDENT
def test_resident_matching(resident_names, hospital_names, capacities, seed):
""" Test that HospitalResident recognises a valid matching requires a resident
to have a preference of their match, if they have one. """
_, _, game = make_game(resident_names, hospital_names, capacities, seed)
game.solve()
game.residents[0].matching = Resident(name="foo")
with pytest.raises(Exception):
game._check_resident_matching()
@HOSPITAL_RESIDENT
def test_hospital_matching(resident_names, hospital_names, capacities, seed):
""" Test that HospitalResident recognises a valid matching requires a
hospital to have a preference of each of its matches, if any. """
_, _, game = make_game(resident_names, hospital_names, capacities, seed)
game.solve()
game.hospitals[0].matching.append(Resident(name="foo"))
with pytest.raises(Exception):
game._check_hospital_matching()
@HOSPITAL_RESIDENT
def test_hospital_capacity(resident_names, hospital_names, capacities, seed):
""" Test that HospitalResident recognises a valid matching requires all
hospitals to not be over-subscribed. """
_, _, game = make_game(resident_names, hospital_names, capacities, seed)
game.solve()
game.hospitals[0].matching = range(game.hospitals[0].capacity + 1)
with pytest.raises(Exception):
game._check_hospital_capacity()
def test_check_stability():
""" Test that HospitalResident can recognise whether a matching is stable or
not. """
residents = [Resident("A"), Resident("B"), Resident("C")]
hospitals = [Hospital("X", 2), Hospital("Y", 2)]
a, b, c = residents
x, y = hospitals
a.set_prefs([x, y])
b.set_prefs([y])
c.set_prefs([y, x])
x.set_prefs([c, a])
y.set_prefs([a, b, c])
game = HospitalResident(residents, hospitals)
matching = game.solve()
assert game.check_stability()
matching[x] = [c]
matching[y] = [a, b]
assert not game.check_stability()
| 30.07732 | 82 | 0.709169 | 0 | 0 | 0 | 0 | 4,931 | 0.845073 | 0 | 0 | 1,237 | 0.211997 |
380b492138a5616a5c2ec4ac40188efd1c8a200c | 1,863 | py | Python | build/lib/rqalpha/data/dtsk_python_interface/utility/tdtsk.py | kinglogxzl/rqalpha | 6203803e0fb130fbb5a280ee8e1b902a8c0fd731 | [
"Apache-2.0"
] | null | null | null | build/lib/rqalpha/data/dtsk_python_interface/utility/tdtsk.py | kinglogxzl/rqalpha | 6203803e0fb130fbb5a280ee8e1b902a8c0fd731 | [
"Apache-2.0"
] | null | null | null | build/lib/rqalpha/data/dtsk_python_interface/utility/tdtsk.py | kinglogxzl/rqalpha | 6203803e0fb130fbb5a280ee8e1b902a8c0fd731 | [
"Apache-2.0"
] | null | null | null | import date
import os
def get_time_delta(kline_type = '1_day'):
if kline_type.lower() == '1_day'.lower():
return 0
kline_array = kline_type.split("_")
if len(kline_array) != 2:
raise ValueError('KLine_type {0} not supported'.format(kline_type))
if kline_array[1].lower() == 'min'.lower():
time_interval = int(kline_array[0])
if time_interval in [1,5,15,30,60]:
return time_interval
else:
raise ValueError('KLine_type {0} not supported'.format(kline_type))
else:
raise ValueError('KLine_type {0} not supported'.format(kline_type))
def get_kline_type_label(kline_type):
    ''' kline_type must fit the format {int}_{time unit}
    Example: 1_min '''
time_interval = get_time_delta(kline_type)
if time_interval == 0:
return 'KLINE_DAILY'
else:
return "KLINE_{0}_MIN".format(str(time_interval))
def get_kline_type_folder_name(kline_type = '1_day'):
    ''' kline_type must fit the format {int}_{time unit}
    Example: 1_min '''
time_interval = get_time_delta(kline_type)
if time_interval == 0:
return 'daily'
else:
return "{0}min".format(str(time_interval))
def get_kline_file_relative_root(kline_type = '1_day'):
return os.path.join('KLine', get_kline_type_folder_name(kline_type))
def get_kline_time_list(kline_type = '1_day'):
time_interval = get_time_delta(kline_type)
if time_interval == 0:
return ["0"]
else:
kline_time_list = []
        # Two 120-minute trading sessions (09:30 and 13:00); integer division
        # keeps the range() argument an int under Python 3
        for time_delta in range(0, 120 // time_interval):
            kline_time_list.append(date.create_kline_time_string(9, 30, time_delta*time_interval))
        for time_delta in range(0, 120 // time_interval):
            kline_time_list.append(date.create_kline_time_string(13, 0, time_delta*time_interval))
return kline_time_list
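# --- Editor's usage sketch (assumes the sibling `date` module imported above
# --- provides create_kline_time_string(hour, minute, offset_minutes)):
#   get_time_delta('15_min')              -> 15
#   get_kline_type_label('15_min')        -> 'KLINE_15_MIN'
#   get_kline_type_folder_name('15_min')  -> '15min'
#   get_kline_file_relative_root('1_day') -> os.path.join('KLine', 'daily')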
| 34.5 | 97 | 0.674181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 342 | 0.183575 |
380b9beb509374b7ce063345397afc45a8540e75 | 1,255 | py | Python | 2021/4/solution1.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | [
"WTFPL"
] | 2 | 2020-12-04T09:45:38.000Z | 2020-12-07T14:06:12.000Z | 2021/4/solution1.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | [
"WTFPL"
] | null | null | null | 2021/4/solution1.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | [
"WTFPL"
] | null | null | null | from collections import defaultdict
def check_winner(cards):
for card_index, card in cards.items():
for index in range(5):
complete_line = all([x[1] for x in card[index]])
complete_column = all([card[x][index][1] for x in range(5)])
if complete_line or complete_column:
return card_index
with open("input.txt") as input_file:
lines = input_file.readlines()
drawn = [int(n) for n in lines[0].strip().split(",")]
del lines[0:2]
cards = defaultdict(list)
card = 0
# Cards prep
for line in lines:
line = line.strip()
if not line:
card += 1
continue
card_line = [[int(n), False] for n in line.split()]
cards[card].append(card_line)
# Final game
for n in drawn:
# Find all cells to cross
for card in cards.values():
for line in card:
try:
index = line.index([n, False])
line[index][1] = True
except ValueError:
pass
# Check a winner
winner = check_winner(cards)
if winner is not None:
last_drawn = n
break
# Sum the unmarked numbers on the winning card (names chosen so they do not
# shadow the built-in sum() or the drawn list above)
unmarked_total = 0
for line in cards[winner]:
    for number, marked in line:
        if not marked:
            unmarked_total += number
print(unmarked_total * last_drawn)
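# Editor's note - expected input.txt layout (inferred from the parsing above):
#   line 1: the comma-separated draw order, e.g. "7,4,9,5,..."
#   then blank-line-separated 5x5 whitespace-delimited bingo boards.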
| 22.818182 | 72 | 0.573705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.062948 |
380c11403c1a76b0d531997f5e39af651f1a3bb9 | 31 | py | Python | djshed/__init__.py | carthage-college/django-djshed | 122dc2f9dd582fd943915b6268d4e90dc84993ca | [
"BSD-3-Clause"
] | null | null | null | djshed/__init__.py | carthage-college/django-djshed | 122dc2f9dd582fd943915b6268d4e90dc84993ca | [
"BSD-3-Clause"
] | 9 | 2020-03-04T16:04:21.000Z | 2022-02-14T17:34:07.000Z | djshed/__init__.py | carthage-college/django-djshed | 122dc2f9dd582fd943915b6268d4e90dc84993ca | [
"BSD-3-Clause"
] | null | null | null | from djshed.constants import *
| 15.5 | 30 | 0.806452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
380d0996fe02d82d90603eefc1f9b3eea03168d3 | 666 | py | Python | examples/hover_example.py | kail85/mpldatacursor | 2df44a3912c2684b2d66fadc7cacbb9f60a15186 | [
"MIT"
] | 165 | 2015-01-09T03:48:50.000Z | 2022-03-16T03:25:23.000Z | examples/hover_example.py | kail85/mpldatacursor | 2df44a3912c2684b2d66fadc7cacbb9f60a15186 | [
"MIT"
] | 87 | 2015-02-09T11:17:49.000Z | 2022-01-04T02:48:00.000Z | examples/hover_example.py | kail85/mpldatacursor | 2df44a3912c2684b2d66fadc7cacbb9f60a15186 | [
"MIT"
] | 46 | 2015-01-13T00:59:18.000Z | 2022-03-03T12:46:40.000Z | """
Demonstrates the hover functionality of mpldatacursor as well as point labels
and a custom formatting function. Notice that overlapping points have both
labels displayed.
"""
import string
import matplotlib.pyplot as plt
import numpy as np
from mpldatacursor import datacursor
np.random.seed(1977)
x, y = np.random.random((2, 26))
labels = string.ascii_lowercase
fig, ax = plt.subplots()
ax.scatter(x, y, s=200)
ax.set_title('Mouse over a point')
# Show only the point label and allow nicer formatting if points overlap
formatter = lambda **kwargs: ', '.join(kwargs['point_label'])
datacursor(hover=True, formatter=formatter, point_labels=labels)
plt.show()
| 27.75 | 77 | 0.771772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.430931 |
380dcb21be4aef252c42a69358eeac5c771319a7 | 1,236 | py | Python | FilterResponseNorm.py | PanTongLin/Filter_Response_Norm | b87b82e0dd89c65fc9c6ed4b1154955afe2ad4da | [
"MIT"
] | 1 | 2019-12-03T12:44:10.000Z | 2019-12-03T12:44:10.000Z | FilterResponseNorm.py | PanTongLin/Filter_Response_Norm | b87b82e0dd89c65fc9c6ed4b1154955afe2ad4da | [
"MIT"
] | null | null | null | FilterResponseNorm.py | PanTongLin/Filter_Response_Norm | b87b82e0dd89c65fc9c6ed4b1154955afe2ad4da | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class FilterResponseNorm(nn.Module):
def __init__(self, num_features, eps=1e-6, use_TLU=True):
super(FilterResponseNorm, self).__init__()
self.num_features = num_features
self.eps = eps
self.use_TLU = use_TLU
self.weight = nn.Parameter(torch.Tensor(num_features))
self.bias = nn.Parameter(torch.Tensor(num_features))
if use_TLU:
self.tau = nn.Parameter(torch.Tensor(num_features))
else:
self.register_parameter('tau', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.weight)
nn.init.zeros_(self.bias)
if self.use_TLU:
nn.init.zeros_(self.tau)
def forward(self, input):
nu2 = torch.mean(input.pow(2), (2, 3), keepdim=True)
out = input * torch.rsqrt(nu2 + abs(self.eps))
weight = self.weight[..., None, None]
bias = self.bias[..., None, None]
        # Apply the learned affine transform, then the TLU max(y, tau) when enabled
if self.use_TLU:
tau = self.tau[..., None, None]
return torch.max(weight*out + bias, tau)
else:
            return weight*out + bias
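if __name__ == "__main__":
    # Editor's smoke test: FRN normalizes NCHW feature maps (mean over dims 2, 3).
    frn = FilterResponseNorm(num_features=8)
    x = torch.randn(2, 8, 16, 16)
    print(frn(x).shape)  # -> torch.Size([2, 8, 16, 16])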
| 30.146341 | 63 | 0.602751 | 1,198 | 0.969256 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.046926 |
38102a20a35e7ad75c7282cba75b15f3f03e641a | 13,062 | py | Python | tapas/utils/attention_utils_test.py | apurvak/tapas | 7884280be78d2f58ad9c125504d710ef89f49f9a | [
"Apache-2.0"
] | 816 | 2020-03-31T15:15:56.000Z | 2022-03-31T19:28:02.000Z | tapas/utils/attention_utils_test.py | apurvak/tapas | 7884280be78d2f58ad9c125504d710ef89f49f9a | [
"Apache-2.0"
] | 155 | 2020-05-02T15:45:42.000Z | 2022-03-31T08:35:23.000Z | tapas/utils/attention_utils_test.py | apurvak/tapas | 7884280be78d2f58ad9c125504d710ef89f49f9a | [
"Apache-2.0"
] | 173 | 2020-05-01T02:39:38.000Z | 2022-03-30T06:43:29.000Z | # coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from tapas.models.bert import modeling
from tapas.utils import attention_utils
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class AttentionUtilsTest(tf.test.TestCase):
def test_matches_token_type_id(self):
with self.cached_session() as sess:
row_ids = sess.run(
tf.constant([[1, 2, 2], [5, 5, 6], [1, 2, 3], [4, 5, 6]]))
result = attention_utils._matches_token_type_id(row_ids)
expected_result = sess.run(
tf.constant([
[[1, 0, 0], [0, 1, 1], [0, 1, 1]],
[[1, 1, 0], [1, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
]))
self.assertAllEqual(result, expected_result)
  def test_compute_bucket_id(self):
with self.cached_session() as sess:
column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
input_mask = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
bucket_ids = tf.constant([[0, 0, 0, 1, 2, 1, 2, 3, 1, 2, 3, 3, 4, 4, 4]])
result = sess.run(
attention_utils._compute_bucket_id(
bucket_size=3,
header_size=3,
token_type_id=column_ids,
input_mask=input_mask))
expected_result = sess.run(bucket_ids)
self.assertAllEqual(result, expected_result)
  def test_compute_bucket_id_on_distinct_columns(self):
with self.cached_session() as sess:
      # When bucket size is 1 and the column_ids are a permutation of 0..n-1,
      # the bucket_ids will match the column_ids.
column_ids = tf.constant([[0, 2, 3, 1, 5, 4]])
input_mask = tf.constant([[1, 1, 1, 1, 1, 1]])
bucket_ids = tf.constant([[0, 2, 3, 1, 5, 4]])
result = sess.run(
attention_utils._compute_bucket_id(
bucket_size=1,
header_size=1,
token_type_id=column_ids,
input_mask=input_mask))
expected_result = sess.run(bucket_ids)
self.assertAllEqual(result, expected_result)
  def test_compute_bucket_id_with_header(self):
with self.cached_session() as sess:
# Similar to the distinct column test, but now we have two header tokens
column_ids = tf.constant([[0, 2, 3, 1, 5, 4]])
input_mask = tf.constant([[1, 1, 1, 1, 1, 1]])
bucket_ids = tf.constant([[0, 1, 2, 0, 4, 3]])
result = sess.run(
attention_utils._compute_bucket_id(
bucket_size=1,
header_size=2,
token_type_id=column_ids,
input_mask=input_mask))
expected_result = sess.run(bucket_ids)
self.assertAllEqual(result, expected_result)
def test_compute_headwise_sparse_attention_mask(self):
with self.cached_session() as sess:
# Table Structure
# Q1 Q2 A0 B0 C0
# A1 B1 C1
# A2 B2 C2 PAD1 PAD2 PAD3 PAD4
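      # (Q1/Q2 are question tokens in segment 0 and keep global attention; as
      # the expected masks below show, table tokens are restricted to their
      # own row on the row heads and to their own column on the column heads.)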
row_ids = tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0]])
input_mask = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
segment_ids = tf.constant([[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
result = sess.run(
attention_utils.compute_headwise_sparse_attention_mask(
num_row_heads=2,
num_column_heads=3,
bucket_size=0,
header_size=None,
input_mask=input_mask,
segment_ids=segment_ids,
column_ids=column_ids,
row_ids=row_ids))[0]
# Q1 Q2 A0 A1 A2 B0 B1 B2 C0 C1 C2 PADDING
expected_row_result = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q2
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # A0
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # A1
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # A2
[1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], # B0
[1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], # B1
[1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], # B2
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], # C0
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], # C1
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], # C2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD3
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD4
]
# Q1 Q2 A0 A1 A2 B0 B1 B2 C0 C1 C2 PADDING
expected_column_result = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q2
[1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0], # A0
[1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0], # A1
[1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0], # A2
[1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0], # B0
[1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0], # B1
[1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0], # B2
[1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0], # C0
[1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0], # C1
[1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0], # C2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD3
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD4
]
expected_result = sess.run(
tf.constant([expected_row_result] * 2 + [expected_column_result] * 3))
self.assertAllEqual(result, expected_result)
      # With bucket size 3 no extra attention should be pruned
result = sess.run(
attention_utils.compute_headwise_sparse_attention_mask(
num_row_heads=2,
num_column_heads=3,
bucket_size=3,
header_size=None,
input_mask=input_mask,
segment_ids=segment_ids,
column_ids=column_ids,
row_ids=row_ids))[0]
# The attention of the padding tokens changes but it has no impact
      self.assertAllEqual(result[:, :, :-4], expected_result[:, :, :-4])
def test_compute_sparse_attention_mask(self):
with self.cached_session() as sess:
# Table Structure
# Q1 Q2 A0 B0 C0
# A1 B1 C1
# A2 B2 C2 PAD1 PAD2 PAD3 PAD4
row_ids = tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0]])
input_mask = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
segment_ids = tf.constant([[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
result = sess.run(
attention_utils.compute_sparse_attention_mask(
input_mask=input_mask,
segment_ids=segment_ids,
column_ids=column_ids,
row_ids=row_ids))[0]
expected_result = sess.run(
# Q1 Q2 A0 A1 A2 B0 B1 B2 C0 C1 C2 PADDING
tf.constant([
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # Q2
[1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0], # A0
[1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0], # A1
[1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0], # A2
[1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], # B0
[1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0], # B1
[1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0], # B2
[1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0], # C0
[1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0], # C1
[1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0], # C2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD2
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD3
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], # PAD4
]))
self.assertAllEqual(result, expected_result)
def compare_efficient_and_vanilla_sparse_attention(
self, sess, row_ids, column_ids,
embedding_dim, num_row_heads, num_column_heads,
bucket_size, header_size, seq_length, input_size,
first_segment_size, rows_sorted):
tf.random.set_random_seed(42)
num_attention_heads = num_row_heads + num_column_heads
pad_size = seq_length - input_size
second_segment_size = input_size - first_segment_size
input_mask = tf.constant([[1] * input_size + [0] * pad_size])
segment_ids = tf.constant([[0] * first_segment_size +
[1] * second_segment_size + [0] * pad_size])
input_tensor = tf.random.normal(shape=[1, seq_length, 128])
attention_mask = attention_utils.compute_headwise_sparse_attention_mask(
num_row_heads=num_row_heads,
num_column_heads=num_column_heads,
bucket_size=bucket_size,
header_size=header_size,
input_mask=input_mask,
segment_ids=segment_ids,
column_ids=column_ids,
row_ids=row_ids)
expected_result_op = modeling.attention_layer(
input_tensor,
input_tensor,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=embedding_dim)[0][0, :input_size]
result_ops = []
for sort_after_projection in [True, False]:
attention_layer = attention_utils.create_bucketed_attention_layer(
input_mask=input_mask,
input_header=tf.math.equal(segment_ids, 0),
bucket_size=bucket_size,
header_size=header_size,
sort_after_projection=sort_after_projection,
token_type_ids=[(num_row_heads, rows_sorted, row_ids),
(num_column_heads, False, column_ids)])
result_ops.append(
attention_layer(
input_tensor,
input_tensor,
num_attention_heads=num_attention_heads,
size_per_head=embedding_dim)[0][0, :input_size])
sess.run(tf.global_variables_initializer())
expected_result, result1, result2 = sess.run([expected_result_op] +
result_ops)
self.assertAllClose(result1, expected_result)
self.assertAllClose(result2, expected_result)
def test_efficient_sparse_attention_matches_vanilla_version(self):
# Tests that computing bucketed and non-bucketed attention for random
# embeddings produces the same result.
with self.cached_session() as sess:
# Table Structure
# Q1 Q2 A0 B0 C0
# A1 B1 C1
# A2 B2 C2 PAD1 PAD2 PAD3 PAD4
row_ids = tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0]])
column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
self.compare_efficient_and_vanilla_sparse_attention(
sess,
row_ids=row_ids,
column_ids=column_ids,
embedding_dim=32,
num_row_heads=2,
num_column_heads=4,
bucket_size=3,
header_size=3,
seq_length=15,
input_size=11,
first_segment_size=2,
rows_sorted=True)
def test_efficient_sparse_attention_random_ids_matches_vanilla_version(self):
# Tests that computing bucketed and non-bucketed attention for random
# attributes not mapping to real columns yield the same results.
with self.cached_session() as sess:
seq_length = 14
row_ids = tf.random.uniform(
shape=[1, seq_length], maxval=20, dtype=tf.int32)
column_ids = tf.random.uniform(
shape=[1, seq_length], maxval=20, dtype=tf.int32)
self.compare_efficient_and_vanilla_sparse_attention(
sess,
row_ids=row_ids,
column_ids=column_ids,
embedding_dim=16,
num_row_heads=5,
num_column_heads=1,
bucket_size=2,
header_size=4,
seq_length=seq_length,
input_size=11,
first_segment_size=2,
rows_sorted=False)
if __name__ == "__main__":
tf.test.main()
| 40.81875 | 80 | 0.535982 | 12,239 | 0.936993 | 0 | 0 | 0 | 0 | 0 | 0 | 1,771 | 0.135584 |
3811d802f57bc19ae72289d16c14c14349ea0f99 | 1,018 | py | Python | backend/src/application.py | rpenna/post-scheduler | 775a145eab6064ecfe2028064ab85834a7a1c412 | [
"MIT"
] | null | null | null | backend/src/application.py | rpenna/post-scheduler | 775a145eab6064ecfe2028064ab85834a7a1c412 | [
"MIT"
] | 8 | 2020-10-10T02:02:56.000Z | 2020-11-08T21:04:17.000Z | backend/src/application.py | rpenna/post-scheduler | 775a145eab6064ecfe2028064ab85834a7a1c412 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_mongoengine import MongoEngine
from src.controllers.user_controller import UserController
db_host = {
'production': 'mongodb://localhost/posts',
'test': 'mongomock://localhost/posts'
}
def create_app(db_host_type: str = 'production') -> Flask:
"""Creates Flask app
Args:
db_host_type (str, optional): Desired database settings. Defaults to 'production'.
Returns:
Flask: Flask app
"""
database = MongoEngine()
app = Flask(__name__)
app.config['MONGODB_SETTINGS'] = {
'db': 'posts',
'host': db_host[db_host_type]
}
database.init_app(app)
app.add_url_rule('/user', methods=['POST'], view_func=UserController().create)
app.add_url_rule('/user/login', methods=['POST'], view_func=UserController().login)
app.add_url_rule('/user/status', methods=['GET'], view_func=UserController().get)
app.add_url_rule('/user/logout', methods=['POST'], view_func=UserController().logout)
return app
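# Editor's usage sketch: build the app against the in-memory test database
# (the 'test' host string above relies on the mongomock package).
#   app = create_app('test')
#   app.run(debug=True)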
| 29.085714 | 90 | 0.6778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.354617 |
3812fff3955607f6a3a8b89e70ea9dfc7716d228 | 584 | py | Python | Curso-em-video-Python3-mundo2/ex058.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | Curso-em-video-Python3-mundo2/ex058.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | Curso-em-video-Python3-mundo2/ex058.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | import random
pc = random.randint(0, 10)
print('''I am your computer...
I just thought of a number between 0 and 10.
Can you guess which one it is?''')
player = int(input('What is your guess? '))
# print(pc, player)
tentativas = 1
while pc != player:
if pc > player:
        print('Higher... Try again.')
        player = int(input('What is your guess? '))
elif pc < player:
        print('Lower... Try again.')
        player = int(input('What is your guess? '))
tentativas += 1
print('You got it in {} attempts. Congratulations!'.format(tentativas))
| 32.444444 | 64 | 0.640411 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.51269 |
381586539d665f7a1421b0fdd67949cba8d6ee1c | 985 | py | Python | mumblr/templatetags/mumblr_tags.py | hmarr/django-mumblr | 36f6e4b0d5d5f999591987b24765915edce92400 | [
"MIT"
] | 35 | 2015-02-10T12:44:33.000Z | 2020-12-27T19:47:33.000Z | mumblr/templatetags/mumblr_tags.py | hmarr/django-mumblr | 36f6e4b0d5d5f999591987b24765915edce92400 | [
"MIT"
] | null | null | null | mumblr/templatetags/mumblr_tags.py | hmarr/django-mumblr | 36f6e4b0d5d5f999591987b24765915edce92400 | [
"MIT"
] | 16 | 2015-02-20T19:13:43.000Z | 2022-02-16T04:38:12.000Z | from django.template import Library, Node, TemplateSyntaxError
import re
from mumblr.entrytypes import EntryType
register = Library()
class LatestEntriesNode(Node):
def __init__(self, num, var_name):
self.num = int(num or 10)
self.var_name = var_name
def render(self, context):
context[self.var_name] = list(EntryType.live_entries()[:self.num])
return ''
@register.tag
def get_latest_entries(parser, token):
# Usage:
# {% get_latest_entries as entries %} (default 10 entries)
# (or {% get_latest_entries 7 as entries %} for 7 entries)
# {% for entry in entries %}
# <li>{{ entry.title }}</li>
# {% endfor %}
tag_name, contents = token.contents.split(None, 1)
match = re.search(r'(\d+\s+)?as\s+([A-z_][A-z0-9_]+)', contents)
if not match:
raise TemplateSyntaxError("%r tag syntax error" % tag_name)
num, var_name = match.groups()
return LatestEntriesNode(num, var_name)
| 27.361111 | 74 | 0.643655 | 262 | 0.26599 | 0 | 0 | 580 | 0.588832 | 0 | 0 | 266 | 0.270051 |
3817b5265bdca590b8a4c43a50fd915e4ad3d51f | 448 | py | Python | funcional.py | renzon/novatec | 9b6511217c4972bf8584dea8bca1460d4bcb9d87 | [
"MIT"
] | null | null | null | funcional.py | renzon/novatec | 9b6511217c4972bf8584dea8bca1460d4bcb9d87 | [
"MIT"
] | null | null | null | funcional.py | renzon/novatec | 9b6511217c4972bf8584dea8bca1460d4bcb9d87 | [
"MIT"
] | 2 | 2016-05-21T12:28:13.000Z | 2020-09-06T21:58:36.000Z | from time import time
def profile(funcao):
def funcao_wrapper(*args, **kwargs):
inicio = time()
resultado = funcao(*args, **kwargs)
fim = time()
print(fim - inicio)
return resultado
return funcao_wrapper
@profile
def f(n):
return 'Executei f {}'.format(n)
print(f.__name__)
a = f
print(a(4))
print(a(5))
def g():
def h():
return 'executando h'
return h
print(g()())
| 12.444444 | 43 | 0.566964 | 0 | 0 | 0 | 0 | 55 | 0.122768 | 0 | 0 | 29 | 0.064732 |
38188123fc74067b2382c0edf77f410238662bfd | 1,040 | py | Python | bf_smb_tmux.py | BadNameException/SambaBF | 7421ac1d821807ffc565b7b5b466c85084343fb9 | [
"MIT"
] | null | null | null | bf_smb_tmux.py | BadNameException/SambaBF | 7421ac1d821807ffc565b7b5b466c85084343fb9 | [
"MIT"
] | null | null | null | bf_smb_tmux.py | BadNameException/SambaBF | 7421ac1d821807ffc565b7b5b466c85084343fb9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
from subprocess import Popen, PIPE, check_output
p = 1
USERNAME = "sigurdkb"
IP = "172.16.0.30"
PORT = "445"
new_results = []
correct_pw = ""
cracked = bool(False)
counter = 0
guess_result = ((),)
pw = ''
def connect_smb():
global correct_pw
global cracked
global guess_result
global pw
while cracked == False:
pw = get_next_pw()
arg = 'smbclient //'+IP+'/homes -U '+USERNAME+' '+pw
proc = Popen('/bin/bash', stdin=PIPE, stdout=PIPE)
stdout = proc.communicate(arg.encode())
guess_result = stdout
print ("Try: " + pw )
if b'Welcome' in guess_result:
print ("Correct password: " + pw)
correct_pw = pw
f = open("correct_pw.txt", 'w')
f.write(correct_pw)
cracked = True
else:
print ("Tried: " + pw)
def get_next_pw():
global counter
global filenr
f = open("wl.txt", 'r')
l = f.readlines()[counter]
if l == '':
print ("wordlist_part"+ str(filenr) + " er ferdig")
exit(0)
else:
l = 'f'+l
counter += 1
return l.strip('\n')
connect_smb()
| 17.931034 | 54 | 0.626923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.210577 |
381a3c1f8fbb03e20ac9c74bfde3196e650b1d31 | 2,819 | py | Python | VA/main/utils/cluster.py | YuJaceKim/Activity-Recognition-with-Combination-of-Deeply-Learned-Visual-Attention-and-Pose-Estimation | 23b9191f150d0edb981cf22a47a618feb55578b9 | [
"MIT"
] | 343 | 2018-07-18T10:39:30.000Z | 2022-03-30T02:32:06.000Z | VA/main/utils/cluster.py | YuJaceKim/Activity-Recognition-with-Combination-of-Deeply-Learned-Visual-Attention-and-Pose-Estimation | 23b9191f150d0edb981cf22a47a618feb55578b9 | [
"MIT"
] | 47 | 2018-09-03T03:35:13.000Z | 2021-11-15T02:09:15.000Z | VA/main/utils/cluster.py | YuJaceKim/Activity-Recognition-with-Combination-of-Deeply-Learned-Visual-Attention-and-Pose-Estimation | 23b9191f150d0edb981cf22a47a618feb55578b9 | [
"MIT"
] | 83 | 2018-10-15T08:36:12.000Z | 2022-03-05T05:51:16.000Z | import numpy as np
# import matplotlib.pyplot as plt
from scipy.cluster.vq import kmeans
# def plothist(x):
# vmin = x.min()-1
# vmax = x.max()+1
# bins = np.arange(vmin, vmax, (vmax - vmin)/50)
# plt.hist(x, bins=bins)
# plt.show()
# def scatterpred(pred):
# plt.scatter(pred[:,0], pred[:,1])
# plt.show()
# def scatter_kmeans(pred):
# plt.scatter(pred[:,0], pred[:,1], color='b')
# c,v = kmeans(pred, 8)
# plt.scatter(c[:,0], c[:,1], color='r')
# plt.show()
def most_assigned(x, c):
nb_c = len(c)
assign = np.zeros(nb_c)
for i in range(len(x)):
y = x[i].reshape((1,2))
d = np.sqrt(np.sum(np.power(y.repeat(nb_c, axis=0) - c, 2), axis=1))
assign[d.argmin()] += 1
return assign.argmax()
def mean_on_most_assigned(x, c):
nb_c = len(c)
assign = np.zeros(nb_c)
mean = np.zeros(c.shape)
for i in range(len(x)):
y = x[i].reshape((1,2))
d = np.sqrt(np.sum(np.power(y.repeat(nb_c, axis=0) - c, 2), axis=1))
idx = d.argmin()
assign[idx] += 1
mean[idx,:] += x[i]
idx = assign.argmax()
return mean[idx,:] / assign[idx]
# def best_kmeans(pred):
# plt.scatter(pred[:,0], pred[:,1], color='b')
# c,v = kmeans(pred, 3)
# plt.scatter(c[:,0], c[:,1], color='g')
# n = most_assigned(pred, c)
# plt.scatter(c[n,0], c[n,1], color='r')
# plt.show()
def clustering_joints(y_pred, k=3):
_,nb_spl,nb_joints,dim = y_pred.shape
y = np.zeros((nb_spl, nb_joints, dim))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,j]
c,v = kmeans(d, k)
n = most_assigned(d, c)
y[s,j,:] = c[n]
return y
def clustering_grid(y_pred, size=10):
_, nb_spl, nb_joints, dim = y_pred.shape
assert dim == 2
yp = np.zeros((nb_spl, nb_joints, dim))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,j,:]
xmin = d[:,0].min()
ymin = d[:,1].min()
xmax = d[:,0].max()
ymax = d[:,1].max()
xstep = (xmax - xmin) / size
ystep = (ymax - ymin) / size
c = np.zeros((size * size, dim))
for x in range(size):
for y in range(size):
c[x + size*y, 0] = xmin + (x + 0.5) * xstep
c[x + size*y, 1] = ymin + (y + 0.5) * ystep
yp[s,j,:] = mean_on_most_assigned(d, c)
return yp
def mean_joints(y_pred):
_, nb_spl, dim, nb_joints = y_pred.shape
assert dim == 2
yp = np.zeros((nb_spl, dim, nb_joints))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,:,j]
yp[s, 0, j] = d[:,0].mean()
yp[s, 1, j] = d[:,1].mean()
return yp
| 29.673684 | 76 | 0.506208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 603 | 0.213906 |
381c5c0ab457121c1be3d4dc192a40949d85e8c1 | 1,378 | py | Python | isdbeads/__init__.py | michaelhabeck/bayesian-random-tomography | 9429a3688df368f0fe8fd7beaa8202386951164a | [
"MIT"
] | 2 | 2021-04-17T14:05:05.000Z | 2022-02-24T16:03:29.000Z | isdbeads/__init__.py | michaelhabeck/bayesian-random-tomography | 9429a3688df368f0fe8fd7beaa8202386951164a | [
"MIT"
] | null | null | null | isdbeads/__init__.py | michaelhabeck/bayesian-random-tomography | 9429a3688df368f0fe8fd7beaa8202386951164a | [
"MIT"
] | null | null | null | from .universe import (
Universe,
Particle
)
from .probability import (
Probability
)
from .likelihood import (
Likelihood,
Normal,
Exponential,
LowerUpper,
Logistic,
GaussianMixture,
Relu
)
from .model import (
ModelDistances,
ModelImage,
ModelVolume,
RadiusOfGyration,
ProjectedCloud,
ModelFactory
)
from .grid import (
Grid
)
from .params import (
Volume,
Image,
Parameters,
Forces,
Location,
Precision,
Scale,
Coordinates,
Distances,
Rotation
)
from .prior import (
BoltzmannEnsemble,
TsallisEnsemble
)
from .forcefield import (
ForcefieldFactory
)
from .nblist import (
NBList
)
from .posterior import (
ConditionalPosterior,
PosteriorCoordinates
)
from .data import (
HiCData,
HiCParser
)
from .mcmc import (
RandomWalk,
AdaptiveWalk,
Ensemble
)
from .hmc import (
HamiltonianMonteCarlo
)
from .rex import (
ReplicaExchange,
ReplicaHistory,
ReplicaState
)
from .core import (
take_time
)
from .utils import (
rdf,
crosscorr,
image_center,
ChainViewer,
create_universe,
random_sphere,
segment_structure
)
from .chromosome import (
ChromosomeSimulation
)
from .inference import (
AdaptiveWalk,
RotationSampler,
HamiltonianMonteCarlo
)
| 12.759259 | 26 | 0.656749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
381e48031db6b3bb9ab5198242c5568c48b161ff | 2,668 | py | Python | ag-res.py | Murilodsv/py-jules | 601ca9e6fbad7fe82404f5296babb04583ddda95 | [
"MIT"
] | null | null | null | ag-res.py | Murilodsv/py-jules | 601ca9e6fbad7fe82404f5296babb04583ddda95 | [
"MIT"
] | null | null | null | ag-res.py | Murilodsv/py-jules | 601ca9e6fbad7fe82404f5296babb04583ddda95 | [
"MIT"
] | 1 | 2020-04-13T17:23:54.000Z | 2020-04-13T17:23:54.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 13:32:20 2021
#--- ag csv results to single file based on dashboard_dbs
#--- Murilo Vianna (murilodsv@gmail.com)
#--- Jul, 2021.
#--- Dev-log in: https://github.com/Murilodsv/py-jules
@author: muril
"""
# DEBUG import os; os.chdir('C:/Murilo/py-jules')
#------------------------------#
#--- generate qsub-clusters ---#
#------------------------------#
dash_nm = 'dashboard_db.csv' # Filename of Dashboard CSV
wd_out = 'ginore/csv_res'
sim_id = 'future-crop'
dap_f = range(1,361)
var_nm = 'fsmc'
var_ag = 'pft'
f_sufix = '.day.time_pft_y_x.csv'
#--- Get scripts arguments
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
#--- use arguments
dash_nm = str(sys.argv[1]) # debug dash_nm = 'dashboard_db_future.csv'
wd_out = str(sys.argv[2])
sim_id = str(sys.argv[3])
dap_f = str(sys.argv[4])
var_nm = str(sys.argv[5])
var_ag = str(sys.argv[6])
f_sufix = str(sys.argv[7])
import numpy as np
dap_f = '1,360'
dap_f = np.array(dap_f.split(',')) # arg_run_id = np.array(str('SC3572').split(','))
if len(dap_f) == 1:
dap_f = range(0,int(dap_f))
else:
dap_f = range(int(dap_f[0]), int(dap_f[1])+1)
#----------------------#
#--- Load libraries ---#
#----------------------#
import os
import os.path
import util as u
from time import time
#--- Track progress
run_start = time()
#----------------------#
#--- Read dashboard ---#
#----------------------#
#--- get run wd
wd = os.getcwd().replace('\\','/')
#--- Open CSVs
dash = u.df_csv(wd+'/'+dash_nm)
#--- list of clusters
l_ids = dash['run_id'].unique()
ini_df = True
for i in l_ids:
#--- filename
fn = wd+'/jules_run/'+i+'/namelists/output/'+i+f_sufix
if os.path.exists(fn):
print('Reading '+i)
#--- read file
df_i = u.df_csv(fn)
#--- filter dap
df_i = df_i[:][df_i['dap'].isin(range(1,360))]
#--- aggregate the var_nm by mean and var_ag
df_i = df_i.groupby(var_ag, as_index=False)[var_nm].mean()
#--- flag with run_id
df_i['run_id'] = i
#--- bind to single df
if ini_df:
df_res = df_i
ini_df = False
else:
df_res = df_res.append(df_i)
#--- write results
df_res.to_csv(wd+'/'+wd_out+'/'+sim_id+f_sufix, index = None, header=True)
#--- track time
print("\nElapsed time of copying: --- %.3f seconds ---" % (time() - run_start))
| 24.477064 | 97 | 0.508996 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,182 | 0.443028 |
3820fe50d4dce95d717e614296b340c0f9ebd66c | 1,294 | py | Python | gui/Color.py | brianjimenez/emol | b789b85b40a99247f008fb7cafa0d019d142cd3c | [
"MIT"
] | null | null | null | gui/Color.py | brianjimenez/emol | b789b85b40a99247f008fb7cafa0d019d142cd3c | [
"MIT"
] | null | null | null | gui/Color.py | brianjimenez/emol | b789b85b40a99247f008fb7cafa0d019d142cd3c | [
"MIT"
] | null | null | null | '''
Created on Oct 10, 2012
@author: Brian Jimenez-Garcia
@contact: brian.jimenez@bsc.es
'''
class Color:
def __init__(self, red=0., green=0., blue=0., alpha=1.0):
self.__red = red
self.__green = green
self.__blue = blue
self.__alpha = alpha
def get_rgba(self):
return self.__red, self.__green, self.__blue, self.__alpha
def get_red(self):
return self.__red
def get_blue(self):
return self.__blue
def get_green(self):
return self.__green
def get_alpha(self):
return self.__alpha
# Useful predefined colors
White = Color(1.0, 1.0, 1.0, 1.0)
Black = Color(0.0, 0.0, 0.0, 1.0)
Carbon = Color(0.17, 0.17, 0.18, 1.0)
Red = Color(0.95, 0.03, 0.01, 1.0)
Blue = Color(0.01, 0.03, 0.95, 1.0)
Sky = Color(0.233, 0.686, 1.0, 1.0)
Yellow = Color(1.0, 1.0, 0.0, 1.0)
Green = Color(0.0, 0.53, 0.0, 1.0)
Pink = Color(0.53, 0.12, 0.36, 1.0)
DarkRed = Color(0.59, 0.13, 0.0, 1.0)
Violet = Color(0.46, 0.0, 1.0, 1.0)
DarkViolet = Color(0.39, 0.0, 0.73, 1.0)
Cyan = Color(0.0, 1.0, 1.0, 1.0)
Orange = Color(1.0, 0.59, 0.0, 1.0)
Peach = Color(1.0, 0.66, 0.46, 1.0)
DarkGreen = Color(0.0, 0.46, 0.0, 1.0)
Gray = Color(0.59, 0.59, 0.59, 1.0)
DarkOrange = Color(0.86, 0.46, 0.0, 1.0) | 26.408163 | 66 | 0.578053 | 515 | 0.397991 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.091963 |
38225c73c048ecdb417dfedadbfbcef4ca7108c0 | 917 | py | Python | pnc_cli/pnc_help_formatter.py | vibe13/pnc-cli | 9020462cac5254bdd40cc7d8fa239433242cce45 | [
"Apache-2.0"
] | 2 | 2016-05-18T15:01:34.000Z | 2016-08-11T14:04:17.000Z | pnc_cli/pnc_help_formatter.py | vibe13/pnc-cli | 9020462cac5254bdd40cc7d8fa239433242cce45 | [
"Apache-2.0"
] | 47 | 2016-06-23T19:58:40.000Z | 2020-03-10T17:58:11.000Z | pnc_cli/pnc_help_formatter.py | vibe13/pnc-cli | 9020462cac5254bdd40cc7d8fa239433242cce45 | [
"Apache-2.0"
] | 21 | 2016-05-30T20:34:17.000Z | 2021-09-07T13:22:20.000Z | import argparse
__author__ = 'aabulawi'
class PNCFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is argparse.SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
if 'default' in params:
if params['default'] is None:
params['default'] = 'null'
else:
params['default'] = repr(params['default'])
return self._get_help_string(action) % params
| 33.962963 | 72 | 0.579062 | 873 | 0.952017 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.111232 |
382269a9358bb58a18f501c0bd286c171e8ac7d7 | 775 | py | Python | Spell Compendium/scr/Spell1148 - Nimbus of Light.py | Sagenlicht/ToEE_Mods | a4b07f300df6067f834e09fcbc4c788f1f4e417b | [
"MIT"
] | 1 | 2021-04-26T08:03:56.000Z | 2021-04-26T08:03:56.000Z | Spell Compendium/scr/Spell1148 - Nimbus of Light.py | Sagenlicht/ToEE_Mods | a4b07f300df6067f834e09fcbc4c788f1f4e417b | [
"MIT"
] | 2 | 2021-06-11T05:55:01.000Z | 2021-08-03T23:41:02.000Z | Spell Compendium/scr/Spell1148 - Nimbus of Light.py | Sagenlicht/ToEE_Mods | a4b07f300df6067f834e09fcbc4c788f1f4e417b | [
"MIT"
] | 1 | 2021-05-17T15:37:58.000Z | 2021-05-17T15:37:58.000Z | from toee import *
def OnBeginSpellCast(spell):
print "Nimbus of Light OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
def OnSpellEffect(spell):
print "Nimbus of Light OnSpellEffect"
spell.duration = 10 * spell.caster_level
spellTarget = spell.target_list[0]
spellTarget.obj.condition_add_with_args('sp-Nimbus of Light', spell.id, spell.duration, 0, 0) #3rd arg = roundsCharged; 4th arg = attack_hit_status
spellTarget.partsys_id = game.particles('sp-Heroism', spellTarget.obj)
spell.spell_end(spell.id)
def OnBeginRound(spell):
print "Nimbus of Light OnBeginRound"
def OnEndSpellCast(spell):
print "Nimbus of Light OnEndSpellCast"
| 31 | 151 | 0.740645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.340645 |
38229cdc49513c553f97ed67ce3eaa6b0ba92dcb | 17,258 | py | Python | pyBAScloudAPI/examples/main.py | bascloud/BASCloudAPI | 6a06d430720e99204f84f5362b4f22d7d4a72b76 | [
"MIT"
] | 3 | 2021-04-30T07:44:11.000Z | 2021-05-03T06:35:01.000Z | pyBAScloudAPI/examples/main.py | bascloud/BASCloudAPI | 6a06d430720e99204f84f5362b4f22d7d4a72b76 | [
"MIT"
] | 9 | 2021-06-23T04:21:51.000Z | 2022-01-17T04:15:06.000Z | pyBAScloudAPI/examples/main.py | bascloud/BAScloudAPI | 6a06d430720e99204f84f5362b4f22d7d4a72b76 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import pyBAScloudAPI as api
def printErrorHandler(exception, json):
print("\t\tException occured in request: ", exception, json)
print("Testing util functions.")
print("\t2021-04-26T10:56:58.000Z = ", api.Util.parseDateTimeString(dateTime="2021-04-26T10:56:58.000Z"))
assert api.Util.parseDateTimeString(dateTime="2021-04-26T10:56:58.000Z") == 1619427418
print("\tParameters:", api.Util.parseURLParameter(url="test.local/tenants/XXX/connectors/XXX/devices?page[size]=1&page[before]=Mzk5ZTM1MWYtNTI3OS00YzFhLTk0MmUtYTZiODBmMjFiYzVh"))
print("Demo of library methods for BAScloud API endpoints.")
print("Initialising library...")
BCAPI = api.EntityContext("https://basc-prd-apm-euw.azure-api.net/v2")
print("\tOK.")
print("1. - Authentication with user login")
BCAPI.authenticateWithUserLogin(email="erhardt@profm-gmbh.de", password="Dont4get$1")
print("\tOK.")
print("\tAuthenticated: ", BCAPI.isAuthenticated())
print("\tToken valid until: ", datetime.datetime.fromtimestamp(BCAPI.getTokenExpirationDate()))
print("\tToken: ", BCAPI.getToken())
print("2. - Access user information")
print("\tRequesting all users...")
users = BCAPI.getUsersCollection(errorHandler=printErrorHandler)
print("\t\tOK.")
print("\tFound users: ", len(users[0]))
print("\tRequesting single users with UUID...")
currentUser = BCAPI.getUser(users[0][0].uuid)
print("\t\tOK.")
print("\tUser UUID: ", currentUser.uuid)
print("\tUser Email: ", currentUser.email)
print("\tUser created at: ", datetime.datetime.fromtimestamp(currentUser.createdAt))
print("\tUser updated at: ", datetime.datetime.fromtimestamp(currentUser.updatedAt))
print("\tRequesting associated tenant...")
assoc_tenant = BCAPI.getAssociatedTenant(currentUser.uuid)
print("\t\tOK.")
print("\tTenant UUID: ", assoc_tenant.uuid)
print("3. - Access tenant information")
print("\tRequesting all tenants...")
tenants = BCAPI.getTenantsCollection()
print("\t\tOK.")
print("\tFound tenants: ", len(tenants[0]))
print("\tRequesting single tenant with UUID...")
tenant = BCAPI.getTenant(tenants[0][0].uuid)
print("\t\tOK.")
print("\tTenant UUID: ", tenant.uuid)
print("\tTenant Name: ", tenant.name)
print("\tTenant URL Name: ", tenant.urlName)
print("\tTenant created at: ", datetime.datetime.fromtimestamp(tenant.createdAt))
print("\tTenant updated at: ", datetime.datetime.fromtimestamp(tenant.updatedAt))
print("\tRequesting associated users of tenant...")
tenant_user = BCAPI.getAssociatedUsers(tenant.uuid)
print("\t\tOK.")
print("\tFound tenant users: ", len(tenant_user[0]))
# print("3.5 - Create tenant and update")
# print("\tCreating new tenant...")
# new_tenant = BCAPI.createTenant("MoritzTestTenant", currentUser.uuid);
# print("\t\tOK.")
# print("\tNew tenant UUID: ", new_tenant.uuid)
# print("\tRequesting newly created tenant with UUID...")
# new_re_tenant = BCAPI.getTenant(new_tenant.uuid);
# print("\t\tOK.")
# print("\tTenant UUID: ", new_re_tenant.uuid)
# print("\tTenant Name: ", new_re_tenant.name)
# print("\tTenant URL Name: ", new_re_tenant.urlName)
# print("\tTenant created at: ", datetime.datetime.fromtimestamp(new_re_tenant.createdAt))
# print("\tTenant updated at: ", datetime.datetime.fromtimestamp(new_re_tenant.createdAt))
# print("\tUpdating newly created tenant...")
# up_re_tenant = BCAPI.updateTenant(new_re_tenant.uuid, "MoritzTestTenant2")
# print("\t\tOK.")
# print("\tTenant Name: ", up_re_tenant.getName())
# print("\tRequesting associated users of tenant...")
# re_tenant_user = BCAPI.getAssociatedUsers(new_re_tenant.uuid)
# print("\t\tOK.")
# print("\tFound tenant users: ", len(re_tenant_user[0]))
# print("\tDeleting newly created tenant...")
# BCAPI.deleteTenant(new_re_tenant.uuid)
# print("\t\tOK.")
print("4. - Access property information")
print("\tRequesting all properties...")
props = BCAPI.getPropertiesCollection(tenant.uuid)
print("\t\tOK.")
print("\tFound properties: ", len(props[0]))
print("\tRequesting single property with UUID...")
print("\tProperty UUID: ", props[0][0].uuid)
property = BCAPI.getProperty(tenant.uuid, props[0][0].uuid)
print("\t\tOK.")
print("\tProperty UUID: ", property.uuid)
print("\tProperty Name: ", property.name)
print("\tProperty Address: {}, {} {}, {}".format(property.street, property.postalCode, property.city, property.country))
print("\tProperty created at: ", datetime.datetime.fromtimestamp(property.createdAt))
print("\tProperty updated at: ", datetime.datetime.fromtimestamp(property.updatedAt))
print("\tRequesting associated connectors of the property...")
prop_connectors = BCAPI.getAssociatedConnectors(tenant.uuid, property.uuid)
print("\t\tOK.")
print("\tFound property connectors: ", len(prop_connectors[0]))
print("4.5 - Create property and update")
print("\tCreating new property...")
new_property = BCAPI.createProperty(tenant.uuid, "MoritzTestProperty", "Street", "12345", "City", "Country")
print("\t\tOK.")
print("\tNew property UUID: ", new_property.uuid)
print("\tRequesting newly created property with UUID...")
new_re_property = BCAPI.getProperty(tenant.uuid,new_property.uuid)
print("\t\tOK.")
print("\tProperty UUID: ", new_re_property.uuid)
print("\tProperty Name: ", new_re_property.name)
print("\tProperty Address: {}, {} {}, {}".format(new_re_property.street, new_re_property.postalCode, new_re_property.city, new_re_property.country))
print("\tProperty created at: ", datetime.datetime.fromtimestamp(new_re_property.createdAt))
print("\tProperty updated at: ", datetime.datetime.fromtimestamp(new_re_property.updatedAt))
print("\tUpdating newly created property...")
up_re_property = BCAPI.updateProperty(tenant.uuid, new_re_property.uuid, "MoritzTestProperty2");
print("\t\tOK.")
print("\tProperty Name: ", up_re_property.name)
# Should be always be empty see next section create new connector for it
print("\tRequesting associated connectors of the property...")
new_prop_connectors = BCAPI.getAssociatedConnectors(tenant.uuid, up_re_property.uuid)
print("\t\tOK.")
print("\tFound property connectors: ", len(new_prop_connectors[0]))
# print("\tDeleting newly created property...")
# BCAPI.deleteProperty(tenant.uuid, up_re_property.uuid)
# print("\t\tOK.")
print("5. - Access connector information")
print("\tRequesting all connectors...")
connectors = BCAPI.getConnectorsCollection(tenant.uuid)
print("\t\tOK.")
print("\tFound connectors: ", len(connectors[0]))
print("\tRequesting single connector with UUID...")
print("\tConnector UUID: ", connectors[0][0].uuid)
connector = BCAPI.getConnector(tenant.uuid, connectors[0][0].uuid)
print("\t\tOK.")
print("\tConnector UUID: ", connector.uuid)
print("\tConnector Name: ", connector.name)
print("\tConnector created at: ", datetime.datetime.fromtimestamp(connector.createdAt))
print("\tConnector updated at: ", datetime.datetime.fromtimestamp(connector.updatedAt))
print("\tRequesting associated property of the connector again...")
conn_prop = BCAPI.getAssociatedProperty(tenant.uuid, connector.uuid)
print("\t\tOK.")
print("\tConnector's property UUID: ", conn_prop.uuid)
print("\tRequest connector's associated devices...")
max_devices = 0
for conn in connectors[0]:
conn_devices = BCAPI.getAssociatedDevices(tenant.uuid, conn.uuid)
if len(conn_devices[0]) > max_devices:
max_devices = len(conn_devices[0])
connector = conn
print("\t\tFound connector with ", len(conn_devices[0]), " devices")
print("5.5 - Create connector and update")
print("\tCreating new connector...")
new_connector = BCAPI.createConnector(tenant.uuid, new_property.uuid, "MoritzTestConnector")
print("\t\tOK.")
print("\tNew connector UUID: ", new_connector.uuid)
print("\tRequesting new API key for created connector...")
connectorToken = BCAPI.getNewConnectorAuthToken(tenant.uuid, new_connector.uuid)
print("\t\tOK.")
print("\tConnector Auth. Token: ", connectorToken)
print("\tRequesting newly created connector with UUID...")
new_re_connector = BCAPI.getConnector(tenant.uuid, new_connector.uuid)
print("\t\tOK.")
print("\tConnector UUID: ", new_re_connector.uuid)
print("\tConnector Name: ", new_re_connector.name)
print("\tConnector created at: ", datetime.datetime.fromtimestamp(new_re_connector.createdAt))
print("\tConnector updated at: ", datetime.datetime.fromtimestamp(new_re_connector.createdAt))
print("\tUpdating newly created connector...")
up_re_connector = BCAPI.updateConnector(tenant.uuid, new_re_connector.uuid, "MoritzTestConnector2");
print("\t\tOK.")
print("\tConnector Name: ", up_re_connector.name)
print("\tRequesting associated connectors of the new property...")
new_prop_connectors = BCAPI.getAssociatedConnectors(tenant.uuid, new_property.uuid);
print("\t\tOK.")
print("\tFound property connectors: ", len(new_prop_connectors[0]))
# print("\tDeleting newly created connector...")
# BCAPI.deleteConnector(tenant.uuid, new_connector.uuid);
# print("\t\tOK.")
# print("\tDeleting newly created property...")
# BCAPI.deleteProperty(tenant.uuid, new_property.uuid);
# print("\t\tOK.")
print("6. - Access device information")
print("\tRequesting all devices...")
devices = BCAPI.getDevicesCollection(tenant.uuid)
print("\t\tOK.")
print("\tFound devices: ", len(devices[0]))
print("\tRequesting single device with UUID...")
print("\tDevice UUID: ", devices[0][0].uuid)
device = BCAPI.getDevice(tenant.uuid, devices[0][0].uuid)
print("\t\tOK.")
print("\tDevice UUID: ", device.uuid)
print("\tDevice AKS ID: ", device.aksID)
print("\tDevice Description: ", device.description)
print("\tDevice Unit: ", device.unit)
print("\tDevice created at: ", datetime.datetime.fromtimestamp(device.createdAt))
print("\tDevice updated at: ", datetime.datetime.fromtimestamp(device.updatedAt))
print("\tRequesting associated connector of the device again...")
device_conn = BCAPI.getAssociatedConnector(tenant.uuid, device.uuid)
print("\t\tOK.")
print("\tDevice's Connector UUID: ", device_conn.uuid)
print("\tRequesting device associated readings...")
max_readings = 0
for d in devices[0]:
device_readings = BCAPI.getAssociatedReadings(tenant.uuid, d.uuid)
if len(device_readings[0]) > max_readings:
max_readings = len(device_readings[0])
device = d
print("\t\tFound device with ", len(device_readings[0]), " readings")
print("\tRequesting paginated device associated readings...")
paging = api.PagingOption(10, api.PagingOption.Direction.NONE, "")
device_readings = BCAPI.getAssociatedReadings(tenant.uuid, device.uuid, paging)
print("\t\tOK.")
print("\tFound readings: ", len(device_readings[0]))
for r in device_readings[0]:
print("\t\tReading: ", datetime.datetime.fromtimestamp(r.timestamp), " - ", r.value)
print("\tRequesting device associated setpoints...")
max_setpoints = 0
for d in devices[0]:
device_setpoint = BCAPI.getAssociatedSetPoints(tenant.uuid, d.uuid, paging)
if len(device_setpoint[0]) > max_setpoints:
max_setpoints = len(device_setpoint[0])
device = d
print("\t\tFound device with ", len(device_setpoint[0]), " setpoints")
print("\tRequesting paginated device associated setpoints...")
device_setpoint = BCAPI.getAssociatedSetPoints(tenant.uuid, device.uuid, paging)
print("\t\tOK.")
print("\tFound setpoints: ", len(device_setpoint[0]))
for sp in device_setpoint[0]:
print("\t\Setpoint: ", datetime.datetime.fromtimestamp(sp.timestamp), " - ", sp.value)
print("6.5 - Create device and update")
print("\tCreating new device...")
new_device = BCAPI.createDevice(tenant.uuid, new_connector.uuid, "MoritzTestAKS1000", "TestDevice", "m3")
print("\t\tOK.")
print("\tNew device UUID: ", new_device.uuid)
print("\tRequesting newly created device with UUID...")
new_re_device = BCAPI.getDevice(tenant.uuid, new_device.uuid)
print("\t\tOK.")
print("\tDevice UUID: ", new_re_device.uuid)
print("\tDevice AKS ID: ", new_re_device.aksID)
print("\tDevice Description: ", new_re_device.description)
print("\tDevice Unit: ", new_re_device.unit)
print("\tDevice created at: ", datetime.datetime.fromtimestamp(new_re_device.createdAt))
print("\tDevice updated at: ", datetime.datetime.fromtimestamp(new_re_device.updatedAt))
print("\tUpdating newly created device...")
up_re_device = BCAPI.updateDevice(tenant.uuid, new_re_device.uuid, "MoritzTestAKS1001");
print("\t\tOK.")
print("\tDevice AKS ID: ", up_re_device.aksID)
print("\tRequesting associated connector of the new device...")
new_dev_connector = BCAPI.getAssociatedConnector(tenant.uuid, up_re_device.uuid)
print("\t\tOK.")
print("\tFound device connector: ", new_dev_connector.uuid)
# print("\tDeleting newly created device...")
# BCAPI.deleteDevice(tenant.uuid, new_device.uuid)
# print("\t\tOK.")
print("7. - Access reading information")
print("\tRequesting all readings...")
readings = BCAPI.getReadingsCollection(tenant.uuid)
print("\t\tOK.")
print("\tFound readings: ", len(readings[0]))
if len(readings[0]) > 0:
print("\tRequesting single reading with UUID...")
print("\tReading UUID: ", readings[0][0].uuid)
reading = BCAPI.getReading(tenant.uuid, readings[0][0].uuid)
print("\t\tOK.")
print("\tReading UUID: ", reading.uuid)
print("\tReading Value: ", reading.value)
print("\tReading timestamp: ", datetime.datetime.fromtimestamp(reading.timestamp))
print("\tReading created at: " , datetime.datetime.fromtimestamp(reading.createdAt))
print("\tReading updated at: ", datetime.datetime.fromtimestamp(reading.updatedAt))
print("\tRequesting associated device of the reading again...")
read_device = BCAPI.getAssociatedDevice(tenant.uuid, reading.uuid)
print("\t\tOK.")
print("\tReadings's Device UUID: ", read_device.uuid)
print("8. - Create new reading")
currentDateTime = int(datetime.datetime.now().timestamp())
new_read = BCAPI.createReading(tenant.uuid, new_device.uuid, 1234.56, currentDateTime);
print("\t\tOK.")
print("\tRequesting created reading information again... ")
new_re_reading = BCAPI.getReading(tenant.uuid, new_read.uuid)
print("\t\tOK.")
print("\tReading UUID: ", new_re_reading.uuid)
print("\tReading Value: ", new_re_reading.value)
print("\tReading timestamp: ", datetime.datetime.fromtimestamp(new_re_reading.timestamp))
print("\tReading created at: ", datetime.datetime.fromtimestamp(new_re_reading.createdAt))
print("\tReading updated at: ", datetime.datetime.fromtimestamp(new_re_reading.updatedAt))
print("\tRequesting paginated device associated readings...")
new_device_readings = BCAPI.getAssociatedReadings(tenant.uuid, new_device.uuid, paging)
print("\t\tOK.")
print("\tFound readings: ", len(new_device_readings[0]))
for sp in new_device_readings[0]:
print("\t\tReading: ",datetime.datetime.fromtimestamp(sp.timestamp), " - ", sp.value)
print("\tRequesting deletion of created reading again... ")
BCAPI.deleteReading(tenant.uuid, new_re_reading.uuid)
print("\t\tOK.")
print("9. - Access setpoint information")
print("\tRequesting all setpoints...")
setpoints = BCAPI.getSetPointsCollection(tenant.uuid)
print("\t\tOK.")
print("\tFound setpoints: ", len(setpoints[0]))
if len(setpoints[0]) > 0:
print("\tRequesting single setpoint with UUID...")
print("\tSetPoint UUID: ", setpoints[0][0].uuid)
setpoint = BCAPI.getSetPoint(tenant.uuid, setpoints[0][0].uuid)
print("\t\tOK.")
print("\tSetPoint UUID: ", setpoint.uuid)
print("\tSetPoint Value: ", setpoint.value)
print("\tSetPoint timestamp: ", datetime.datetime.fromtimestamp(setpoint.timestamp))
print("\tSetPoint created at: ", datetime.datetime.fromtimestamp(setpoint.createdAt))
print("\tSetPoint updated at: ", datetime.datetime.fromtimestamp(setpoint.updatedAt))
print("10. - Create new setpoint")
currentDateTime = int(datetime.datetime.now().timestamp())
new_setPoint = BCAPI.createSetPoint(tenant.uuid, new_device.uuid, 2345.67, currentDateTime)
print("\t\tOK.")
print("\tRequesting created setpoint information again... ")
new_re_setpoint = BCAPI.getSetPoint(tenant.uuid, new_setPoint.uuid)
print("\t\tOK.")
print("\tSetPoint UUID: ", new_re_setpoint.uuid)
print("\tSetPoint Value: ", new_re_setpoint.value)
print("\tSetPoint timestamp: ", datetime.datetime.fromtimestamp(new_re_setpoint.timestamp))
print("\tSetPoint created at: ", datetime.datetime.fromtimestamp(new_re_setpoint.createdAt))
print("\tSetPoint updated at: ", datetime.datetime.fromtimestamp(new_re_setpoint.updatedAt))
print("\tRequesting paginated device associated setpoints...")
new_device_setpoint = BCAPI.getAssociatedSetPoints(tenant.uuid, new_device.uuid, paging)
print("\t\tOK.")
print("\tFound setpoints: ", len(new_device_setpoint[0]))
for sp in new_device_setpoint[0]:
print("\t\tSetPoint: ", datetime.datetime.fromtimestamp(sp.timestamp), " - ", sp.value)
print("12. - Deleting created entities")
# // print("\tDeleting created device...")
# // BCAPI.deleteDevice(tenant.uuid, new_device.uuid);
# // print("\t\tOK.")
print("\tDeleting created connector...")
BCAPI.deleteConnector(tenant.uuid, new_connector.uuid)
print("\t\tOK.")
print("\tDeleting created property...")
BCAPI.deleteProperty(tenant.uuid, new_property.uuid)
print("\t\tOK.") | 30.224168 | 178 | 0.735195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,510 | 0.435161 |
3824d6d18fbcaf0b52fe4f9be5410406a8d51803 | 1,498 | py | Python | zeus/util/tests/output.py | ZachAnders/OPLabs | 085030e60c23292c7860817233373ab6e1b19165 | [
"BSD-2-Clause"
] | 1 | 2018-02-06T17:43:51.000Z | 2018-02-06T17:43:51.000Z | zeus/util/tests/output.py | ZachAnders/OPLabs | 085030e60c23292c7860817233373ab6e1b19165 | [
"BSD-2-Clause"
] | null | null | null | zeus/util/tests/output.py | ZachAnders/OPLabs | 085030e60c23292c7860817233373ab6e1b19165 | [
"BSD-2-Clause"
] | null | null | null | __COL_GOOD = '\033[32m'
__COL_FAIL = '\033[31m'
__COL_INFO = '\033[34m'
__COL_BOLD = '\033[1m'
__COL_ULIN = '\033[4m'
__COL_ENDC = '\033[0m'
def __TEST__(status, msg, color, args):
args = ", ".join([str(key)+'='+str(args[key]) for key in args.keys()])
if args:
args = "(" + args + ")"
return "[{color}{status}{end}] {msg} {args}".format(
color=color,
status=status,
end=__COL_ENDC,
msg=msg,
args=args
)
def SUCCESS(test_name, **kwargs):
msg = "Test {tname} passed.".format(tname=test_name)
return __TEST__('PASS', msg, __COL_GOOD, kwargs)
def FAILURE(test_name, **kwargs):
msg = "Test {tname} failed.".format(tname=test_name)
return __TEST__('FAIL', msg, __COL_FAIL, kwargs)
def ANSI_wrapper(prefix):
def inner(message):
return prefix + message + __COL_ENDC
return inner
def truncate_repr(val, priority=None):
if priority and isinstance(val, dict):
val_copy = dict(val)
output = '{'
for k, v in priority.items():
output += "%s, %s" % (k, v)
val_copy.pop(k)
output += ", " + str(val_copy)[1:]
else:
output = str(val)
if len(output) <= 64:
return output
output = output[:64]
if isinstance(val, dict):
output += "...}"
elif isinstance(val, list):
output += "...]"
return output
INFO = ANSI_wrapper(__COL_INFO)
BOLD = ANSI_wrapper(__COL_BOLD)
UNDERLINE = ANSI_wrapper(__COL_ULIN)
| 27.236364 | 74 | 0.586115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.126836 |
3825d70d19be743114f68382ae76784d0aa81fda | 604 | py | Python | Chapter02/05_embossing_advance.py | yanboyang713/OpenCV-3-x-with-Python-By-Example | 7393745014e0108985b860cb9b45c1c72dc0180d | [
"MIT"
] | null | null | null | Chapter02/05_embossing_advance.py | yanboyang713/OpenCV-3-x-with-Python-By-Example | 7393745014e0108985b860cb9b45c1c72dc0180d | [
"MIT"
] | null | null | null | Chapter02/05_embossing_advance.py | yanboyang713/OpenCV-3-x-with-Python-By-Example | 7393745014e0108985b860cb9b45c1c72dc0180d | [
"MIT"
] | null | null | null | import cv2
import numpy as np
img = cv2.imread('./images/geometrics_input.png', cv2.IMREAD_GRAYSCALE)
rows, cols = img.shape
# It is used depth of cv2.CV_64F.
sobel_horizontal = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
# Kernel size can be: 1,3,5 or 7.
sobel_vertical = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
laplacian = cv2.Laplacian(img, cv2.CV_64F)
canny = cv2.Canny(img, 50, 240)
cv2.imshow('Original', img)
cv2.imshow('Sobel horizontal', sobel_horizontal)
cv2.imshow('Sobel vertical', sobel_vertical)
cv2.imshow('Laplacian', laplacian)
cv2.imshow('Canny', canny)
cv2.waitKey(0)
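# A hedged extension sketch (not in the original script): combine the two
# Sobel responses into a single gradient magnitude and rescale it to 8-bit
# so cv2.imshow displays it correctly.
magnitude = np.sqrt(sobel_horizontal ** 2 + sobel_vertical ** 2)
magnitude = cv2.normalize(magnitude, None, 0, 255,
                          cv2.NORM_MINMAX).astype(np.uint8)
cv2.imshow('Gradient magnitude', magnitude)
cv2.waitKey(0)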
| 25.166667 | 71 | 0.718543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.263245 |
382763467eeacdcf8102a2eb884ff148f1377e58 | 269 | py | Python | dataladmetadatamodel/text.py | yarikoptic/metadata-model | fac75a12f1c1ceaf111372dc1d22c8f39ce1481d | [
"MIT"
] | null | null | null | dataladmetadatamodel/text.py | yarikoptic/metadata-model | fac75a12f1c1ceaf111372dc1d22c8f39ce1481d | [
"MIT"
] | null | null | null | dataladmetadatamodel/text.py | yarikoptic/metadata-model | fac75a12f1c1ceaf111372dc1d22c8f39ce1481d | [
"MIT"
] | null | null | null | """
Instances of the Text class just contain
text. Their main use is as a dummy element
during model development.
"""
from dataclasses import dataclass
from dataladmetadatamodel.connector import ConnectedObject
@dataclass
class Text(ConnectedObject):
content: str
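# A minimal usage sketch (hypothetical, assuming ConnectedObject requires no
# constructor arguments of its own); the dataclass decorator generates the
# __init__ for the single field:
#
#     text = Text(content="placeholder")
#     assert text.content == "placeholder"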
| 19.214286 | 58 | 0.799257 | 45 | 0.167286 | 0 | 0 | 56 | 0.208178 | 0 | 0 | 115 | 0.427509 |
382924f8e44ff0070db746377fb391d4fbcf5d44 | 790 | py | Python | server/apps/bot/views.py | LowerDeez/movies_finder | 3763bfe4c0d1cfe36e081c45a9cc9cdaa85e0ee4 | [
"MIT"
] | null | null | null | server/apps/bot/views.py | LowerDeez/movies_finder | 3763bfe4c0d1cfe36e081c45a9cc9cdaa85e0ee4 | [
"MIT"
] | null | null | null | server/apps/bot/views.py | LowerDeez/movies_finder | 3763bfe4c0d1cfe36e081c45a9cc9cdaa85e0ee4 | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from .registry import registry
from .services.webhook import process_webhook_event
__all__ = (
'WebhookView',
)
class WebhookView(APIView):
permission_classes = (AllowAny,)
def post(self, request, *args, **kwargs):
token: str = kwargs['token']
print('TOKEN:', token)
print('REGISTRY:', registry)
dispatcher = registry.get_dispatcher(token)
print('DISPATCHER:', dispatcher)
dispatcher = process_webhook_event(
            token=token,
request_body=request.data,
dispatcher=dispatcher
)
registry.register(dispatcher)
return Response()
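# A hedged wiring sketch (hypothetical urls.py, not part of this module):
# the view reads `token` from its URL kwargs, so a matching route could be:
#
#     from django.urls import path
#     from .views import WebhookView
#
#     urlpatterns = [
#         path('bot/webhook/<str:token>/', WebhookView.as_view()),
#     ]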
| 27.241379 | 51 | 0.674684 | 535 | 0.677215 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.074684 |
3829f67a2282de2962daadc9c35410add7d48c53 | 161,798 | py | Python | subject/tests/functional/v2/test_images.py | laoyigrace/subject | e6ed989fdc250917a19788112b22322b73b3550f | [
"Apache-2.0"
] | null | null | null | subject/tests/functional/v2/test_images.py | laoyigrace/subject | e6ed989fdc250917a19788112b22322b73b3550f | [
"Apache-2.0"
] | null | null | null | subject/tests/functional/v2/test_images.py | laoyigrace/subject | e6ed989fdc250917a19788112b22322b73b3550f | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import signal
import uuid
from oslo_serialization import jsonutils
import requests
import six
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from six.moves import urllib
from subject.tests import functional
from subject.tests import utils as test_utils
TENANT1 = str(uuid.uuid4())
TENANT2 = str(uuid.uuid4())
TENANT3 = str(uuid.uuid4())
TENANT4 = str(uuid.uuid4())
class TestSubjects(functional.FunctionalTest):
def setUp(self):
super(TestSubjects, self).setUp()
self.cleanup()
self.api_server.deployment_flavor = 'noauth'
self.api_server.data_api = 'subject.db.sqlalchemy.api'
for i in range(3):
ret = test_utils.start_http_server("foo_subject_id%d" % i,
"foo_subject%d" % i)
setattr(self, 'http_server%d_pid' % i, ret[0])
setattr(self, 'http_port%d' % i, ret[1])
def tearDown(self):
for i in range(3):
pid = getattr(self, 'http_server%d_pid' % i, None)
if pid:
os.kill(pid, signal.SIGKILL)
super(TestSubjects, self).tearDown()
def _url(self, path):
return 'http://127.0.0.1:%d%s' % (self.api_port, path)
def _headers(self, custom_headers=None):
base_headers = {
'X-Identity-Status': 'Confirmed',
'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
'X-Tenant-Id': TENANT1,
'X-Roles': 'member',
}
base_headers.update(custom_headers or {})
return base_headers
def test_v1_none_properties_v2(self):
self.api_server.deployment_flavor = 'noauth'
self.api_server.use_user_token = True
self.api_server.send_identity_credentials = True
self.registry_server.deployment_flavor = ''
        # Start the servers; no subjects exist yet
self.start_servers(**self.__dict__.copy())
        # Create a subject (with a deployer-defined property)
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/octet-stream'})
headers.update(test_utils.minimal_headers('subject-1'))
        # NOTE(flaper87): When an empty string is sent, the server uses None
headers['x-subject-meta-property-my_empty_prop'] = ''
response = requests.post(path, headers=headers)
self.assertEqual(201, response.status_code)
data = jsonutils.loads(response.text)
subject_id = data['subject']['id']
# NOTE(flaper87): Get the subject using V2 and verify
# the returned value for `my_empty_prop` is an empty
# string.
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertEqual('', subject['my_empty_prop'])
self.stop_servers()
def test_not_authenticated_in_registry_on_ops(self):
# https://bugs.launchpad.net/glance/+bug/1451850
# this configuration guarantees that authentication succeeds in
# subject-api and fails in subject-registry if no token is passed
self.api_server.deployment_flavor = ''
# make sure that request will reach registry
self.api_server.data_api = 'subject.db.registry.api'
self.registry_server.deployment_flavor = 'fakeauth'
self.start_servers(**self.__dict__.copy())
headers = {'content-type': 'application/json'}
subject = {'name': 'subject', 'type': 'kernel', 'disk_format': 'qcow2',
'container_format': 'bare'}
# subject create should return 401
response = requests.post(self._url('/v1/subjects'), headers=headers,
data=jsonutils.dumps(subject))
self.assertEqual(401, response.status_code)
# subject list should return 401
response = requests.get(self._url('/v1/subjects'))
self.assertEqual(401, response.status_code)
# subject show should return 401
response = requests.get(self._url('/v1/subjects/somesubjectid'))
self.assertEqual(401, response.status_code)
# subject update should return 401
ops = [{'op': 'replace', 'path': '/protected', 'value': False}]
media_type = 'application/openstack-subjects-v1.1-json-patch'
response = requests.patch(self._url('/v1/subjects/somesubjectid'),
headers={'content-type': media_type},
data=jsonutils.dumps(ops))
self.assertEqual(401, response.status_code)
# subject delete should return 401
response = requests.delete(self._url('/v1/subjects/somesubjectid'))
self.assertEqual(401, response.status_code)
self.stop_servers()
def test_subject_lifecycle(self):
# Subject list should be empty
self.api_server.show_multiple_locations = True
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
        # Create a subject (with two deployer-defined properties)
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-1', 'type': 'kernel',
'foo': 'bar', 'disk_format': 'aki',
'container_format': 'aki', 'abc': 'xyz'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject_location_header = response.headers['Location']
# Returned subject entity should have a generated id and status
subject = jsonutils.loads(response.text)
subject_id = subject['id']
checked_keys = set([
u'status',
u'name',
u'tags',
u'created_at',
u'updated_at',
u'visibility',
u'self',
u'protected',
u'id',
u'file',
u'min_disk',
u'foo',
u'abc',
u'type',
u'min_ram',
u'schema',
u'disk_format',
u'container_format',
u'owner',
u'checksum',
u'size',
u'virtual_size',
u'locations',
])
self.assertEqual(checked_keys, set(subject.keys()))
expected_subject = {
'status': 'queued',
'name': 'subject-1',
'tags': [],
'visibility': 'private',
'self': '/v1/subjects/%s' % subject_id,
'protected': False,
'file': '/v1/subjects/%s/file' % subject_id,
'min_disk': 0,
'foo': 'bar',
'abc': 'xyz',
'type': 'kernel',
'min_ram': 0,
'schema': '/v1/schemas/subject',
}
for key, value in expected_subject.items():
self.assertEqual(value, subject[key], key)
# Subject list should now have one entry
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual(subject_id, subjects[0]['id'])
# Create another subject (with two deployer-defined properties)
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-2', 'type': 'kernel',
'bar': 'foo', 'disk_format': 'aki',
'container_format': 'aki', 'xyz': 'abc'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Returned subject entity should have a generated id and status
subject = jsonutils.loads(response.text)
subject2_id = subject['id']
checked_keys = set([
u'status',
u'name',
u'tags',
u'created_at',
u'updated_at',
u'visibility',
u'self',
u'protected',
u'id',
u'file',
u'min_disk',
u'bar',
u'xyz',
u'type',
u'min_ram',
u'schema',
u'disk_format',
u'container_format',
u'owner',
u'checksum',
u'size',
u'virtual_size',
u'locations',
])
self.assertEqual(checked_keys, set(subject.keys()))
expected_subject = {
'status': 'queued',
'name': 'subject-2',
'tags': [],
'visibility': 'private',
'self': '/v1/subjects/%s' % subject2_id,
'protected': False,
'file': '/v1/subjects/%s/file' % subject2_id,
'min_disk': 0,
'bar': 'foo',
'xyz': 'abc',
'type': 'kernel',
'min_ram': 0,
'schema': '/v1/schemas/subject',
}
for key, value in expected_subject.items():
self.assertEqual(value, subject[key], key)
# Subject list should now have two entries
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(2, len(subjects))
self.assertEqual(subject2_id, subjects[0]['id'])
self.assertEqual(subject_id, subjects[1]['id'])
# Subject list should list only subject-2 as subject-1 doesn't contain the
# property 'bar'
path = self._url('/v1/subjects?bar=foo')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual(subject2_id, subjects[0]['id'])
# Subject list should list only subject-1 as subject-2 doesn't contain the
# property 'foo'
path = self._url('/v1/subjects?foo=bar')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual(subject_id, subjects[0]['id'])
# The "changes-since" filter shouldn't work on subject v1
path = self._url('/v1/subjects?changes-since=20001007T10:10:10')
response = requests.get(path, headers=self._headers())
self.assertEqual(400, response.status_code)
path = self._url('/v1/subjects?changes-since=aaa')
response = requests.get(path, headers=self._headers())
self.assertEqual(400, response.status_code)
# Subject list should list only subject-1 based on the filter
# 'foo=bar&abc=xyz'
path = self._url('/v1/subjects?foo=bar&abc=xyz')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual(subject_id, subjects[0]['id'])
# Subject list should list only subject-2 based on the filter
# 'bar=foo&xyz=abc'
path = self._url('/v1/subjects?bar=foo&xyz=abc')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual(subject2_id, subjects[0]['id'])
# Subject list should not list anything as the filter 'foo=baz&abc=xyz'
        # is not satisfied by either subject
path = self._url('/v1/subjects?foo=baz&abc=xyz')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
# Get the subject using the returned Location header
response = requests.get(subject_location_header, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertEqual(subject_id, subject['id'])
self.assertIsNone(subject['checksum'])
self.assertIsNone(subject['size'])
self.assertIsNone(subject['virtual_size'])
self.assertEqual('bar', subject['foo'])
self.assertFalse(subject['protected'])
self.assertEqual('kernel', subject['type'])
self.assertTrue(subject['created_at'])
self.assertTrue(subject['updated_at'])
self.assertEqual(subject['updated_at'], subject['created_at'])
# The URI file:// should return a 400 rather than a 500
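        # (A file:// location would point at the API node's own filesystem,
        # so it is treated as invalid client input instead of being passed
        # on to the backend.)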
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
url = ('file://')
changes = [{
'op': 'add',
'path': '/locations/-',
'value': {
'url': url,
'metadata': {}
}
}]
data = jsonutils.dumps(changes)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(400, response.status_code, response.text)
# The subject should be mutable, including adding and removing properties
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
data = jsonutils.dumps([
{'op': 'replace', 'path': '/name', 'value': 'subject-2'},
{'op': 'replace', 'path': '/disk_format', 'value': 'vhd'},
{'op': 'replace', 'path': '/container_format', 'value': 'ami'},
{'op': 'replace', 'path': '/foo', 'value': 'baz'},
{'op': 'add', 'path': '/ping', 'value': 'pong'},
{'op': 'replace', 'path': '/protected', 'value': True},
{'op': 'remove', 'path': '/type'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Returned subject entity should reflect the changes
subject = jsonutils.loads(response.text)
self.assertEqual('subject-2', subject['name'])
self.assertEqual('vhd', subject['disk_format'])
self.assertEqual('baz', subject['foo'])
self.assertEqual('pong', subject['ping'])
self.assertTrue(subject['protected'])
self.assertNotIn('type', subject, response.text)
        # Adding 11 subject properties should fail since the configured
        # limit is 10
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
changes = []
for i in range(11):
changes.append({'op': 'add',
'path': '/ping%i' % i,
'value': 'pong'})
data = jsonutils.dumps(changes)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(413, response.status_code, response.text)
        # Adding 3 subject locations should fail since the configured
        # limit is 2
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
changes = []
for i in range(3):
url = ('http://127.0.0.1:%s/foo_subject' %
getattr(self, 'http_port%d' % i))
changes.append({'op': 'add', 'path': '/locations/-',
'value': {'url': url, 'metadata': {}},
})
data = jsonutils.dumps(changes)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(413, response.status_code, response.text)
# Ensure the v1.0 json-patch content type is accepted
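        # (In the v1.0 draft format the operation name is the key, e.g.
        # {'add': <path>, 'value': <value>}, rather than the later
        # {'op': ..., 'path': ..., 'value': ...} form used above.)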
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.0-json-patch'
headers = self._headers({'content-type': media_type})
data = jsonutils.dumps([{'add': '/ding', 'value': 'dong'}])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Returned subject entity should reflect the changes
subject = jsonutils.loads(response.text)
self.assertEqual('dong', subject['ding'])
# Updates should persist across requests
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertEqual(subject_id, subject['id'])
self.assertEqual('subject-2', subject['name'])
self.assertEqual('baz', subject['foo'])
self.assertEqual('pong', subject['ping'])
self.assertTrue(subject['protected'])
self.assertNotIn('type', subject, response.text)
        # Try to download data before it is uploaded
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers()
response = requests.get(path, headers=headers)
self.assertEqual(204, response.status_code)
def _verify_subject_checksum_and_status(checksum, status):
# Checksum should be populated and status should be active
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertEqual(checksum, subject['checksum'])
self.assertEqual(status, subject['status'])
# Upload some subject data
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data='ZZZZZ')
self.assertEqual(204, response.status_code)
expected_checksum = '8f113e38d28a79a5a451b16048cc2b72'
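        # ('8f113e38d28a79a5a451b16048cc2b72' is the MD5 hex digest of the
        # five-byte payload 'ZZZZZ' uploaded above.)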
_verify_subject_checksum_and_status(expected_checksum, 'active')
# `disk_format` and `container_format` cannot
# be replaced when the subject is active.
immutable_paths = ['/disk_format', '/container_format']
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
path = self._url('/v1/subjects/%s' % subject_id)
for immutable_path in immutable_paths:
data = jsonutils.dumps([
{'op': 'replace', 'path': immutable_path, 'value': 'ari'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
# Try to download the data that was just uploaded
path = self._url('/v1/subjects/%s/file' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
self.assertEqual(expected_checksum, response.headers['Content-MD5'])
self.assertEqual('ZZZZZ', response.text)
# Uploading duplicate data should be rejected with a 409. The
# original data should remain untouched.
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data='XXX')
self.assertEqual(409, response.status_code)
_verify_subject_checksum_and_status(expected_checksum, 'active')
# Ensure the size is updated to reflect the data uploaded
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
self.assertEqual(5, jsonutils.loads(response.text)['size'])
        # Should be able to deactivate the subject
path = self._url('/v1/subjects/%s/actions/deactivate' % subject_id)
response = requests.post(path, data={}, headers=self._headers())
self.assertEqual(204, response.status_code)
# Change the subject to public so TENANT2 can see it
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.0-json-patch'
headers = self._headers({'content-type': media_type})
data = jsonutils.dumps([{"replace": "/visibility", "value": "public"}])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Tenant2 should get Forbidden when deactivating the public subject
path = self._url('/v1/subjects/%s/actions/deactivate' % subject_id)
response = requests.post(path, data={}, headers=self._headers(
{'X-Tenant-Id': TENANT2}))
self.assertEqual(403, response.status_code)
# Tenant2 should get Forbidden when reactivating the public subject
path = self._url('/v1/subjects/%s/actions/reactivate' % subject_id)
response = requests.post(path, data={}, headers=self._headers(
{'X-Tenant-Id': TENANT2}))
self.assertEqual(403, response.status_code)
# Deactivating a deactivated subject succeeds (no-op)
path = self._url('/v1/subjects/%s/actions/deactivate' % subject_id)
response = requests.post(path, data={}, headers=self._headers())
self.assertEqual(204, response.status_code)
# Can't download a deactivated subject
path = self._url('/v1/subjects/%s/file' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(403, response.status_code)
# Deactivated subject should still be in a listing
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(2, len(subjects))
self.assertEqual(subject2_id, subjects[0]['id'])
self.assertEqual(subject_id, subjects[1]['id'])
# Should be able to reactivate a deactivated subject
path = self._url('/v1/subjects/%s/actions/reactivate' % subject_id)
response = requests.post(path, data={}, headers=self._headers())
self.assertEqual(204, response.status_code)
# Reactivating an active subject succeeds (no-op)
path = self._url('/v1/subjects/%s/actions/reactivate' % subject_id)
response = requests.post(path, data={}, headers=self._headers())
self.assertEqual(204, response.status_code)
# Deletion should not work on protected subjects
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(403, response.status_code)
# Unprotect subject for deletion
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
doc = [{'op': 'replace', 'path': '/protected', 'value': False}]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Deletion should work. Deleting subject-1
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
        # This subject should no longer be directly accessible
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
# And neither should its data
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers()
response = requests.get(path, headers=headers)
self.assertEqual(404, response.status_code)
# Subject list should now contain just subject-2
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual(subject2_id, subjects[0]['id'])
# Deleting subject-2 should work
path = self._url('/v1/subjects/%s' % subject2_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
# Subject list should now be empty
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
        # Creating a subject by sending the JSON literal true should
        # return 400
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = 'true'
response = requests.post(path, headers=headers, data=data)
self.assertEqual(400, response.status_code)
        # Creating a subject by sending a bare JSON string should return 400
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = '"hello"'
response = requests.post(path, headers=headers, data=data)
self.assertEqual(400, response.status_code)
        # Creating a subject by sending the bare JSON number 123 should
        # return 400
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = '123'
response = requests.post(path, headers=headers, data=data)
self.assertEqual(400, response.status_code)
self.stop_servers()
def test_update_readonly_prop(self):
self.start_servers(**self.__dict__.copy())
        # Create a minimal subject (name only)
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-1'})
response = requests.post(path, headers=headers, data=data)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
props = ['/id', '/file', '/location', '/schema', '/self']
for prop in props:
doc = [{'op': 'replace',
'path': prop,
'value': 'value1'}]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
for prop in props:
doc = [{'op': 'remove',
'path': prop,
'value': 'value1'}]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
for prop in props:
doc = [{'op': 'add',
'path': prop,
'value': 'value1'}]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
self.stop_servers()
def test_methods_that_dont_accept_illegal_bodies(self):
        # Check that the subjects endpoint can be reached
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
# Test all the schemas
schema_urls = [
'/v1/schemas/subjects',
'/v1/schemas/subject',
'/v1/schemas/members',
'/v1/schemas/member',
]
for value in schema_urls:
path = self._url(value)
data = jsonutils.dumps(["body"])
response = requests.get(path, headers=self._headers(), data=data)
self.assertEqual(400, response.status_code)
        # Create a subject for use with the tests
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
test_urls = [
('/v1/subjects/%s', 'get'),
('/v1/subjects/%s/actions/deactivate', 'post'),
('/v1/subjects/%s/actions/reactivate', 'post'),
('/v1/subjects/%s/tags/mytag', 'put'),
('/v1/subjects/%s/tags/mytag', 'delete'),
('/v1/subjects/%s/members', 'get'),
('/v1/subjects/%s/file', 'get'),
('/v1/subjects/%s', 'delete'),
]
for link, method in test_urls:
path = self._url(link % subject_id)
data = jsonutils.dumps(["body"])
response = getattr(requests, method)(
path, headers=self._headers(), data=data)
self.assertEqual(400, response.status_code)
        # DELETE /subjects/imgid with malformed JSON in the body
path = self._url('/v1/subjects/%s' % subject_id)
data = '{"hello"]'
response = requests.delete(path, headers=self._headers(), data=data)
self.assertEqual(400, response.status_code)
# POST /subjects/imgid/members
path = self._url('/v1/subjects/%s/members' % subject_id)
data = jsonutils.dumps({'member': TENANT3})
response = requests.post(path, headers=self._headers(), data=data)
self.assertEqual(200, response.status_code)
# GET /subjects/imgid/members/memid
path = self._url('/v1/subjects/%s/members/%s' % (subject_id, TENANT3))
data = jsonutils.dumps(["body"])
response = requests.get(path, headers=self._headers(), data=data)
self.assertEqual(400, response.status_code)
# DELETE /subjects/imgid/members/memid
path = self._url('/v1/subjects/%s/members/%s' % (subject_id, TENANT3))
data = jsonutils.dumps(["body"])
response = requests.delete(path, headers=self._headers(), data=data)
self.assertEqual(400, response.status_code)
self.stop_servers()
def test_download_random_access(self):
self.start_servers(**self.__dict__.copy())
        # Create a subject (with two deployer-defined properties)
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-2', 'type': 'kernel',
'bar': 'foo', 'disk_format': 'aki',
'container_format': 'aki', 'xyz': 'abc'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
# Upload data to subject
subject_data = 'Z' * 15
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data=subject_data)
self.assertEqual(204, response.status_code)
result_body = ''
for x in range(15):
# NOTE(flaper87): Read just 1 byte. Content-Range is
# 0-indexed and it specifies the first byte to read
# and the last byte to read.
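            # For example, 'bytes 0-0/15' requests exactly the first byte.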
content_range = 'bytes %s-%s/15' % (x, x)
headers = self._headers({'Content-Range': content_range})
path = self._url('/v1/subjects/%s/file' % subject_id)
response = requests.get(path, headers=headers)
result_body += response.text
self.assertEqual(result_body, subject_data)
self.stop_servers()
def test_download_policy_when_cache_is_not_enabled(self):
rules = {'context_is_admin': 'role:admin',
'default': '',
'add_subject': '',
'get_subject': '',
'modify_subject': '',
'upload_subject': '',
'delete_subject': '',
'download_subject': '!'}
self.set_policy_rules(rules)
self.start_servers(**self.__dict__.copy())
        # Create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'member'})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Returned subject entity
subject = jsonutils.loads(response.text)
subject_id = subject['id']
expected_subject = {
'status': 'queued',
'name': 'subject-1',
'tags': [],
'visibility': 'private',
'self': '/v1/subjects/%s' % subject_id,
'protected': False,
'file': '/v1/subjects/%s/file' % subject_id,
'min_disk': 0,
'min_ram': 0,
'schema': '/v1/schemas/subject',
}
for key, value in six.iteritems(expected_subject):
self.assertEqual(value, subject[key], key)
# Upload data to subject
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data='ZZZZZ')
self.assertEqual(204, response.status_code)
        # Getting the subject file should fail
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.get(path, headers=headers)
self.assertEqual(403, response.status_code)
        # Subject deletion should work
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
        # This subject should no longer be directly accessible
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
self.stop_servers()
def test_download_subject_not_allowed_using_restricted_policy(self):
rules = {
"context_is_admin": "role:admin",
"default": "",
"add_subject": "",
"get_subject": "",
"modify_subject": "",
"upload_subject": "",
"delete_subject": "",
"restricted":
"not ('aki':%(container_format)s and role:_member_)",
"download_subject": "role:admin or rule:restricted"
}
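        # In oslo.policy syntax, "'aki':%(container_format)s" compares the
        # literal 'aki' with the subject's container_format, so "restricted"
        # denies download to the _member_ role for aki subjects.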
self.set_policy_rules(rules)
self.start_servers(**self.__dict__.copy())
        # Create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'member'})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Returned subject entity
subject = jsonutils.loads(response.text)
subject_id = subject['id']
expected_subject = {
'status': 'queued',
'name': 'subject-1',
'tags': [],
'visibility': 'private',
'self': '/v1/subjects/%s' % subject_id,
'protected': False,
'file': '/v1/subjects/%s/file' % subject_id,
'min_disk': 0,
'min_ram': 0,
'schema': '/v1/schemas/subject',
}
for key, value in six.iteritems(expected_subject):
self.assertEqual(value, subject[key], key)
# Upload data to subject
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data='ZZZZZ')
self.assertEqual(204, response.status_code)
        # Getting the subject file should fail
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream',
'X-Roles': '_member_'})
response = requests.get(path, headers=headers)
self.assertEqual(403, response.status_code)
        # Subject deletion should work
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
        # This subject should no longer be directly accessible
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
self.stop_servers()
def test_download_subject_allowed_using_restricted_policy(self):
rules = {
"context_is_admin": "role:admin",
"default": "",
"add_subject": "",
"get_subject": "",
"modify_subject": "",
"upload_subject": "",
"get_subject_location": "",
"delete_subject": "",
"restricted":
"not ('aki':%(container_format)s and role:_member_)",
"download_subject": "role:admin or rule:restricted"
}
self.set_policy_rules(rules)
self.start_servers(**self.__dict__.copy())
        # Create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'member'})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Returned subject entity
subject = jsonutils.loads(response.text)
subject_id = subject['id']
expected_subject = {
'status': 'queued',
'name': 'subject-1',
'tags': [],
'visibility': 'private',
'self': '/v1/subjects/%s' % subject_id,
'protected': False,
'file': '/v1/subjects/%s/file' % subject_id,
'min_disk': 0,
'min_ram': 0,
'schema': '/v1/schemas/subject',
}
for key, value in six.iteritems(expected_subject):
self.assertEqual(value, subject[key], key)
# Upload data to subject
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data='ZZZZZ')
self.assertEqual(204, response.status_code)
        # Getting the subject file should be allowed
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream',
'X-Roles': 'member'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
        # Subject deletion should work
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
        # This subject should no longer be directly accessible
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
self.stop_servers()
def test_download_subject_raises_service_unavailable(self):
"""Test subject download returns HTTPServiceUnavailable."""
self.api_server.show_multiple_locations = True
self.start_servers(**self.__dict__.copy())
        # Create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get subject id
subject = jsonutils.loads(response.text)
subject_id = subject['id']
# Update subject locations via PATCH
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
http_server_pid, http_port = test_utils.start_http_server(subject_id,
"subject-1")
values = [{'url': 'http://127.0.0.1:%s/subject-1' % http_port,
'metadata': {'idx': '0'}}]
doc = [{'op': 'replace',
'path': '/locations',
'value': values}]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code)
        # Downloading the subject should work
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/json'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
        # Stop the http server that backs the subject's location
os.kill(http_server_pid, signal.SIGKILL)
        # Downloading the subject should now raise HTTPServiceUnavailable
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/json'})
response = requests.get(path, headers=headers)
self.assertEqual(503, response.status_code)
        # Subject deletion should work
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
        # This subject should no longer be directly accessible
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
self.stop_servers()
def test_subject_modification_works_for_owning_tenant_id(self):
rules = {
"context_is_admin": "role:admin",
"default": "",
"add_subject": "",
"get_subject": "",
"modify_subject": "tenant:%(owner)s",
"upload_subject": "",
"get_subject_location": "",
"delete_subject": "",
"restricted":
"not ('aki':%(container_format)s and role:_member_)",
"download_subject": "role:admin or rule:restricted"
}
self.set_policy_rules(rules)
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the subject's ID
subject = jsonutils.loads(response.text)
subject_id = subject['id']
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers['content-type'] = media_type
del headers['X-Roles']
data = jsonutils.dumps([
{'op': 'replace', 'path': '/name', 'value': 'new-name'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code)
self.stop_servers()
def test_subject_modification_fails_on_mismatched_tenant_ids(self):
rules = {
"context_is_admin": "role:admin",
"default": "",
"add_subject": "",
"get_subject": "",
"modify_subject": "'A-Fake-Tenant-Id':%(owner)s",
"upload_subject": "",
"get_subject_location": "",
"delete_subject": "",
"restricted":
"not ('aki':%(container_format)s and role:_member_)",
"download_subject": "role:admin or rule:restricted"
}
self.set_policy_rules(rules)
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the subject's ID
subject = jsonutils.loads(response.text)
subject_id = subject['id']
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers['content-type'] = media_type
del headers['X-Roles']
data = jsonutils.dumps([
{'op': 'replace', 'path': '/name', 'value': 'new-name'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
self.stop_servers()
def test_member_additions_works_for_owning_tenant_id(self):
rules = {
"context_is_admin": "role:admin",
"default": "",
"add_subject": "",
"get_subject": "",
"modify_subject": "",
"upload_subject": "",
"get_subject_location": "",
"delete_subject": "",
"restricted":
"not ('aki':%(container_format)s and role:_member_)",
"download_subject": "role:admin or rule:restricted",
"add_member": "tenant:%(owner)s",
}
self.set_policy_rules(rules)
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the subject's ID
subject = jsonutils.loads(response.text)
subject_id = subject['id']
# Get the subject's members resource
path = self._url('/v1/subjects/%s/members' % subject_id)
body = jsonutils.dumps({'member': TENANT3})
del headers['X-Roles']
response = requests.post(path, headers=headers, data=body)
self.assertEqual(200, response.status_code)
self.stop_servers()
def test_subject_additions_works_only_for_specific_tenant_id(self):
rules = {
"context_is_admin": "role:admin",
"default": "",
"add_subject": "'{0}':%(owner)s".format(TENANT1),
"get_subject": "",
"modify_subject": "",
"upload_subject": "",
"get_subject_location": "",
"delete_subject": "",
"restricted":
"not ('aki':%(container_format)s and role:_member_)",
"download_subject": "role:admin or rule:restricted",
"add_member": "",
}
self.set_policy_rules(rules)
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin', 'X-Tenant-Id': TENANT1})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
headers['X-Tenant-Id'] = TENANT2
response = requests.post(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
self.stop_servers()
def test_owning_tenant_id_can_retrieve_subject_information(self):
rules = {
"context_is_admin": "role:admin",
"default": "",
"add_subject": "",
"get_subject": "tenant:%(owner)s",
"modify_subject": "",
"upload_subject": "",
"get_subject_location": "",
"delete_subject": "",
"restricted":
"not ('aki':%(container_format)s and role:_member_)",
"download_subject": "role:admin or rule:restricted",
"add_member": "",
}
self.set_policy_rules(rules)
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin', 'X-Tenant-Id': TENANT1})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Remove the admin role
del headers['X-Roles']
# Get the subject's ID
subject = jsonutils.loads(response.text)
subject_id = subject['id']
# Can retrieve the subject as TENANT1
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
# Can retrieve the subject's members as TENANT1
path = self._url('/v1/subjects/%s/members' % subject_id)
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
headers['X-Tenant-Id'] = TENANT2
response = requests.get(path, headers=headers)
self.assertEqual(403, response.status_code)
self.stop_servers()
def test_owning_tenant_can_publicize_subject(self):
rules = {
"context_is_admin": "role:admin",
"default": "",
"add_subject": "",
"publicize_subject": "tenant:%(owner)s",
"get_subject": "tenant:%(owner)s",
"modify_subject": "",
"upload_subject": "",
"get_subject_location": "",
"delete_subject": "",
"restricted":
"not ('aki':%(container_format)s and role:_member_)",
"download_subject": "role:admin or rule:restricted",
"add_member": "",
}
self.set_policy_rules(rules)
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin', 'X-Tenant-Id': TENANT1})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the subject's ID
subject = jsonutils.loads(response.text)
subject_id = subject['id']
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({
'Content-Type': 'application/openstack-subjects-v1.1-json-patch',
'X-Tenant-Id': TENANT1,
})
doc = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code)
def test_owning_tenant_can_delete_subject(self):
rules = {
"context_is_admin": "role:admin",
"default": "",
"add_subject": "",
"publicize_subject": "tenant:%(owner)s",
"get_subject": "tenant:%(owner)s",
"modify_subject": "",
"upload_subject": "",
"get_subject_location": "",
"delete_subject": "",
"restricted":
"not ('aki':%(container_format)s and role:_member_)",
"download_subject": "role:admin or rule:restricted",
"add_member": "",
}
self.set_policy_rules(rules)
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin', 'X-Tenant-Id': TENANT1})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the subject's ID
subject = jsonutils.loads(response.text)
subject_id = subject['id']
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.delete(path, headers=headers)
self.assertEqual(204, response.status_code)
def test_list_show_ok_when_get_location_allowed_for_admins(self):
self.api_server.show_subject_direct_url = True
self.api_server.show_multiple_locations = True
        # set up policy so that subject locations are visible to admins only
rules = {
"context_is_admin": "role:admin",
"default": "",
"add_subject": "",
"get_subject": "",
"modify_subject": "",
"upload_subject": "",
"get_subject_location": "role:admin",
"delete_subject": "",
"restricted": "",
"download_subject": "",
"add_member": "",
}
self.set_policy_rules(rules)
self.start_servers(**self.__dict__.copy())
        # Create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Tenant-Id': TENANT1})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the subject's ID
subject = jsonutils.loads(response.text)
subject_id = subject['id']
# Can retrieve the subject as TENANT1
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
# Can list subjects as TENANT1
path = self._url('/v1/subjects')
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
self.stop_servers()
def test_subject_size_cap(self):
self.api_server.subject_size_cap = 128
self.start_servers(**self.__dict__.copy())
        # create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-size-cap-test-subject',
'type': 'kernel', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
# try to populate it with oversized data
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
class StreamSim(object):
# Using a one-shot iterator to force chunked transfer in the PUT
# request
def __init__(self, size):
self.size = size
def __iter__(self):
yield 'Z' * self.size
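        # Since the iterator exposes no length, `requests` sends the body
        # with chunked Transfer-Encoding; the server must enforce the size
        # cap while reading rather than from a Content-Length header.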
response = requests.put(path, headers=headers, data=StreamSim(
self.api_server.subject_size_cap + 1))
self.assertEqual(413, response.status_code)
# hashlib.md5('Z'*129).hexdigest()
# == '76522d28cb4418f12704dfa7acd6e7ee'
# If the subject has this checksum, it means that the whole stream was
# accepted and written to the store, which should not be the case.
path = self._url('/v1/subjects/{0}'.format(subject_id))
headers = self._headers({'content-type': 'application/json'})
response = requests.get(path, headers=headers)
subject_checksum = jsonutils.loads(response.text).get('checksum')
self.assertNotEqual(subject_checksum, '76522d28cb4418f12704dfa7acd6e7ee')
def test_permissions(self):
self.start_servers(**self.__dict__.copy())
        # Create a subject that belongs to TENANT1
path = self._url('/v1/subjects')
headers = self._headers({'Content-Type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'raw',
'container_format': 'bare'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject_id = jsonutils.loads(response.text)['id']
# Upload some subject data
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data='ZZZZZ')
self.assertEqual(204, response.status_code)
# TENANT1 should see the subject in their list
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(subject_id, subjects[0]['id'])
# TENANT1 should be able to access the subject directly
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
# TENANT2 should not see the subject in their list
path = self._url('/v1/subjects')
headers = self._headers({'X-Tenant-Id': TENANT2})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
# TENANT2 should not be able to access the subject directly
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'X-Tenant-Id': TENANT2})
response = requests.get(path, headers=headers)
self.assertEqual(404, response.status_code)
# TENANT2 should not be able to modify the subject, either
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({
'Content-Type': 'application/openstack-subjects-v1.1-json-patch',
'X-Tenant-Id': TENANT2,
})
doc = [{'op': 'replace', 'path': '/name', 'value': 'subject-2'}]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(404, response.status_code)
# TENANT2 should not be able to delete the subject, either
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'X-Tenant-Id': TENANT2})
response = requests.delete(path, headers=headers)
self.assertEqual(404, response.status_code)
# Publicize the subject as an admin of TENANT1
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({
'Content-Type': 'application/openstack-subjects-v1.1-json-patch',
'X-Roles': 'admin',
})
doc = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code)
# TENANT3 should now see the subject in their list
path = self._url('/v1/subjects')
headers = self._headers({'X-Tenant-Id': TENANT3})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(subject_id, subjects[0]['id'])
# TENANT3 should also be able to access the subject directly
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'X-Tenant-Id': TENANT3})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
# TENANT3 still should not be able to modify the subject
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({
'Content-Type': 'application/openstack-subjects-v1.1-json-patch',
'X-Tenant-Id': TENANT3,
})
doc = [{'op': 'replace', 'path': '/name', 'value': 'subject-2'}]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
# TENANT3 should not be able to delete the subject, either
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'X-Tenant-Id': TENANT3})
response = requests.delete(path, headers=headers)
self.assertEqual(403, response.status_code)
# Subject data should still be present after the failed delete
path = self._url('/v1/subjects/%s/file' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
self.assertEqual(response.text, 'ZZZZZ')
self.stop_servers()
def test_property_protections_with_roles(self):
# Enable property protection
self.api_server.property_protection_file = self.property_file_roles
self.start_servers(**self.__dict__.copy())
# Subject list should be empty
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
        # Create a subject for role member with extra props
        # Raises 403 since the user is not allowed to set 'foo'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'member'})
data = jsonutils.dumps({'name': 'subject-1', 'foo': 'bar',
'disk_format': 'aki',
'container_format': 'aki',
'x_owner_foo': 'o_s_bar'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
        # Create a subject for role member without 'foo'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'member'})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki',
'x_owner_foo': 'o_s_bar'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Returned subject entity should have 'x_owner_foo'
subject = jsonutils.loads(response.text)
subject_id = subject['id']
expected_subject = {
'status': 'queued',
'name': 'subject-1',
'tags': [],
'visibility': 'private',
'self': '/v1/subjects/%s' % subject_id,
'protected': False,
'file': '/v1/subjects/%s/file' % subject_id,
'min_disk': 0,
'x_owner_foo': 'o_s_bar',
'min_ram': 0,
'schema': '/v1/schemas/subject',
}
for key, value in expected_subject.items():
self.assertEqual(value, subject[key], key)
        # Create a subject for role spl_role with extra props
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'spl_role'})
data = jsonutils.dumps({'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'spl_create_prop': 'create_bar',
'spl_create_prop_policy': 'create_policy_bar',
'spl_read_prop': 'read_bar',
'spl_update_prop': 'update_bar',
'spl_delete_prop': 'delete_bar',
'spl_delete_empty_prop': ''})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
        # Attempt to replace properties which are forbidden
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'spl_role'})
data = jsonutils.dumps([
{'op': 'replace', 'path': '/spl_read_prop', 'value': 'r'},
{'op': 'replace', 'path': '/spl_update_prop', 'value': 'u'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code, response.text)
        # Attempt to add and remove properties which are forbidden
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'spl_role'})
data = jsonutils.dumps([
{'op': 'add', 'path': '/spl_new_prop', 'value': 'new'},
{'op': 'remove', 'path': '/spl_create_prop'},
{'op': 'remove', 'path': '/spl_delete_prop'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code, response.text)
        # Attempt to replace properties which are permitted
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'spl_role'})
data = jsonutils.dumps([
# Updating an empty property to verify bug #1332103.
{'op': 'replace', 'path': '/spl_update_prop', 'value': ''},
{'op': 'replace', 'path': '/spl_update_prop', 'value': 'u'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Returned subject entity should reflect the changes
subject = jsonutils.loads(response.text)
        # 'spl_update_prop' has update permission for spl_role, hence the
        # value has changed
self.assertEqual('u', subject['spl_update_prop'])
        # Attempt to remove properties which are permitted
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'spl_role'})
data = jsonutils.dumps([
{'op': 'remove', 'path': '/spl_delete_prop'},
# Deleting an empty property to verify bug #1332103.
{'op': 'remove', 'path': '/spl_delete_empty_prop'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Returned subject entity should reflect the changes
subject = jsonutils.loads(response.text)
        # 'spl_delete_prop' and 'spl_delete_empty_prop' have delete
        # permission for spl_role, hence both properties have been deleted
self.assertNotIn('spl_delete_prop', subject.keys())
self.assertNotIn('spl_delete_empty_prop', subject.keys())
        # Subject deletion should work
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
        # This subject should no longer be directly accessible
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
self.stop_servers()
def test_property_protections_with_policies(self):
# Enable property protection
self.api_server.property_protection_file = self.property_file_policies
self.api_server.property_protection_rule_format = 'policies'
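        # With the 'policies' rule format, each operation is presumably
        # mapped to a policy rule evaluated by the policy engine rather
        # than a role list, e.g. (hypothetical sketch):
        #
        #   [spl_creator_policy]
        #   create = creator_policy_rule
        #   read = creator_policy_rule
        #   update = creator_policy_rule
        #   delete = creator_policy_rule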
self.start_servers(**self.__dict__.copy())
# Subject list should be empty
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
        # Create a subject for role member with extra props
# Raises 403 since user is not allowed to set 'foo'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'member'})
data = jsonutils.dumps({'name': 'subject-1', 'foo': 'bar',
'disk_format': 'aki',
'container_format': 'aki',
'x_owner_foo': 'o_s_bar'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
        # Create a subject for role member without 'foo'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'member'})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
        # Returned subject entity should contain the expected properties
subject = jsonutils.loads(response.text)
subject_id = subject['id']
expected_subject = {
'status': 'queued',
'name': 'subject-1',
'tags': [],
'visibility': 'private',
'self': '/v1/subjects/%s' % subject_id,
'protected': False,
'file': '/v1/subjects/%s/file' % subject_id,
'min_disk': 0,
'min_ram': 0,
'schema': '/v1/schemas/subject',
}
for key, value in expected_subject.items():
self.assertEqual(value, subject[key], key)
        # Create a subject for role spl_role with extra props
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'spl_role, admin'})
data = jsonutils.dumps({'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'spl_creator_policy': 'creator_bar',
'spl_default_policy': 'default_bar'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
self.assertEqual('creator_bar', subject['spl_creator_policy'])
self.assertEqual('default_bar', subject['spl_default_policy'])
# Attempt to replace a property which is permitted
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'admin'})
data = jsonutils.dumps([
# Updating an empty property to verify bug #1332103.
{'op': 'replace', 'path': '/spl_creator_policy', 'value': ''},
{'op': 'replace', 'path': '/spl_creator_policy', 'value': 'r'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Returned subject entity should reflect the changes
subject = jsonutils.loads(response.text)
        # 'spl_creator_policy' has update permission for admin, hence the
        # value has changed
self.assertEqual('r', subject['spl_creator_policy'])
# Attempt to replace a property which is forbidden
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'spl_role'})
data = jsonutils.dumps([
{'op': 'replace', 'path': '/spl_creator_policy', 'value': 'z'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code, response.text)
# Attempt to read properties
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'content-type': media_type,
'X-Roles': 'random_role'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
        # 'random_role' is allowed to read 'spl_default_policy'.
self.assertEqual(subject['spl_default_policy'], 'default_bar')
# 'random_role' is forbidden to read 'spl_creator_policy'.
self.assertNotIn('spl_creator_policy', subject)
# Attempt to replace and remove properties which are permitted
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'admin'})
data = jsonutils.dumps([
# Deleting an empty property to verify bug #1332103.
{'op': 'replace', 'path': '/spl_creator_policy', 'value': ''},
{'op': 'remove', 'path': '/spl_creator_policy'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Returned subject entity should reflect the changes
subject = jsonutils.loads(response.text)
        # 'spl_creator_policy' has delete permission for admin, hence the
        # property has been deleted
self.assertNotIn('spl_creator_policy', subject)
# Attempt to read a property that is permitted
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'content-type': media_type,
'X-Roles': 'random_role'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
# Returned subject entity should reflect the changes
subject = jsonutils.loads(response.text)
self.assertEqual(subject['spl_default_policy'], 'default_bar')
        # Subject deletion should work
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
        # This subject should no longer be directly accessible
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
self.stop_servers()
def test_property_protections_special_chars_roles(self):
# Enable property protection
self.api_server.property_protection_file = self.property_file_roles
self.start_servers(**self.__dict__.copy())
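        # In these protection files '@' presumably permits every role and
        # '!' permits none, e.g. (hypothetical sketch):
        #
        #   [x_all_permitted.*]
        #   create = @
        #   read = @
        #   update = @
        #   delete = @
        #
        #   [x_none_.*]
        #   create = !
        #   read = !
        #   update = !
        #   delete = !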
# Verify both admin and unknown role can create properties marked with
# '@'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_all_permitted_admin': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
expected_subject = {
'status': 'queued',
'name': 'subject-1',
'tags': [],
'visibility': 'private',
'self': '/v1/subjects/%s' % subject_id,
'protected': False,
'file': '/v1/subjects/%s/file' % subject_id,
'min_disk': 0,
'x_all_permitted_admin': '1',
'min_ram': 0,
'schema': '/v1/schemas/subject',
}
for key, value in expected_subject.items():
self.assertEqual(value, subject[key], key)
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'joe_soap'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_all_permitted_joe_soap': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
expected_subject = {
'status': 'queued',
'name': 'subject-1',
'tags': [],
'visibility': 'private',
'self': '/v1/subjects/%s' % subject_id,
'protected': False,
'file': '/v1/subjects/%s/file' % subject_id,
'min_disk': 0,
'x_all_permitted_joe_soap': '1',
'min_ram': 0,
'schema': '/v1/schemas/subject',
}
for key, value in expected_subject.items():
self.assertEqual(value, subject[key], key)
# Verify both admin and unknown role can read properties marked with
# '@'
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertEqual('1', subject['x_all_permitted_joe_soap'])
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'joe_soap'})
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertEqual('1', subject['x_all_permitted_joe_soap'])
# Verify both admin and unknown role can update properties marked with
# '@'
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'admin'})
data = jsonutils.dumps([
{'op': 'replace',
'path': '/x_all_permitted_joe_soap', 'value': '2'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
subject = jsonutils.loads(response.text)
self.assertEqual('2', subject['x_all_permitted_joe_soap'])
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'joe_soap'})
data = jsonutils.dumps([
{'op': 'replace',
'path': '/x_all_permitted_joe_soap', 'value': '3'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
subject = jsonutils.loads(response.text)
self.assertEqual('3', subject['x_all_permitted_joe_soap'])
# Verify both admin and unknown role can delete properties marked with
# '@'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_all_permitted_a': '1',
'x_all_permitted_b': '2'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'admin'})
data = jsonutils.dumps([
{'op': 'remove', 'path': '/x_all_permitted_a'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
subject = jsonutils.loads(response.text)
self.assertNotIn('x_all_permitted_a', subject.keys())
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'joe_soap'})
data = jsonutils.dumps([
{'op': 'remove', 'path': '/x_all_permitted_b'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
subject = jsonutils.loads(response.text)
self.assertNotIn('x_all_permitted_b', subject.keys())
# Verify neither admin nor unknown role can create a property protected
# with '!'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_none_permitted_admin': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'joe_soap'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_none_permitted_joe_soap': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
# Verify neither admin nor unknown role can read properties marked with
# '!'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_none_read': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
self.assertNotIn('x_none_read', subject.keys())
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertNotIn('x_none_read', subject.keys())
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'joe_soap'})
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertNotIn('x_none_read', subject.keys())
# Verify neither admin nor unknown role can update properties marked
# with '!'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_none_update': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
self.assertEqual('1', subject['x_none_update'])
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'admin'})
data = jsonutils.dumps([
{'op': 'replace',
'path': '/x_none_update', 'value': '2'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code, response.text)
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'joe_soap'})
data = jsonutils.dumps([
{'op': 'replace',
'path': '/x_none_update', 'value': '3'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(409, response.status_code, response.text)
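        # Note: the unknown role presumably cannot even read the protected
        # property, so from its point of view the patch targets a
        # nonexistent path, which explains the 409 (conflict) here versus
        # the 403 (forbidden) returned to admin above.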
# Verify neither admin nor unknown role can delete properties marked
# with '!'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_none_delete': '1',
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'admin'})
data = jsonutils.dumps([
{'op': 'remove', 'path': '/x_none_delete'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code, response.text)
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'joe_soap'})
data = jsonutils.dumps([
{'op': 'remove', 'path': '/x_none_delete'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(409, response.status_code, response.text)
self.stop_servers()
def test_property_protections_special_chars_policies(self):
# Enable property protection
self.api_server.property_protection_file = self.property_file_policies
self.api_server.property_protection_rule_format = 'policies'
self.start_servers(**self.__dict__.copy())
# Verify both admin and unknown role can create properties marked with
# '@'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_all_permitted_admin': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
expected_subject = {
'status': 'queued',
'name': 'subject-1',
'tags': [],
'visibility': 'private',
'self': '/v1/subjects/%s' % subject_id,
'protected': False,
'file': '/v1/subjects/%s/file' % subject_id,
'min_disk': 0,
'x_all_permitted_admin': '1',
'min_ram': 0,
'schema': '/v1/schemas/subject',
}
for key, value in expected_subject.items():
self.assertEqual(value, subject[key], key)
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'joe_soap'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_all_permitted_joe_soap': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
expected_subject = {
'status': 'queued',
'name': 'subject-1',
'tags': [],
'visibility': 'private',
'self': '/v1/subjects/%s' % subject_id,
'protected': False,
'file': '/v1/subjects/%s/file' % subject_id,
'min_disk': 0,
'x_all_permitted_joe_soap': '1',
'min_ram': 0,
'schema': '/v1/schemas/subject',
}
for key, value in expected_subject.items():
self.assertEqual(value, subject[key], key)
# Verify both admin and unknown role can read properties marked with
# '@'
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertEqual('1', subject['x_all_permitted_joe_soap'])
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'joe_soap'})
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertEqual('1', subject['x_all_permitted_joe_soap'])
# Verify both admin and unknown role can update properties marked with
# '@'
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'admin'})
data = jsonutils.dumps([
{'op': 'replace',
'path': '/x_all_permitted_joe_soap', 'value': '2'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
subject = jsonutils.loads(response.text)
self.assertEqual('2', subject['x_all_permitted_joe_soap'])
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'joe_soap'})
data = jsonutils.dumps([
{'op': 'replace',
'path': '/x_all_permitted_joe_soap', 'value': '3'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
subject = jsonutils.loads(response.text)
self.assertEqual('3', subject['x_all_permitted_joe_soap'])
# Verify both admin and unknown role can delete properties marked with
# '@'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_all_permitted_a': '1',
'x_all_permitted_b': '2'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'admin'})
data = jsonutils.dumps([
{'op': 'remove', 'path': '/x_all_permitted_a'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
subject = jsonutils.loads(response.text)
self.assertNotIn('x_all_permitted_a', subject.keys())
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'joe_soap'})
data = jsonutils.dumps([
{'op': 'remove', 'path': '/x_all_permitted_b'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
subject = jsonutils.loads(response.text)
self.assertNotIn('x_all_permitted_b', subject.keys())
# Verify neither admin nor unknown role can create a property protected
# with '!'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_none_permitted_admin': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'joe_soap'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_none_permitted_joe_soap': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(403, response.status_code)
# Verify neither admin nor unknown role can read properties marked with
# '!'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_none_read': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
self.assertNotIn('x_none_read', subject.keys())
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertNotIn('x_none_read', subject.keys())
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'joe_soap'})
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertNotIn('x_none_read', subject.keys())
# Verify neither admin nor unknown role can update properties marked
# with '!'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_none_update': '1'
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
self.assertEqual('1', subject['x_none_update'])
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'admin'})
data = jsonutils.dumps([
{'op': 'replace',
'path': '/x_none_update', 'value': '2'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code, response.text)
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'joe_soap'})
data = jsonutils.dumps([
{'op': 'replace',
'path': '/x_none_update', 'value': '3'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(409, response.status_code, response.text)
# Verify neither admin nor unknown role can delete properties marked
# with '!'
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json',
'X-Roles': 'admin'})
data = jsonutils.dumps({
'name': 'subject-1',
'disk_format': 'aki',
'container_format': 'aki',
'x_none_delete': '1',
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'admin'})
data = jsonutils.dumps([
{'op': 'remove', 'path': '/x_none_delete'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(403, response.status_code, response.text)
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type,
'X-Roles': 'joe_soap'})
data = jsonutils.dumps([
{'op': 'remove', 'path': '/x_none_delete'}
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(409, response.status_code, response.text)
self.stop_servers()
def test_tag_lifecycle(self):
self.start_servers(**self.__dict__.copy())
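        # Tags can be manipulated individually via PUT/DELETE on
        # /v1/subjects/<id>/tags/<tag> or in bulk by patching the /tags
        # list; both styles are exercised below.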
        # Create a subject with a tag - the duplicate should be ignored
path = self._url('/v1/subjects')
headers = self._headers({'Content-Type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-1', 'tags': ['sniff', 'sniff']})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject_id = jsonutils.loads(response.text)['id']
# Subject should show a list with a single tag
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
tags = jsonutils.loads(response.text)['tags']
self.assertEqual(['sniff'], tags)
# Delete all tags
for tag in tags:
path = self._url('/v1/subjects/%s/tags/%s' % (subject_id, tag))
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
        # Add tags up to the configured limit of 10 via PUT
for i in range(10):
path = self._url('/v1/subjects/%s/tags/foo%i' % (subject_id, i))
response = requests.put(path, headers=self._headers())
self.assertEqual(204, response.status_code)
# 11th tag should fail
path = self._url('/v1/subjects/%s/tags/fail_me' % subject_id)
response = requests.put(path, headers=self._headers())
self.assertEqual(413, response.status_code)
# Make sure the 11th tag was not added
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
tags = jsonutils.loads(response.text)['tags']
self.assertEqual(10, len(tags))
# Update subject tags via PATCH
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
doc = [
{
'op': 'replace',
'path': '/tags',
'value': ['foo'],
},
]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code)
# Update subject with too many tags via PATCH
# Configured limit is 10 tags
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
tags = ['foo%d' % i for i in range(11)]
doc = [
{
'op': 'replace',
'path': '/tags',
'value': tags,
},
]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(413, response.status_code)
# Tags should not have changed since request was over limit
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
tags = jsonutils.loads(response.text)['tags']
self.assertEqual(['foo'], tags)
# Update subject with duplicate tag - it should be ignored
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
doc = [
{
'op': 'replace',
'path': '/tags',
'value': ['sniff', 'snozz', 'snozz'],
},
]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code)
tags = jsonutils.loads(response.text)['tags']
self.assertEqual(['sniff', 'snozz'], sorted(tags))
# Subject should show the appropriate tags
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
tags = jsonutils.loads(response.text)['tags']
self.assertEqual(['sniff', 'snozz'], sorted(tags))
        # An attempt to tag the subject with a duplicate should be ignored
path = self._url('/v1/subjects/%s/tags/snozz' % subject_id)
response = requests.put(path, headers=self._headers())
self.assertEqual(204, response.status_code)
        # Create another, more complex tag
path = self._url('/v1/subjects/%s/tags/gabe%%40example.com' % subject_id)
response = requests.put(path, headers=self._headers())
self.assertEqual(204, response.status_code)
# Double-check that the tags container on the subject is populated
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
tags = jsonutils.loads(response.text)['tags']
self.assertEqual(['gabe@example.com', 'sniff', 'snozz'],
sorted(tags))
# Query subjects by single tag
path = self._url('/v1/subjects?tag=sniff')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual('subject-1', subjects[0]['name'])
# Query subjects by multiple tags
path = self._url('/v1/subjects?tag=sniff&tag=snozz')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual('subject-1', subjects[0]['name'])
# Query subjects by tag and other attributes
path = self._url('/v1/subjects?tag=sniff&status=queued')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual('subject-1', subjects[0]['name'])
# Query subjects by tag and a nonexistent tag
path = self._url('/v1/subjects?tag=sniff&tag=fake')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
# The tag should be deletable
path = self._url('/v1/subjects/%s/tags/gabe%%40example.com' % subject_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
# List of tags should reflect the deletion
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
tags = jsonutils.loads(response.text)['tags']
self.assertEqual(['sniff', 'snozz'], sorted(tags))
# Deleting the same tag should return a 404
path = self._url('/v1/subjects/%s/tags/gabe%%40example.com' % subject_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(404, response.status_code)
        # Querying by the deleted tag should return no subjects
path = self._url('/v1/subjects?tag=gabe%%40example.com')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
# Try to add a tag that is too long
big_tag = 'a' * 300
path = self._url('/v1/subjects/%s/tags/%s' % (subject_id, big_tag))
response = requests.put(path, headers=self._headers())
self.assertEqual(400, response.status_code)
# Tags should not have changed since request was over limit
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
tags = jsonutils.loads(response.text)['tags']
self.assertEqual(['sniff', 'snozz'], sorted(tags))
self.stop_servers()
def test_subjects_container(self):
# Subject list should be empty and no next link should be present
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
first = jsonutils.loads(response.text)['first']
self.assertEqual(0, len(subjects))
self.assertNotIn('next', jsonutils.loads(response.text))
self.assertEqual('/v1/subjects', first)
# Create 7 subjects
subjects = []
fixtures = [
{'name': 'subject-3', 'type': 'kernel', 'ping': 'pong',
'container_format': 'ami', 'disk_format': 'ami'},
{'name': 'subject-4', 'type': 'kernel', 'ping': 'pong',
'container_format': 'bare', 'disk_format': 'ami'},
{'name': 'subject-1', 'type': 'kernel', 'ping': 'pong'},
{'name': 'subject-3', 'type': 'ramdisk', 'ping': 'pong'},
{'name': 'subject-2', 'type': 'kernel', 'ping': 'ding'},
{'name': 'subject-3', 'type': 'kernel', 'ping': 'pong'},
{'name': 'subject-2,subject-5', 'type': 'kernel', 'ping': 'pong'},
]
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
for fixture in fixtures:
data = jsonutils.dumps(fixture)
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subjects.append(jsonutils.loads(response.text))
# Subject list should contain 7 subjects
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertEqual(7, len(body['subjects']))
self.assertEqual('/v1/subjects', body['first'])
self.assertNotIn('next', jsonutils.loads(response.text))
# Subject list filters by created_at time
url_template = '/v1/subjects?created_at=lt:%s'
path = self._url(url_template % subjects[0]['created_at'])
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertEqual(0, len(body['subjects']))
self.assertEqual(url_template % subjects[0]['created_at'],
urllib.parse.unquote(body['first']))
# Subject list filters by updated_at time
url_template = '/v1/subjects?updated_at=lt:%s'
path = self._url(url_template % subjects[2]['updated_at'])
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertGreaterEqual(3, len(body['subjects']))
self.assertEqual(url_template % subjects[2]['updated_at'],
urllib.parse.unquote(body['first']))
        # Subject list filters by updated_at and created_at with an invalid value
url_template = '/v1/subjects?%s=lt:invalid_value'
for filter in ['updated_at', 'created_at']:
path = self._url(url_template % filter)
response = requests.get(path, headers=self._headers())
self.assertEqual(400, response.status_code)
        # Subject list filters by updated_at and created_at with an invalid operator
url_template = '/v1/subjects?%s=invalid_operator:2015-11-19T12:24:02Z'
for filter in ['updated_at', 'created_at']:
path = self._url(url_template % filter)
response = requests.get(path, headers=self._headers())
self.assertEqual(400, response.status_code)
        # Subject list filter with a badly encoded value should fail
path = self._url('/v1/subjects?name=%FF')
response = requests.get(path, headers=self._headers())
self.assertEqual(400, response.status_code)
# Subject list filters by name with in operator
url_template = '/v1/subjects?name=in:%s'
filter_value = 'subject-1,subject-2'
path = self._url(url_template % filter_value)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertGreaterEqual(3, len(body['subjects']))
# Subject list filters by container_format with in operator
url_template = '/v1/subjects?container_format=in:%s'
filter_value = 'bare,ami'
path = self._url(url_template % filter_value)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertGreaterEqual(2, len(body['subjects']))
# Subject list filters by disk_format with in operator
url_template = '/v1/subjects?disk_format=in:%s'
filter_value = 'bare,ami,iso'
path = self._url(url_template % filter_value)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertGreaterEqual(2, len(body['subjects']))
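        # Marker-based pagination: each page's 'next' link embeds the id of
        # the last subject returned so the server can resume after it. A
        # minimal client-side sketch (hypothetical helper, not part of the
        # API under test):
        #
        #   def iter_subjects(self, first_path):
        #       path = first_path
        #       while path:
        #           body = jsonutils.loads(requests.get(
        #               self._url(path), headers=self._headers()).text)
        #           for subject in body['subjects']:
        #               yield subject
        #           path = body.get('next')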
# Begin pagination after the first subject
template_url = ('/v1/subjects?limit=2&sort_dir=asc&sort_key=name'
'&marker=%s&type=kernel&ping=pong')
path = self._url(template_url % subjects[2]['id'])
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertEqual(2, len(body['subjects']))
response_ids = [subject['id'] for subject in body['subjects']]
self.assertEqual([subjects[6]['id'], subjects[0]['id']], response_ids)
# Continue pagination using next link from previous request
path = self._url(body['next'])
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertEqual(2, len(body['subjects']))
response_ids = [subject['id'] for subject in body['subjects']]
self.assertEqual([subjects[5]['id'], subjects[1]['id']], response_ids)
# Continue pagination - expect no results
path = self._url(body['next'])
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertEqual(0, len(body['subjects']))
# Delete first subject
path = self._url('/v1/subjects/%s' % subjects[0]['id'])
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
        # Using a deleted subject as a marker should result in a 400
path = self._url('/v1/subjects?marker=%s' % subjects[0]['id'])
response = requests.get(path, headers=self._headers())
self.assertEqual(400, response.status_code)
self.stop_servers()
def test_subject_visibility_to_different_users(self):
self.cleanup()
self.api_server.deployment_flavor = 'fakeauth'
self.registry_server.deployment_flavor = 'fakeauth'
kwargs = self.__dict__.copy()
kwargs['use_user_token'] = True
self.start_servers(**kwargs)
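        # With the 'fakeauth' flavor the X-Auth-Token is presumably parsed
        # as '<user>:<tenant>:<roles>', which is what the token strings
        # built below rely on to impersonate different tenants and roles.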
owners = ['admin', 'tenant1', 'tenant2', 'none']
visibilities = ['public', 'private']
for owner in owners:
for visibility in visibilities:
path = self._url('/v1/subjects')
headers = self._headers({
'content-type': 'application/json',
'X-Auth-Token': 'createuser:%s:admin' % owner,
})
data = jsonutils.dumps({
'name': '%s-%s' % (owner, visibility),
'visibility': visibility,
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
def list_subjects(tenant, role='', visibility=None):
auth_token = 'user:%s:%s' % (tenant, role)
headers = {'X-Auth-Token': auth_token}
path = self._url('/v1/subjects')
if visibility is not None:
path += '?visibility=%s' % visibility
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
return jsonutils.loads(response.text)['subjects']
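        # Expected visibility matrix for the 8 subjects created above
        # (4 owners x 2 visibilities), as asserted below:
        #
        #   viewer                 public    private
        #   known user             all 4     own 1
        #   unknown user           all 4     none
        #   admin (any tenant)     all 4     all 4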
# 1. Known user sees public and their own subjects
subjects = list_subjects('tenant1')
self.assertEqual(5, len(subjects))
for subject in subjects:
self.assertTrue(subject['visibility'] == 'public'
or 'tenant1' in subject['name'])
# 2. Known user, visibility=public, sees all public subjects
subjects = list_subjects('tenant1', visibility='public')
self.assertEqual(4, len(subjects))
for subject in subjects:
self.assertEqual('public', subject['visibility'])
# 3. Known user, visibility=private, sees only their private subject
subjects = list_subjects('tenant1', visibility='private')
self.assertEqual(1, len(subjects))
subject = subjects[0]
self.assertEqual('private', subject['visibility'])
self.assertIn('tenant1', subject['name'])
# 4. Unknown user sees only public subjects
subjects = list_subjects('none')
self.assertEqual(4, len(subjects))
for subject in subjects:
self.assertEqual('public', subject['visibility'])
# 5. Unknown user, visibility=public, sees only public subjects
subjects = list_subjects('none', visibility='public')
self.assertEqual(4, len(subjects))
for subject in subjects:
self.assertEqual('public', subject['visibility'])
# 6. Unknown user, visibility=private, sees no subjects
subjects = list_subjects('none', visibility='private')
self.assertEqual(0, len(subjects))
# 7. Unknown admin sees all subjects
subjects = list_subjects('none', role='admin')
self.assertEqual(8, len(subjects))
        # 8. Unknown admin, visibility=public, sees only public subjects
subjects = list_subjects('none', role='admin', visibility='public')
self.assertEqual(4, len(subjects))
for subject in subjects:
self.assertEqual('public', subject['visibility'])
# 9. Unknown admin, visibility=private, sees only private subjects
subjects = list_subjects('none', role='admin', visibility='private')
self.assertEqual(4, len(subjects))
for subject in subjects:
self.assertEqual('private', subject['visibility'])
# 10. Known admin sees all subjects
subjects = list_subjects('admin', role='admin')
self.assertEqual(8, len(subjects))
# 11. Known admin, visibility=public, sees all public subjects
subjects = list_subjects('admin', role='admin', visibility='public')
self.assertEqual(4, len(subjects))
for subject in subjects:
self.assertEqual('public', subject['visibility'])
# 12. Known admin, visibility=private, sees all private subjects
subjects = list_subjects('admin', role='admin', visibility='private')
self.assertEqual(4, len(subjects))
for subject in subjects:
self.assertEqual('private', subject['visibility'])
self.stop_servers()
def test_update_locations(self):
self.api_server.show_multiple_locations = True
self.start_servers(**self.__dict__.copy())
        # Create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Returned subject entity should have a generated id and status
subject = jsonutils.loads(response.text)
subject_id = subject['id']
self.assertEqual('queued', subject['status'])
self.assertIsNone(subject['size'])
self.assertIsNone(subject['virtual_size'])
# Update locations for the queued subject
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
url = 'http://127.0.0.1:%s/foo_subject' % self.http_port0
data = jsonutils.dumps([{'op': 'replace', 'path': '/locations',
'value': [{'url': url, 'metadata': {}}]
}])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
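        # Patching a location onto a queued subject presumably makes the
        # store probe the URL for its size; the stub HTTP server behind
        # http_port0 serves a 10-byte payload, hence the assertion below.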
# The subject size should be updated
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertEqual(10, subject['size'])
def test_update_locations_with_restricted_sources(self):
self.api_server.show_multiple_locations = True
self.start_servers(**self.__dict__.copy())
        # Create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-1', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Returned subject entity should have a generated id and status
subject = jsonutils.loads(response.text)
subject_id = subject['id']
self.assertEqual('queued', subject['status'])
self.assertIsNone(subject['size'])
self.assertIsNone(subject['virtual_size'])
# Update locations for the queued subject
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
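        # 'file' and 'swift+config' are presumably restricted URI schemes,
        # so both PATCH attempts below should be rejected with 400.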
data = jsonutils.dumps([{'op': 'replace', 'path': '/locations',
'value': [{'url': 'file:///foo_subject',
'metadata': {}}]
}])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(400, response.status_code, response.text)
data = jsonutils.dumps([{'op': 'replace', 'path': '/locations',
'value': [{'url': 'swift+config:///foo_subject',
'metadata': {}}]
}])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(400, response.status_code, response.text)
class TestSubjectsWithRegistry(TestSubjects):
def setUp(self):
super(TestSubjectsWithRegistry, self).setUp()
self.api_server.data_api = (
'subject.tests.functional.v1.registry_data_api')
self.registry_server.deployment_flavor = 'trusted-auth'
class TestSubjectDirectURLVisibility(functional.FunctionalTest):
def setUp(self):
super(TestSubjectDirectURLVisibility, self).setUp()
self.cleanup()
self.api_server.deployment_flavor = 'noauth'
def _url(self, path):
return 'http://127.0.0.1:%d%s' % (self.api_port, path)
def _headers(self, custom_headers=None):
base_headers = {
'X-Identity-Status': 'Confirmed',
'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
'X-Tenant-Id': TENANT1,
'X-Roles': 'member',
}
base_headers.update(custom_headers or {})
return base_headers
def test_v2_not_enabled(self):
self.api_server.enable_v2_api = False
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(300, response.status_code)
self.stop_servers()
def test_v2_enabled(self):
self.api_server.enable_v2_api = True
self.start_servers(**self.__dict__.copy())
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
self.stop_servers()
def test_subject_direct_url_visible(self):
self.api_server.show_subject_direct_url = True
self.start_servers(**self.__dict__.copy())
# Subject list should be empty
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
        # Create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-1', 'type': 'kernel',
'foo': 'bar', 'disk_format': 'aki',
'container_format': 'aki',
'visibility': 'public'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the subject id
subject = jsonutils.loads(response.text)
subject_id = subject['id']
# Subject direct_url should not be visible before location is set
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'Content-Type': 'application/json'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertNotIn('direct_url', subject)
# Upload some subject data, setting the subject location
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data='ZZZZZ')
self.assertEqual(204, response.status_code)
# Subject direct_url should be visible
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'Content-Type': 'application/json'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertIn('direct_url', subject)
# Subject direct_url should be visible to non-owner, non-admin user
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'Content-Type': 'application/json',
'X-Tenant-Id': TENANT2})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertIn('direct_url', subject)
# Subject direct_url should be visible in a list
path = self._url('/v1/subjects')
headers = self._headers({'Content-Type': 'application/json'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)['subjects'][0]
self.assertIn('direct_url', subject)
self.stop_servers()
def test_subject_multiple_location_url_visible(self):
self.api_server.show_multiple_locations = True
self.start_servers(**self.__dict__.copy())
        # Create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-1', 'type': 'kernel',
'foo': 'bar', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the subject id
subject = jsonutils.loads(response.text)
subject_id = subject['id']
        # Subject locations should be empty before a location is set
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'Content-Type': 'application/json'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertIn('locations', subject)
self.assertEqual([], subject["locations"])
# Upload some subject data, setting the subject location
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data='ZZZZZ')
self.assertEqual(204, response.status_code)
# Subject locations should be visible
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'Content-Type': 'application/json'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertIn('locations', subject)
loc = subject['locations']
self.assertGreater(len(loc), 0)
loc = loc[0]
self.assertIn('url', loc)
self.assertIn('metadata', loc)
self.stop_servers()
def test_subject_direct_url_not_visible(self):
self.api_server.show_subject_direct_url = False
self.start_servers(**self.__dict__.copy())
# Subject list should be empty
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
        # Create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-1', 'type': 'kernel',
'foo': 'bar', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the subject id
subject = jsonutils.loads(response.text)
subject_id = subject['id']
# Upload some subject data, setting the subject location
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data='ZZZZZ')
self.assertEqual(204, response.status_code)
# Subject direct_url should not be visible
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'Content-Type': 'application/json'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertNotIn('direct_url', subject)
# Subject direct_url should not be visible in a list
path = self._url('/v1/subjects')
headers = self._headers({'Content-Type': 'application/json'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)['subjects'][0]
self.assertNotIn('direct_url', subject)
self.stop_servers()
class TestSubjectDirectURLVisibilityWithRegistry(TestSubjectDirectURLVisibility):
def setUp(self):
super(TestSubjectDirectURLVisibilityWithRegistry, self).setUp()
self.api_server.data_api = (
'subject.tests.functional.v1.registry_data_api')
self.registry_server.deployment_flavor = 'trusted-auth'
class TestSubjectLocationSelectionStrategy(functional.FunctionalTest):
def setUp(self):
super(TestSubjectLocationSelectionStrategy, self).setUp()
self.cleanup()
self.api_server.deployment_flavor = 'noauth'
for i in range(3):
ret = test_utils.start_http_server("foo_subject_id%d" % i,
"foo_subject%d" % i)
setattr(self, 'http_server%d_pid' % i, ret[0])
setattr(self, 'http_port%d' % i, ret[1])
def tearDown(self):
for i in range(3):
pid = getattr(self, 'http_server%d_pid' % i, None)
if pid:
os.kill(pid, signal.SIGKILL)
super(TestSubjectLocationSelectionStrategy, self).tearDown()
def _url(self, path):
return 'http://127.0.0.1:%d%s' % (self.api_port, path)
def _headers(self, custom_headers=None):
base_headers = {
'X-Identity-Status': 'Confirmed',
'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
'X-Tenant-Id': TENANT1,
'X-Roles': 'member',
}
base_headers.update(custom_headers or {})
return base_headers
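# With the 'noauth' deployment flavor set in setUp, these pre-confirmed
# identity headers are trusted as-is, standing in for a real auth middleware
# in front of the API.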
def test_subject_locations_with_order_strategy(self):
self.api_server.show_subject_direct_url = True
self.api_server.show_multiple_locations = True
self.subject_location_quota = 10
self.api_server.location_strategy = 'location_order'
preference = "http, swift, filesystem"
self.api_server.store_type_location_strategy_preference = preference
self.start_servers(**self.__dict__.copy())
# Create a subject
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'subject-1', 'type': 'kernel',
'foo': 'bar', 'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the subject id
subject = jsonutils.loads(response.text)
subject_id = subject['id']
# Subject locations should not be visible before location is set
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'Content-Type': 'application/json'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertIn('locations', subject)
self.assertEqual([], subject["locations"])
# Update subject locations via PATCH
path = self._url('/v1/subjects/%s' % subject_id)
media_type = 'application/openstack-subjects-v1.1-json-patch'
headers = self._headers({'content-type': media_type})
values = [{'url': 'http://127.0.0.1:%s/foo_subject' % self.http_port0,
'metadata': {}},
{'url': 'http://127.0.0.1:%s/foo_subject' % self.http_port1,
'metadata': {}}]
doc = [{'op': 'replace',
'path': '/locations',
'value': values}]
data = jsonutils.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code)
# Subject locations should be visible
path = self._url('/v1/subjects/%s' % subject_id)
headers = self._headers({'Content-Type': 'application/json'})
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
subject = jsonutils.loads(response.text)
self.assertIn('locations', subject)
self.assertEqual(values, subject['locations'])
self.assertIn('direct_url', subject)
self.assertEqual(values[0]['url'], subject['direct_url'])
self.stop_servers()
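# Sketch of the semantics exercised above (our reading, not asserted by the
# suite itself): 'location_order' keeps the locations exactly as PATCHed, so
# direct_url resolves to values[0]['url']; a 'store_type' strategy would
# instead reorder them to match store_type_location_strategy_preference.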


class TestSubjectLocationSelectionStrategyWithRegistry(
TestSubjectLocationSelectionStrategy):
def setUp(self):
super(TestSubjectLocationSelectionStrategyWithRegistry, self).setUp()
self.api_server.data_api = (
'subject.tests.functional.v1.registry_data_api')
self.registry_server.deployment_flavor = 'trusted-auth'


class TestSubjectMembers(functional.FunctionalTest):
def setUp(self):
super(TestSubjectMembers, self).setUp()
self.cleanup()
self.api_server.deployment_flavor = 'fakeauth'
self.registry_server.deployment_flavor = 'fakeauth'
self.start_servers(**self.__dict__.copy())
def _url(self, path):
return 'http://127.0.0.1:%d%s' % (self.api_port, path)
def _headers(self, custom_headers=None):
base_headers = {
'X-Identity-Status': 'Confirmed',
'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
'X-Tenant-Id': TENANT1,
'X-Roles': 'member',
}
base_headers.update(custom_headers or {})
return base_headers
def test_subject_member_lifecycle(self):
def get_header(tenant, role=''):
auth_token = 'user:%s:%s' % (tenant, role)
headers = {'X-Auth-Token': auth_token}
return headers
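# Our reading of the 'fakeauth' flavor used by this class: the middleware
# decodes an X-Auth-Token of the form 'user:tenant:role' into the usual
# identity headers, so e.g. get_header('tenant1', 'admin') poses as an
# admin-scoped request for tenant1 without a real identity service.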
# Subject list should be empty
path = self._url('/v1/subjects')
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
owners = ['tenant1', 'tenant2', 'admin']
visibilities = ['public', 'private']
subject_fixture = []
for owner in owners:
for visibility in visibilities:
path = self._url('/v1/subjects')
headers = self._headers({
'content-type': 'application/json',
'X-Auth-Token': 'createuser:%s:admin' % owner,
})
data = jsonutils.dumps({
'name': '%s-%s' % (owner, visibility),
'visibility': visibility,
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject_fixture.append(jsonutils.loads(response.text))
# Subject list should contain 4 subjects for tenant1
path = self._url('/v1/subjects')
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(4, len(subjects))
# Subject list should contain 3 subjects for TENANT3
path = self._url('/v1/subjects')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(3, len(subjects))
# Add Subject member for tenant1-private subject
path = self._url('/v1/subjects/%s/members' % subject_fixture[1]['id'])
body = jsonutils.dumps({'member': TENANT3})
response = requests.post(path, headers=get_header('tenant1'),
data=body)
self.assertEqual(200, response.status_code)
subject_member = jsonutils.loads(response.text)
self.assertEqual(subject_fixture[1]['id'], subject_member['subject_id'])
self.assertEqual(TENANT3, subject_member['member_id'])
self.assertIn('created_at', subject_member)
self.assertIn('updated_at', subject_member)
self.assertEqual('pending', subject_member['status'])
# Subject list should contain 3 subjects for TENANT3
path = self._url('/v1/subjects')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(3, len(subjects))
# Subject list should contain 0 shared subjects for TENANT3
# because the default member_status filter is 'accepted'
path = self._url('/v1/subjects?visibility=shared')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
# Subject list should contain 4 subjects for TENANT3 with status pending
path = self._url('/v1/subjects?member_status=pending')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(4, len(subjects))
# Subject list should contain 4 subjects for TENANT3 with status all
path = self._url('/v1/subjects?member_status=all')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(4, len(subjects))
# Subject list should contain 1 subject for TENANT3 with status pending
# and visibility shared
path = self._url('/v1/subjects?member_status=pending&visibility=shared')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(1, len(subjects))
self.assertEqual(subjects[0]['name'], 'tenant1-private')
# Subject list should contain 0 subjects for TENANT3 with status rejected
# and visibility shared
path = self._url('/v1/subjects?member_status=rejected&visibility=shared')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
# Subject list should contain 0 subjects for TENANT3 with status accepted
# and visibility shared
path = self._url('/v1/subjects?member_status=accepted&visibility=shared')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
# Subject list should contain 0 subjects for TENANT3 with status accepted
# and visibility private
path = self._url('/v1/subjects?visibility=private')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
# Subject tenant2-private's member list should contain no members
path = self._url('/v1/subjects/%s/members' % subject_fixture[3]['id'])
response = requests.get(path, headers=get_header('tenant2'))
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertEqual(0, len(body['members']))
# Tenant 1, who is the owner, cannot change the status of a subject member
path = self._url('/v1/subjects/%s/members/%s' % (subject_fixture[1]['id'],
TENANT3))
body = jsonutils.dumps({'status': 'accepted'})
response = requests.put(path, headers=get_header('tenant1'), data=body)
self.assertEqual(403, response.status_code)
# Tenant 1, who is the owner, can get the status of its own subject member
path = self._url('/v1/subjects/%s/members/%s' % (subject_fixture[1]['id'],
TENANT3))
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertEqual('pending', body['status'])
self.assertEqual(subject_fixture[1]['id'], body['subject_id'])
self.assertEqual(TENANT3, body['member_id'])
# Tenant 3, who is the member, can get the status of its own membership
path = self._url('/v1/subjects/%s/members/%s' % (subject_fixture[1]['id'],
TENANT3))
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertEqual('pending', body['status'])
self.assertEqual(subject_fixture[1]['id'], body['subject_id'])
self.assertEqual(TENANT3, body['member_id'])
# Tenant 2, who is not the owner, cannot get the status of a subject member
path = self._url('/v1/subjects/%s/members/%s' % (subject_fixture[1]['id'],
TENANT3))
response = requests.get(path, headers=get_header('tenant2'))
self.assertEqual(404, response.status_code)
# Tenant 3 can change status of subject member
path = self._url('/v1/subjects/%s/members/%s' % (subject_fixture[1]['id'],
TENANT3))
body = jsonutils.dumps({'status': 'accepted'})
response = requests.put(path, headers=get_header(TENANT3), data=body)
self.assertEqual(200, response.status_code)
subject_member = jsonutils.loads(response.text)
self.assertEqual(subject_fixture[1]['id'], subject_member['subject_id'])
self.assertEqual(TENANT3, subject_member['member_id'])
self.assertEqual('accepted', subject_member['status'])
# Subject list should contain 4 subjects for TENANT3 because status is
# accepted
path = self._url('/v1/subjects')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(4, len(subjects))
# Tenant 3 cannot set an invalid member status
path = self._url('/v1/subjects/%s/members/%s' % (subject_fixture[1]['id'],
TENANT3))
body = jsonutils.dumps({'status': 'invalid-status'})
response = requests.put(path, headers=get_header(TENANT3), data=body)
self.assertEqual(400, response.status_code)
# Owner cannot change the status of a subject member
path = self._url('/v1/subjects/%s/members/%s' % (subject_fixture[1]['id'],
TENANT3))
body = jsonutils.dumps({'status': 'accepted'})
response = requests.put(path, headers=get_header('tenant1'), data=body)
self.assertEqual(403, response.status_code)
# Add Subject member for tenant2-private subject
path = self._url('/v1/subjects/%s/members' % subject_fixture[3]['id'])
body = jsonutils.dumps({'member': TENANT4})
response = requests.post(path, headers=get_header('tenant2'),
data=body)
self.assertEqual(200, response.status_code)
subject_member = jsonutils.loads(response.text)
self.assertEqual(subject_fixture[3]['id'], subject_member['subject_id'])
self.assertEqual(TENANT4, subject_member['member_id'])
self.assertIn('created_at', subject_member)
self.assertIn('updated_at', subject_member)
# Add Subject member to public subject
path = self._url('/v1/subjects/%s/members' % subject_fixture[0]['id'])
body = jsonutils.dumps({'member': TENANT2})
response = requests.post(path, headers=get_header('tenant1'),
data=body)
self.assertEqual(403, response.status_code)
# Subject tenant1-private's members list should contain 1 member
path = self._url('/v1/subjects/%s/members' % subject_fixture[1]['id'])
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertEqual(1, len(body['members']))
# Admin can see any members
path = self._url('/v1/subjects/%s/members' % subject_fixture[1]['id'])
response = requests.get(path, headers=get_header('tenant1', 'admin'))
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertEqual(1, len(body['members']))
# Subject members not found for private subject not owned by TENANT 1
path = self._url('/v1/subjects/%s/members' % subject_fixture[3]['id'])
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(404, response.status_code)
# Subject members forbidden for public subject
path = self._url('/v1/subjects/%s/members' % subject_fixture[0]['id'])
response = requests.get(path, headers=get_header('tenant1'))
self.assertIn("Public subjects do not have members", response.text)
self.assertEqual(403, response.status_code)
# A subject member cannot delete its own subject membership
path = self._url('/v1/subjects/%s/members/%s' % (subject_fixture[1]['id'],
TENANT3))
response = requests.delete(path, headers=get_header(TENANT3))
self.assertEqual(403, response.status_code)
# Delete Subject member
path = self._url('/v1/subjects/%s/members/%s' % (subject_fixture[1]['id'],
TENANT3))
response = requests.delete(path, headers=get_header('tenant1'))
self.assertEqual(204, response.status_code)
# Now the subject has no members
path = self._url('/v1/subjects/%s/members' % subject_fixture[1]['id'])
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(200, response.status_code)
body = jsonutils.loads(response.text)
self.assertEqual(0, len(body['members']))
# Adding an 11th subject member should fail since the configured limit is 10
path = self._url('/v1/subjects/%s/members' % subject_fixture[1]['id'])
for i in range(10):
body = jsonutils.dumps({'member': str(uuid.uuid4())})
response = requests.post(path, headers=get_header('tenant1'),
data=body)
self.assertEqual(200, response.status_code)
body = jsonutils.dumps({'member': str(uuid.uuid4())})
response = requests.post(path, headers=get_header('tenant1'),
data=body)
self.assertEqual(413, response.status_code)
# Get Subject member should return not found for public subject
path = self._url('/v1/subjects/%s/members/%s' % (subject_fixture[0]['id'],
TENANT3))
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(404, response.status_code)
# Delete Subject member should return forbidden for public subject
path = self._url('/v1/subjects/%s/members/%s' % (subject_fixture[0]['id'],
TENANT3))
response = requests.delete(path, headers=get_header('tenant1'))
self.assertEqual(403, response.status_code)
self.stop_servers()


class TestSubjectMembersWithRegistry(TestSubjectMembers):
def setUp(self):
super(TestSubjectMembersWithRegistry, self).setUp()
self.api_server.data_api = (
'subject.tests.functional.v1.registry_data_api')
self.registry_server.deployment_flavor = 'trusted-auth'


class TestQuotas(functional.FunctionalTest):
def setUp(self):
super(TestQuotas, self).setUp()
self.cleanup()
self.api_server.deployment_flavor = 'noauth'
self.registry_server.deployment_flavor = 'trusted-auth'
self.user_storage_quota = 100
self.start_servers(**self.__dict__.copy())
def _url(self, path):
return 'http://127.0.0.1:%d%s' % (self.api_port, path)
def _headers(self, custom_headers=None):
base_headers = {
'X-Identity-Status': 'Confirmed',
'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
'X-Tenant-Id': TENANT1,
'X-Roles': 'member',
}
base_headers.update(custom_headers or {})
return base_headers
def _upload_subject_test(self, data_src, expected_status):
# Subject list should be empty
path = self._url('/v1/subjects')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
subjects = jsonutils.loads(response.text)['subjects']
self.assertEqual(0, len(subjects))
# Create a subject (with a deployer-defined property)
path = self._url('/v1/subjects')
headers = self._headers({'content-type': 'application/json'})
data = jsonutils.dumps({'name': 'testimg',
'type': 'kernel',
'foo': 'bar',
'disk_format': 'aki',
'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
subject = jsonutils.loads(response.text)
subject_id = subject['id']
# upload data
path = self._url('/v1/subjects/%s/file' % subject_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data=data_src)
self.assertEqual(expected_status, response.status_code)
# Deletion should work
path = self._url('/v1/subjects/%s' % subject_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
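# Helper summary: create a subject, upload data_src, assert the expected
# upload status, then delete it. With user_storage_quota = 100 from setUp,
# a body under the quota should yield 204 and an oversized one 413
# (Request Entity Too Large), as the tests below assume.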
def test_subject_upload_under_quota(self):
data = 'x' * (self.user_storage_quota - 1)
self._upload_subject_test(data, 204)
def test_subject_upload_exceed_quota(self):
data = 'x' * (self.user_storage_quota + 1)
self._upload_subject_test(data, 413)
def test_chunked_subject_upload_under_quota(self):
def data_gen():
yield 'x' * (self.user_storage_quota - 1)
self._upload_subject_test(data_gen(), 204)
def test_chunked_subject_upload_exceed_quota(self):
def data_gen():
yield 'x' * (self.user_storage_quota + 1)
self._upload_subject_test(data_gen(), 413)
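# Note on the chunked variants above: handing requests a generator makes it
# send the body with Transfer-Encoding: chunked and no Content-Length, so
# the server cannot reject the upload up front and has to enforce the quota
# while the data streams in.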


class TestQuotasWithRegistry(TestQuotas):
def setUp(self):
super(TestQuotasWithRegistry, self).setUp()
self.api_server.data_api = (
'subject.tests.functional.v1.registry_data_api')
self.registry_server.deployment_flavor = 'trusted-auth'
| 44.683237 | 82 | 0.596688 | 160,703 | 0.993232 | 2,294 | 0.014178 | 0 | 0 | 0 | 0 | 48,474 | 0.299596 |
382cc8f8f5ea8df980757417230fc969f24fd71f | 110 | py | Python | pyfantasy/__init__.py | markwhat1/pyfantasy | 318a7afc97c7bf6ba978ff8bb8c4c58f8ea0d420 | [
"MIT"
] | null | null | null | pyfantasy/__init__.py | markwhat1/pyfantasy | 318a7afc97c7bf6ba978ff8bb8c4c58f8ea0d420 | [
"MIT"
] | null | null | null | pyfantasy/__init__.py | markwhat1/pyfantasy | 318a7afc97c7bf6ba978ff8bb8c4c58f8ea0d420 | [
"MIT"
] | null | null | null | from .pyfantasy import Connection
from .pyfantasy import League, Team, Player
from .yahoo_oauth import OAuth2
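# Note (illustrative, not part of the upstream package): re-exporting at the
# package root flattens the import path for callers, e.g.
#     from pyfantasy import League, OAuth2
# instead of reaching into pyfantasy.pyfantasy.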
| 27.5 | 43 | 0.827273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
382d4e4871a9e9b0d2ed09ed6ba02c12a0996aad | 898 | py | Python | examples/hello_world/python/django_amf_example/urls.py | limscoder/amfast | e77162615090b6a1ad03565afcc4bec4b46f6a11 | [
"MIT"
] | 4 | 2015-12-12T04:34:34.000Z | 2021-07-30T22:11:26.000Z | examples/hello_world/python/django_amf_example/urls.py | limscoder/amfast | e77162615090b6a1ad03565afcc4bec4b46f6a11 | [
"MIT"
] | 3 | 2015-03-23T23:45:30.000Z | 2016-08-17T01:32:51.000Z | examples/hello_world/python/django_amf_example/urls.py | limscoder/amfast | e77162615090b6a1ad03565afcc4bec4b46f6a11 | [
"MIT"
] | 8 | 2015-03-23T23:45:34.000Z | 2018-01-25T16:16:43.000Z | import os
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^django_amf_example/', include('django_amf_example.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/', include(admin.site.urls)),
(r'^$', 'django.views.generic.simple.redirect_to', {'url': '/static/hello_world.html'}),
(r'^amf', 'django_amf_example.hello_world.django_channels.rpc_channel'),
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': os.path.join('flex', 'deploy'),
'show_indexes': True}),
)
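# Illustrative note (an assumption about this example project, not original
# text): the old-style string view references are resolved lazily by
# patterns(), so '/' redirects to the static Flex page, '/amf' hands POSTed
# AMF payloads to AmFast's rpc_channel, and '/static/...' is served straight
# from the flex/deploy build directory by django.views.static.serve.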
| 35.92 | 92 | 0.675947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 695 | 0.773942 |
382e05c1aec0a1e0fafc3e75a778a936b61e0df4 | 51,977 | py | Python | mm_power_sdk_python/models/private_trial.py | MolecularMatch/mm-power-sdk-python | 1fcbcc25c47d7b435e03929cb185eb7f10fb415d | [
"Apache-2.0"
] | null | null | null | mm_power_sdk_python/models/private_trial.py | MolecularMatch/mm-power-sdk-python | 1fcbcc25c47d7b435e03929cb185eb7f10fb415d | [
"Apache-2.0"
] | null | null | null | mm_power_sdk_python/models/private_trial.py | MolecularMatch/mm-power-sdk-python | 1fcbcc25c47d7b435e03929cb185eb7f10fb415d | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
MolecularMatch MMPower
MMPower API # noqa: E501
OpenAPI spec version: 1.0.0
Contact: support@molecularmatch.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six


class PrivateTrial(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'institution_id': 'str',
'institution_study_id': 'str',
'registry_id': 'str',
'visible_to_idn': 'bool',
'brief_title': 'str',
'acronym': 'list[str]',
'official_title': 'str',
'sponsors': 'list[ClinicalTrialSponsors]',
'source': 'str',
'oversight': 'Oversight',
'brief_summary': 'str',
'detailed_description': 'str',
'status': 'str',
'start_date': 'datetime',
'completion_date': 'datetime',
'phase': 'str',
'study_type': 'str',
'has_expanded_access': 'bool',
'expanded_access': 'ExpandedAccess',
'study_design': 'StudyDesign',
'primary_outcome': 'list[Outcome]',
'secondary_outcome': 'list[Outcome]',
'other_outcome': 'list[Outcome]',
'number_of_arms': 'int',
'number_of_groups': 'int',
'enrollment': 'int',
'condition': 'list[str]',
'arm_group': 'list[ArmGroup]',
'intervention': 'list[Intervention]',
'biospec_retention': 'str',
'biospec_descr': 'str',
'eligibility': 'Eligibility',
'overall_official': 'list[Investigator]',
'overall_contact': 'Contact',
'overall_contact_backup': 'Contact',
'location': 'list[Location]',
'location_countries': 'list[str]',
'link': 'str',
'reference': 'list[Reference]',
'verification_date': 'datetime',
'study_first_submitted': 'datetime',
'study_first_posted': 'datetime',
'last_update_posted': 'datetime',
'keyword': 'list[str]',
'responsible_party': 'list[ResponsibleParty]',
'processing_status': 'str',
'test': 'bool'
}
attribute_map = {
'id': 'id',
'institution_id': 'institution_id',
'institution_study_id': 'institution_study_id',
'registry_id': 'registry_id',
'visible_to_idn': 'visible_to_IDN',
'brief_title': 'brief_title',
'acronym': 'acronym',
'official_title': 'official_title',
'sponsors': 'sponsors',
'source': 'source',
'oversight': 'oversight',
'brief_summary': 'brief_summary',
'detailed_description': 'detailed_description',
'status': 'status',
'start_date': 'start_date',
'completion_date': 'completion_date',
'phase': 'phase',
'study_type': 'study_type',
'has_expanded_access': 'has_expanded_access',
'expanded_access': 'expanded_access',
'study_design': 'study_design',
'primary_outcome': 'primary_outcome',
'secondary_outcome': 'secondary_outcome',
'other_outcome': 'other_outcome',
'number_of_arms': 'number_of_arms',
'number_of_groups': 'number_of_groups',
'enrollment': 'enrollment',
'condition': 'condition',
'arm_group': 'arm_group',
'intervention': 'intervention',
'biospec_retention': 'biospec_retention',
'biospec_descr': 'biospec_descr',
'eligibility': 'eligibility',
'overall_official': 'overall_official',
'overall_contact': 'overall_contact',
'overall_contact_backup': 'overall_contact_backup',
'location': 'location',
'location_countries': 'location_countries',
'link': 'link',
'reference': 'reference',
'verification_date': 'verification_date',
'study_first_submitted': 'study_first_submitted',
'study_first_posted': 'study_first_posted',
'last_update_posted': 'last_update_posted',
'keyword': 'keyword',
'responsible_party': 'responsible_party',
'processing_status': 'processing_status',
'test': 'test'
}
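# Illustrative note (not generated output): swagger_types and attribute_map
# drive generic (de)serialization. attribute_map translates Python attribute
# names back to the JSON keys the API expects, so a minimal serializer could
# be sketched as:
#
#     {PrivateTrial.attribute_map[k]: getattr(trial, k)
#      for k in PrivateTrial.swagger_types
#      if getattr(trial, k) is not None}
#
# which, for example, emits 'visible_to_idn' under the key 'visible_to_IDN'.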
def __init__(self, id=None, institution_id=None, institution_study_id=None, registry_id=None, visible_to_idn=True, brief_title=None, acronym=None, official_title=None, sponsors=None, source=None, oversight=None, brief_summary=None, detailed_description=None, status=None, start_date=None, completion_date=None, phase='N/A', study_type=None, has_expanded_access=None, expanded_access=None, study_design=None, primary_outcome=None, secondary_outcome=None, other_outcome=None, number_of_arms=1, number_of_groups=1, enrollment=None, condition=None, arm_group=None, intervention=None, biospec_retention='None Retained', biospec_descr=None, eligibility=None, overall_official=None, overall_contact=None, overall_contact_backup=None, location=None, location_countries=None, link=None, reference=None, verification_date=None, study_first_submitted=None, study_first_posted=None, last_update_posted=None, keyword=None, responsible_party=None, processing_status='received', test=None): # noqa: E501
"""PrivateTrial - a model defined in Swagger""" # noqa: E501
self._id = None
self._institution_id = None
self._institution_study_id = None
self._registry_id = None
self._visible_to_idn = None
self._brief_title = None
self._acronym = None
self._official_title = None
self._sponsors = None
self._source = None
self._oversight = None
self._brief_summary = None
self._detailed_description = None
self._status = None
self._start_date = None
self._completion_date = None
self._phase = None
self._study_type = None
self._has_expanded_access = None
self._expanded_access = None
self._study_design = None
self._primary_outcome = None
self._secondary_outcome = None
self._other_outcome = None
self._number_of_arms = None
self._number_of_groups = None
self._enrollment = None
self._condition = None
self._arm_group = None
self._intervention = None
self._biospec_retention = None
self._biospec_descr = None
self._eligibility = None
self._overall_official = None
self._overall_contact = None
self._overall_contact_backup = None
self._location = None
self._location_countries = None
self._link = None
self._reference = None
self._verification_date = None
self._study_first_submitted = None
self._study_first_posted = None
self._last_update_posted = None
self._keyword = None
self._responsible_party = None
self._processing_status = None
self._test = None
self.discriminator = None
if id is not None:
self.id = id
self.institution_id = institution_id
self.institution_study_id = institution_study_id
if registry_id is not None:
self.registry_id = registry_id
if visible_to_idn is not None:
self.visible_to_idn = visible_to_idn
if brief_title is not None:
self.brief_title = brief_title
if acronym is not None:
self.acronym = acronym
self.official_title = official_title
if sponsors is not None:
self.sponsors = sponsors
if source is not None:
self.source = source
if oversight is not None:
self.oversight = oversight
if brief_summary is not None:
self.brief_summary = brief_summary
if detailed_description is not None:
self.detailed_description = detailed_description
self.status = status
self.start_date = start_date
if completion_date is not None:
self.completion_date = completion_date
if phase is not None:
self.phase = phase
self.study_type = study_type
if has_expanded_access is not None:
self.has_expanded_access = has_expanded_access
if expanded_access is not None:
self.expanded_access = expanded_access
if study_design is not None:
self.study_design = study_design
if primary_outcome is not None:
self.primary_outcome = primary_outcome
if secondary_outcome is not None:
self.secondary_outcome = secondary_outcome
if other_outcome is not None:
self.other_outcome = other_outcome
if number_of_arms is not None:
self.number_of_arms = number_of_arms
if number_of_groups is not None:
self.number_of_groups = number_of_groups
if enrollment is not None:
self.enrollment = enrollment
if condition is not None:
self.condition = condition
if arm_group is not None:
self.arm_group = arm_group
if intervention is not None:
self.intervention = intervention
if biospec_retention is not None:
self.biospec_retention = biospec_retention
if biospec_descr is not None:
self.biospec_descr = biospec_descr
if eligibility is not None:
self.eligibility = eligibility
if overall_official is not None:
self.overall_official = overall_official
if overall_contact is not None:
self.overall_contact = overall_contact
if overall_contact_backup is not None:
self.overall_contact_backup = overall_contact_backup
self.location = location
if location_countries is not None:
self.location_countries = location_countries
if link is not None:
self.link = link
if reference is not None:
self.reference = reference
if verification_date is not None:
self.verification_date = verification_date
if study_first_submitted is not None:
self.study_first_submitted = study_first_submitted
if study_first_posted is not None:
self.study_first_posted = study_first_posted
if last_update_posted is not None:
self.last_update_posted = last_update_posted
if keyword is not None:
self.keyword = keyword
if responsible_party is not None:
self.responsible_party = responsible_party
if processing_status is not None:
self.processing_status = processing_status
if test is not None:
self.test = test
@property
def id(self):
"""Gets the id of this PrivateTrial. # noqa: E501
unique study identifier. # noqa: E501
:return: The id of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this PrivateTrial.
unique study identifier. # noqa: E501
:param id: The id of this PrivateTrial. # noqa: E501
:type: str
"""
self._id = id
@property
def institution_id(self):
"""Gets the institution_id of this PrivateTrial. # noqa: E501
Unique institution identifier. # noqa: E501
:return: The institution_id of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._institution_id
@institution_id.setter
def institution_id(self, institution_id):
"""Sets the institution_id of this PrivateTrial.
Unique institution identifier. # noqa: E501
:param institution_id: The institution_id of this PrivateTrial. # noqa: E501
:type: str
"""
if institution_id is None:
raise ValueError("Invalid value for `institution_id`, must not be `None`") # noqa: E501
self._institution_id = institution_id
@property
def institution_study_id(self):
"""Gets the institution_study_id of this PrivateTrial. # noqa: E501
Unique study identifier (for the institution). # noqa: E501
:return: The institution_study_id of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._institution_study_id
@institution_study_id.setter
def institution_study_id(self, institution_study_id):
"""Sets the institution_study_id of this PrivateTrial.
Unique study identifier (for the institution). # noqa: E501
:param institution_study_id: The institution_study_id of this PrivateTrial. # noqa: E501
:type: str
"""
if institution_study_id is None:
raise ValueError("Invalid value for `institution_study_id`, must not be `None`") # noqa: E501
self._institution_study_id = institution_study_id
@property
def registry_id(self):
"""Gets the registry_id of this PrivateTrial. # noqa: E501
The public registry study id. This is only populated once the trial is no longer a private trial. # noqa: E501
:return: The registry_id of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._registry_id
@registry_id.setter
def registry_id(self, registry_id):
"""Sets the registry_id of this PrivateTrial.
The public registry study id. This is only populated once the trial is no longer a private trial. # noqa: E501
:param registry_id: The registry_id of this PrivateTrial. # noqa: E501
:type: str
"""
self._registry_id = registry_id
@property
def visible_to_idn(self):
"""Gets the visible_to_idn of this PrivateTrial. # noqa: E501
If true, then this trial will be visible to the entire IDN, else it is visible only to the owning institution. # noqa: E501
:return: The visible_to_idn of this PrivateTrial. # noqa: E501
:rtype: bool
"""
return self._visible_to_idn
@visible_to_idn.setter
def visible_to_idn(self, visible_to_idn):
"""Sets the visible_to_idn of this PrivateTrial.
If true, then this trial will be visible to the entire IDN, else it is visible only to the owning institution. # noqa: E501
:param visible_to_idn: The visible_to_idn of this PrivateTrial. # noqa: E501
:type: bool
"""
self._visible_to_idn = visible_to_idn
@property
def brief_title(self):
"""Gets the brief_title of this PrivateTrial. # noqa: E501
A short title of the clinical study written in language intended for the lay public. The title should include, where possible, information on the participants, condition being evaluated, and intervention(s) studied. # noqa: E501
:return: The brief_title of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._brief_title
@brief_title.setter
def brief_title(self, brief_title):
"""Sets the brief_title of this PrivateTrial.
A short title of the clinical study written in language intended for the lay public. The title should include, where possible, information on the participants, condition being evaluated, and intervention(s) studied. # noqa: E501
:param brief_title: The brief_title of this PrivateTrial. # noqa: E501
:type: str
"""
self._brief_title = brief_title
@property
def acronym(self):
"""Gets the acronym of this PrivateTrial. # noqa: E501
Acronyms or abbreviations used publicly to identify the clinical study. # noqa: E501
:return: The acronym of this PrivateTrial. # noqa: E501
:rtype: list[str]
"""
return self._acronym
@acronym.setter
def acronym(self, acronym):
"""Sets the acronym of this PrivateTrial.
Acronyms or abbreviations used publicly to identify the clinical study. # noqa: E501
:param acronym: The acronym of this PrivateTrial. # noqa: E501
:type: list[str]
"""
self._acronym = acronym
@property
def official_title(self):
"""Gets the official_title of this PrivateTrial. # noqa: E501
Official title for the clinical trial. # noqa: E501
:return: The official_title of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._official_title
@official_title.setter
def official_title(self, official_title):
"""Sets the official_title of this PrivateTrial.
Official title for the clinical trial. # noqa: E501
:param official_title: The official_title of this PrivateTrial. # noqa: E501
:type: str
"""
if official_title is None:
raise ValueError("Invalid value for `official_title`, must not be `None`") # noqa: E501
self._official_title = official_title
@property
def sponsors(self):
"""Gets the sponsors of this PrivateTrial. # noqa: E501
The list of organizations or persons who initiated the study and who have authority and control over the study. # noqa: E501
:return: The sponsors of this PrivateTrial. # noqa: E501
:rtype: list[ClinicalTrialSponsors]
"""
return self._sponsors
@sponsors.setter
def sponsors(self, sponsors):
"""Sets the sponsors of this PrivateTrial.
The list of organizations or persons who initiated the study and who have authority and control over the study. # noqa: E501
:param sponsors: The sponsors of this PrivateTrial. # noqa: E501
:type: list[ClinicalTrialSponsors]
"""
self._sponsors = sponsors
@property
def source(self):
"""Gets the source of this PrivateTrial. # noqa: E501
Native data source of this record # noqa: E501
:return: The source of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this PrivateTrial.
Native data source of this record # noqa: E501
:param source: The source of this PrivateTrial. # noqa: E501
:type: str
"""
self._source = source
@property
def oversight(self):
"""Gets the oversight of this PrivateTrial. # noqa: E501
:return: The oversight of this PrivateTrial. # noqa: E501
:rtype: Oversight
"""
return self._oversight
@oversight.setter
def oversight(self, oversight):
"""Sets the oversight of this PrivateTrial.
:param oversight: The oversight of this PrivateTrial. # noqa: E501
:type: Oversight
"""
self._oversight = oversight
@property
def brief_summary(self):
"""Gets the brief_summary of this PrivateTrial. # noqa: E501
A short description of the clinical study, including a brief statement of the clinical study's hypothesis, written in language intended for the lay public. # noqa: E501
:return: The brief_summary of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._brief_summary
@brief_summary.setter
def brief_summary(self, brief_summary):
"""Sets the brief_summary of this PrivateTrial.
A short description of the clinical study, including a brief statement of the clinical study's hypothesis, written in language intended for the lay public. # noqa: E501
:param brief_summary: The brief_summary of this PrivateTrial. # noqa: E501
:type: str
"""
self._brief_summary = brief_summary
@property
def detailed_description(self):
"""Gets the detailed_description of this PrivateTrial. # noqa: E501
Extended description of the protocol, including more technical information (as compared to the Brief Summary), if desired. Do not include the entire protocol; do not duplicate information recorded in other data elements, such as Eligibility Criteria or outcome measures. # noqa: E501
:return: The detailed_description of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._detailed_description
@detailed_description.setter
def detailed_description(self, detailed_description):
"""Sets the detailed_description of this PrivateTrial.
Extended description of the protocol, including more technical information (as compared to the Brief Summary), if desired. Do not include the entire protocol; do not duplicate information recorded in other data elements, such as Eligibility Criteria or outcome measures. # noqa: E501
:param detailed_description: The detailed_description of this PrivateTrial. # noqa: E501
:type: str
"""
self._detailed_description = detailed_description
@property
def status(self):
"""Gets the status of this PrivateTrial. # noqa: E501
Trial recruiting status. # noqa: E501
:return: The status of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this PrivateTrial.
Trial recruiting status. # noqa: E501
:param status: The status of this PrivateTrial. # noqa: E501
:type: str
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
allowed_values = ["Active, not recruiting", "Approved for marketing", "Available", "Completed", "Enrolling by invitation", "No longer available", "Not yet recruiting", "Recruiting", "Suspended", "Temporarily not available", "Terminated", "Withdrawn", "Withheld", "Unknown status"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
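# Note (added for illustration): setters for enum-constrained fields follow
# this generated pattern -- validate against allowed_values, then assign --
# so trial.status = 'Recruiting' succeeds while trial.status = 'Open'
# raises ValueError before the attribute changes.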
@property
def start_date(self):
"""Gets the start_date of this PrivateTrial. # noqa: E501
The estimated date on which the clinical study will be open for recruitment of participants, or the actual date on which the first participant was enrolled. # noqa: E501
:return: The start_date of this PrivateTrial. # noqa: E501
:rtype: datetime
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this PrivateTrial.
The estimated date on which the clinical study will be open for recruitment of participants, or the actual date on which the first participant was enrolled. # noqa: E501
:param start_date: The start_date of this PrivateTrial. # noqa: E501
:type: datetime
"""
if start_date is None:
raise ValueError("Invalid value for `start_date`, must not be `None`") # noqa: E501
self._start_date = start_date
@property
def completion_date(self):
"""Gets the completion_date of this PrivateTrial. # noqa: E501
The date the final participant was examined or received an intervention for purposes of final collection of data for the primary and secondary outcome measures and adverse events (for example, last participant’s last visit), whether the clinical study concluded according to the pre-specified protocol or was terminated # noqa: E501
:return: The completion_date of this PrivateTrial. # noqa: E501
:rtype: datetime
"""
return self._completion_date
@completion_date.setter
def completion_date(self, completion_date):
"""Sets the completion_date of this PrivateTrial.
The date the final participant was examined or received an intervention for purposes of final collection of data for the primary and secondary outcome measures and adverse events (for example, last participant’s last visit), whether the clinical study concluded according to the pre-specified protocol or was terminated # noqa: E501
:param completion_date: The completion_date of this PrivateTrial. # noqa: E501
:type: datetime
"""
self._completion_date = completion_date
@property
def phase(self):
"""Gets the phase of this PrivateTrial. # noqa: E501
For a clinical trial of a drug product (including a biological product), the numerical phase of such clinical trial, consistent with terminology in 21 CFR 312.21 and in 21 CFR 312.85 for phase 4 studies. # noqa: E501
:return: The phase of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._phase
@phase.setter
def phase(self, phase):
"""Sets the phase of this PrivateTrial.
For a clinical trial of a drug product (including a biological product), the numerical phase of such clinical trial, consistent with terminology in 21 CFR 312.21 and in 21 CFR 312.85 for phase 4 studies. # noqa: E501
:param phase: The phase of this PrivateTrial. # noqa: E501
:type: str
"""
allowed_values = ["N/A", "Early Phase 1", "Phase 1", "Phase 1/Phase 2", "Phase 2", "Phase 2/Phase 3", "Phase 3", "Phase 4"] # noqa: E501
if phase not in allowed_values:
raise ValueError(
"Invalid value for `phase` ({0}), must be one of {1}" # noqa: E501
.format(phase, allowed_values)
)
self._phase = phase
@property
def study_type(self):
"""Gets the study_type of this PrivateTrial. # noqa: E501
The nature of the investigation or investigational use for which clinical study information is being submitted. # noqa: E501
:return: The study_type of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._study_type
@study_type.setter
def study_type(self, study_type):
"""Sets the study_type of this PrivateTrial.
The nature of the investigation or investigational use for which clinical study information is being submitted. # noqa: E501
:param study_type: The study_type of this PrivateTrial. # noqa: E501
:type: str
"""
if study_type is None:
raise ValueError("Invalid value for `study_type`, must not be `None`") # noqa: E501
allowed_values = ["Expanded Access", "Interventional", "N/A", "Observational", "Observational [Patient Registry]"] # noqa: E501
if study_type not in allowed_values:
raise ValueError(
"Invalid value for `study_type` ({0}), must be one of {1}" # noqa: E501
.format(study_type, allowed_values)
)
self._study_type = study_type
@property
def has_expanded_access(self):
"""Gets the has_expanded_access of this PrivateTrial. # noqa: E501
Whether there is expanded access to the investigational product for patients who do not qualify for enrollment in a clinical trial. Expanded Access for investigational drug products (including biological products) includes all expanded access types under section 561 of the Federal Food, Drug, and Cosmetic Act: (1) for individual participants, including emergency use; (2) for intermediate-size participant populations; and (3) under a treatment IND or treatment protocol. # noqa: E501
:return: The has_expanded_access of this PrivateTrial. # noqa: E501
:rtype: bool
"""
return self._has_expanded_access
@has_expanded_access.setter
def has_expanded_access(self, has_expanded_access):
"""Sets the has_expanded_access of this PrivateTrial.
Whether there is expanded access to the investigational product for patients who do not qualify for enrollment in a clinical trial. Expanded Access for investigational drug products (including biological products) includes all expanded access types under section 561 of the Federal Food, Drug, and Cosmetic Act: (1) for individual participants, including emergency use; (2) for intermediate-size participant populations; and (3) under a treatment IND or treatment protocol. # noqa: E501
:param has_expanded_access: The has_expanded_access of this PrivateTrial. # noqa: E501
:type: bool
"""
self._has_expanded_access = has_expanded_access
@property
def expanded_access(self):
"""Gets the expanded_access of this PrivateTrial. # noqa: E501
:return: The expanded_access of this PrivateTrial. # noqa: E501
:rtype: ExpandedAccess
"""
return self._expanded_access
@expanded_access.setter
def expanded_access(self, expanded_access):
"""Sets the expanded_access of this PrivateTrial.
:param expanded_access: The expanded_access of this PrivateTrial. # noqa: E501
:type: ExpandedAccess
"""
self._expanded_access = expanded_access
@property
def study_design(self):
"""Gets the study_design of this PrivateTrial. # noqa: E501
:return: The study_design of this PrivateTrial. # noqa: E501
:rtype: StudyDesign
"""
return self._study_design
@study_design.setter
def study_design(self, study_design):
"""Sets the study_design of this PrivateTrial.
:param study_design: The study_design of this PrivateTrial. # noqa: E501
:type: StudyDesign
"""
self._study_design = study_design
@property
def primary_outcome(self):
"""Gets the primary_outcome of this PrivateTrial. # noqa: E501
The outcome that an investigator considers to be the most important among the many outcomes that are to be examined in the study. # noqa: E501
:return: The primary_outcome of this PrivateTrial. # noqa: E501
:rtype: list[Outcome]
"""
return self._primary_outcome
@primary_outcome.setter
def primary_outcome(self, primary_outcome):
"""Sets the primary_outcome of this PrivateTrial.
The outcome that an investigator considers to be the most important among the many outcomes that are to be examined in the study. # noqa: E501
:param primary_outcome: The primary_outcome of this PrivateTrial. # noqa: E501
:type: list[Outcome]
"""
self._primary_outcome = primary_outcome
@property
def secondary_outcome(self):
"""Gets the secondary_outcome of this PrivateTrial. # noqa: E501
:return: The secondary_outcome of this PrivateTrial. # noqa: E501
:rtype: list[Outcome]
"""
return self._secondary_outcome
@secondary_outcome.setter
def secondary_outcome(self, secondary_outcome):
"""Sets the secondary_outcome of this PrivateTrial.
:param secondary_outcome: The secondary_outcome of this PrivateTrial. # noqa: E501
:type: list[Outcome]
"""
self._secondary_outcome = secondary_outcome
@property
def other_outcome(self):
"""Gets the other_outcome of this PrivateTrial. # noqa: E501
:return: The other_outcome of this PrivateTrial. # noqa: E501
:rtype: list[Outcome]
"""
return self._other_outcome
@other_outcome.setter
def other_outcome(self, other_outcome):
"""Sets the other_outcome of this PrivateTrial.
:param other_outcome: The other_outcome of this PrivateTrial. # noqa: E501
:type: list[Outcome]
"""
self._other_outcome = other_outcome
@property
def number_of_arms(self):
"""Gets the number_of_arms of this PrivateTrial. # noqa: E501
The number of trial arms. # noqa: E501
:return: The number_of_arms of this PrivateTrial. # noqa: E501
:rtype: int
"""
return self._number_of_arms
@number_of_arms.setter
def number_of_arms(self, number_of_arms):
"""Sets the number_of_arms of this PrivateTrial.
The number of trial arms. # noqa: E501
:param number_of_arms: The number_of_arms of this PrivateTrial. # noqa: E501
:type: int
"""
self._number_of_arms = number_of_arms
@property
def number_of_groups(self):
"""Gets the number_of_groups of this PrivateTrial. # noqa: E501
The number of trial groups. # noqa: E501
:return: The number_of_groups of this PrivateTrial. # noqa: E501
:rtype: int
"""
return self._number_of_groups
@number_of_groups.setter
def number_of_groups(self, number_of_groups):
"""Sets the number_of_groups of this PrivateTrial.
The number of trial groups. # noqa: E501
:param number_of_groups: The number_of_groups of this PrivateTrial. # noqa: E501
:type: int
"""
self._number_of_groups = number_of_groups
@property
def enrollment(self):
"""Gets the enrollment of this PrivateTrial. # noqa: E501
The estimated total number of participants to be enrolled (target number) or the actual total number of participants that are enrolled in the clinical study. # noqa: E501
:return: The enrollment of this PrivateTrial. # noqa: E501
:rtype: int
"""
return self._enrollment
@enrollment.setter
def enrollment(self, enrollment):
"""Sets the enrollment of this PrivateTrial.
The estimated total number of participants to be enrolled (target number) or the actual total number of participants that are enrolled in the clinical study. # noqa: E501
:param enrollment: The enrollment of this PrivateTrial. # noqa: E501
:type: int
"""
self._enrollment = enrollment
@property
def condition(self):
"""Gets the condition of this PrivateTrial. # noqa: E501
Diseases/Conditions related to this trial. # noqa: E501
:return: The condition of this PrivateTrial. # noqa: E501
:rtype: list[str]
"""
return self._condition
@condition.setter
def condition(self, condition):
"""Sets the condition of this PrivateTrial.
Diseases/Conditions related to this trial. # noqa: E501
:param condition: The condition of this PrivateTrial. # noqa: E501
:type: list[str]
"""
self._condition = condition
@property
def arm_group(self):
"""Gets the arm_group of this PrivateTrial. # noqa: E501
Pre-specified groups of participants in a clinical trial assigned to receive specific interventions (or no intervention) according to a protocol. # noqa: E501
:return: The arm_group of this PrivateTrial. # noqa: E501
:rtype: list[ArmGroup]
"""
return self._arm_group
@arm_group.setter
def arm_group(self, arm_group):
"""Sets the arm_group of this PrivateTrial.
Pre-specified groups of participants in a clinical trial assigned to receive specific interventions (or no intervention) according to a protocol. # noqa: E501
:param arm_group: The arm_group of this PrivateTrial. # noqa: E501
:type: list[ArmGroup]
"""
self._arm_group = arm_group
@property
def intervention(self):
"""Gets the intervention of this PrivateTrial. # noqa: E501
Specifies the intervention(s) associated with each arm or group. # noqa: E501
:return: The intervention of this PrivateTrial. # noqa: E501
:rtype: list[Intervention]
"""
return self._intervention
@intervention.setter
def intervention(self, intervention):
"""Sets the intervention of this PrivateTrial.
Specifies the intervention(s) associated with each arm or group. # noqa: E501
:param intervention: The intervention of this PrivateTrial. # noqa: E501
:type: list[Intervention]
"""
self._intervention = intervention
@property
def biospec_retention(self):
"""Gets the biospec_retention of this PrivateTrial. # noqa: E501
:return: The biospec_retention of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._biospec_retention
@biospec_retention.setter
def biospec_retention(self, biospec_retention):
"""Sets the biospec_retention of this PrivateTrial.
:param biospec_retention: The biospec_retention of this PrivateTrial. # noqa: E501
:type: str
"""
allowed_values = ["None Retained", "Samples With DNA", "Samples Without DNA"] # noqa: E501
if biospec_retention not in allowed_values:
raise ValueError(
"Invalid value for `biospec_retention` ({0}), must be one of {1}" # noqa: E501
.format(biospec_retention, allowed_values)
)
self._biospec_retention = biospec_retention
@property
def biospec_descr(self):
"""Gets the biospec_descr of this PrivateTrial. # noqa: E501
:return: The biospec_descr of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._biospec_descr
@biospec_descr.setter
def biospec_descr(self, biospec_descr):
"""Sets the biospec_descr of this PrivateTrial.
:param biospec_descr: The biospec_descr of this PrivateTrial. # noqa: E501
:type: str
"""
self._biospec_descr = biospec_descr
@property
def eligibility(self):
"""Gets the eligibility of this PrivateTrial. # noqa: E501
:return: The eligibility of this PrivateTrial. # noqa: E501
:rtype: Eligibility
"""
return self._eligibility
@eligibility.setter
def eligibility(self, eligibility):
"""Sets the eligibility of this PrivateTrial.
:param eligibility: The eligibility of this PrivateTrial. # noqa: E501
:type: Eligibility
"""
self._eligibility = eligibility
@property
def overall_official(self):
"""Gets the overall_official of this PrivateTrial. # noqa: E501
Person responsible for the overall scientific leadership of the protocol, including study principal investigator. # noqa: E501
:return: The overall_official of this PrivateTrial. # noqa: E501
:rtype: list[Investigator]
"""
return self._overall_official
@overall_official.setter
def overall_official(self, overall_official):
"""Sets the overall_official of this PrivateTrial.
Person responsible for the overall scientific leadership of the protocol, including study principal investigator. # noqa: E501
:param overall_official: The overall_official of this PrivateTrial. # noqa: E501
:type: list[Investigator]
"""
self._overall_official = overall_official
@property
def overall_contact(self):
"""Gets the overall_contact of this PrivateTrial. # noqa: E501
:return: The overall_contact of this PrivateTrial. # noqa: E501
:rtype: Contact
"""
return self._overall_contact
@overall_contact.setter
def overall_contact(self, overall_contact):
"""Sets the overall_contact of this PrivateTrial.
:param overall_contact: The overall_contact of this PrivateTrial. # noqa: E501
:type: Contact
"""
self._overall_contact = overall_contact
@property
def overall_contact_backup(self):
"""Gets the overall_contact_backup of this PrivateTrial. # noqa: E501
:return: The overall_contact_backup of this PrivateTrial. # noqa: E501
:rtype: Contact
"""
return self._overall_contact_backup
@overall_contact_backup.setter
def overall_contact_backup(self, overall_contact_backup):
"""Sets the overall_contact_backup of this PrivateTrial.
:param overall_contact_backup: The overall_contact_backup of this PrivateTrial. # noqa: E501
:type: Contact
"""
self._overall_contact_backup = overall_contact_backup
@property
def location(self):
"""Gets the location of this PrivateTrial. # noqa: E501
Information about the locations offering this trial. # noqa: E501
:return: The location of this PrivateTrial. # noqa: E501
:rtype: list[Location]
"""
return self._location
@location.setter
def location(self, location):
"""Sets the location of this PrivateTrial.
Information about the locations offering this trial. # noqa: E501
:param location: The location of this PrivateTrial. # noqa: E501
:type: list[Location]
"""
if location is None:
raise ValueError("Invalid value for `location`, must not be `None`") # noqa: E501
self._location = location
@property
def location_countries(self):
"""Gets the location_countries of this PrivateTrial. # noqa: E501
Countries with locations offering this trial. # noqa: E501
:return: The location_countries of this PrivateTrial. # noqa: E501
:rtype: list[str]
"""
return self._location_countries
@location_countries.setter
def location_countries(self, location_countries):
"""Sets the location_countries of this PrivateTrial.
Countries with locations offering this trial. # noqa: E501
:param location_countries: The location_countries of this PrivateTrial. # noqa: E501
:type: list[str]
"""
self._location_countries = location_countries
@property
def link(self):
"""Gets the link of this PrivateTrial. # noqa: E501
URL to institution (if private) or registry listing of this trial. # noqa: E501
:return: The link of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._link
@link.setter
def link(self, link):
"""Sets the link of this PrivateTrial.
URL to institution (if private) or registry listing of this trial. # noqa: E501
:param link: The link of this PrivateTrial. # noqa: E501
:type: str
"""
self._link = link
@property
def reference(self):
"""Gets the reference of this PrivateTrial. # noqa: E501
Reference publications pertaining to this trial. # noqa: E501
:return: The reference of this PrivateTrial. # noqa: E501
:rtype: list[Reference]
"""
return self._reference
@reference.setter
def reference(self, reference):
"""Sets the reference of this PrivateTrial.
Reference publications pertaining to this trial. # noqa: E501
:param reference: The reference of this PrivateTrial. # noqa: E501
:type: list[Reference]
"""
self._reference = reference
@property
def verification_date(self):
"""Gets the verification_date of this PrivateTrial. # noqa: E501
The date on which the responsible party last verified the clinical study information in the entire ClinicalTrials.gov record for the clinical study, even if no additional or updated information is being submitted. # noqa: E501
:return: The verification_date of this PrivateTrial. # noqa: E501
:rtype: datetime
"""
return self._verification_date
@verification_date.setter
def verification_date(self, verification_date):
"""Sets the verification_date of this PrivateTrial.
The date on which the responsible party last verified the clinical study information in the entire ClinicalTrials.gov record for the clinical study, even if no additional or updated information is being submitted. # noqa: E501
:param verification_date: The verification_date of this PrivateTrial. # noqa: E501
:type: datetime
"""
self._verification_date = verification_date
@property
def study_first_submitted(self):
"""Gets the study_first_submitted of this PrivateTrial. # noqa: E501
The date on which the study sponsor or investigator first submitted a study record to the trial registry. # noqa: E501
:return: The study_first_submitted of this PrivateTrial. # noqa: E501
:rtype: datetime
"""
return self._study_first_submitted
@study_first_submitted.setter
def study_first_submitted(self, study_first_submitted):
"""Sets the study_first_submitted of this PrivateTrial.
The date on which the study sponsor or investigator first submitted a study record to the trial registry. # noqa: E501
:param study_first_submitted: The study_first_submitted of this PrivateTrial. # noqa: E501
:type: datetime
"""
self._study_first_submitted = study_first_submitted
@property
def study_first_posted(self):
"""Gets the study_first_posted of this PrivateTrial. # noqa: E501
        The date on which the study was first made public on the trial registry. # noqa: E501
:return: The study_first_posted of this PrivateTrial. # noqa: E501
:rtype: datetime
"""
return self._study_first_posted
@study_first_posted.setter
def study_first_posted(self, study_first_posted):
"""Sets the study_first_posted of this PrivateTrial.
        The date on which the study was first made public on the trial registry. # noqa: E501
:param study_first_posted: The study_first_posted of this PrivateTrial. # noqa: E501
:type: datetime
"""
self._study_first_posted = study_first_posted
@property
def last_update_posted(self):
"""Gets the last_update_posted of this PrivateTrial. # noqa: E501
The most recent date that any information was updated for this trial. # noqa: E501
:return: The last_update_posted of this PrivateTrial. # noqa: E501
:rtype: datetime
"""
return self._last_update_posted
@last_update_posted.setter
def last_update_posted(self, last_update_posted):
"""Sets the last_update_posted of this PrivateTrial.
The most recent date that any information was updated for this trial. # noqa: E501
:param last_update_posted: The last_update_posted of this PrivateTrial. # noqa: E501
:type: datetime
"""
self._last_update_posted = last_update_posted
@property
def keyword(self):
"""Gets the keyword of this PrivateTrial. # noqa: E501
Words or phrases that best describe the protocol. Keywords help users find studies in the database. Use NLM's Medical Subject Heading (MeSH)-controlled vocabulary terms where appropriate. Be as specific and precise as possible. # noqa: E501
:return: The keyword of this PrivateTrial. # noqa: E501
:rtype: list[str]
"""
return self._keyword
@keyword.setter
def keyword(self, keyword):
"""Sets the keyword of this PrivateTrial.
Words or phrases that best describe the protocol. Keywords help users find studies in the database. Use NLM's Medical Subject Heading (MeSH)-controlled vocabulary terms where appropriate. Be as specific and precise as possible. # noqa: E501
:param keyword: The keyword of this PrivateTrial. # noqa: E501
:type: list[str]
"""
self._keyword = keyword
@property
def responsible_party(self):
"""Gets the responsible_party of this PrivateTrial. # noqa: E501
The entities and individuals responsible for this trial. # noqa: E501
:return: The responsible_party of this PrivateTrial. # noqa: E501
:rtype: list[ResponsibleParty]
"""
return self._responsible_party
@responsible_party.setter
def responsible_party(self, responsible_party):
"""Sets the responsible_party of this PrivateTrial.
The entities and individuals responsible for this trial. # noqa: E501
:param responsible_party: The responsible_party of this PrivateTrial. # noqa: E501
:type: list[ResponsibleParty]
"""
self._responsible_party = responsible_party
@property
def processing_status(self):
"""Gets the processing_status of this PrivateTrial. # noqa: E501
        Indication of the trial's level of readiness and incorporation into the MolecularMatch Knowledge base. # noqa: E501
:return: The processing_status of this PrivateTrial. # noqa: E501
:rtype: str
"""
return self._processing_status
@processing_status.setter
def processing_status(self, processing_status):
"""Sets the processing_status of this PrivateTrial.
        Indication of the trial's level of readiness and incorporation into the MolecularMatch Knowledge base. # noqa: E501
:param processing_status: The processing_status of this PrivateTrial. # noqa: E501
:type: str
"""
allowed_values = ["received", "in-process", "registered"] # noqa: E501
if processing_status not in allowed_values:
raise ValueError(
"Invalid value for `processing_status` ({0}), must be one of {1}" # noqa: E501
.format(processing_status, allowed_values)
)
self._processing_status = processing_status
@property
def test(self):
"""Gets the test of this PrivateTrial. # noqa: E501
A flag to mark test private trials. # noqa: E501
:return: The test of this PrivateTrial. # noqa: E501
:rtype: bool
"""
return self._test
@test.setter
def test(self, test):
"""Sets the test of this PrivateTrial.
A flag to mark test private trials. # noqa: E501
:param test: The test of this PrivateTrial. # noqa: E501
:type: bool
"""
self._test = test
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PrivateTrial, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PrivateTrial):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 35.920525 | 993 | 0.649595 | 51,702 | 0.994633 | 0 | 0 | 38,903 | 0.748408 | 0 | 0 | 31,444 | 0.604913 |
382e7f7f1ccdfe04486fb53156b6e7f57b31f553 | 1,174 | py | Python | play_heuristic_agent.py | Jueun-Park/gym-module-select | e7b3065b2203218dbe15bfaefecc81074ac6bc76 | [
"MIT"
] | null | null | null | play_heuristic_agent.py | Jueun-Park/gym-module-select | e7b3065b2203218dbe15bfaefecc81074ac6bc76 | [
"MIT"
] | null | null | null | play_heuristic_agent.py | Jueun-Park/gym-module-select | e7b3065b2203218dbe15bfaefecc81074ac6bc76 | [
"MIT"
] | null | null | null | import argparse
import gym
import gym_module_select
from stable_baselines.common.vec_env import DummyVecEnv
def init_parse_argument():
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--num-exp', help='num experiment episode', type=int, default=10)
args = parser.parse_args()
return args
args = init_parse_argument()
env = gym.make('ModuleSelect-v1',
verbose=1,
save_log_flag=True,
log_num=7,
)
env = DummyVecEnv([lambda: env])
num_done = 0
num_proc = 0
try:
obs = env.reset()
while num_done < args.num_exp:
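        # Heuristic policy: map the current module index (num_proc, read
        # from obs[0][0]) to a fixed action; the index-to-action pairs
        # below are specific to this environment configuration.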
if 0 <= num_proc <= 1:
action = [4]
elif 2 <= num_proc <= 3 or num_proc == 6:
action = [3]
elif num_proc == 5:
action = [2]
elif 8 <= num_proc <= 11:
action = [1]
elif num_proc == 4 or num_proc == 7:
action = [0]
else:
print("action error")
obs, rewards, dones, info = env.step(action)
num_proc = int(obs[0][0])
env.render()
if dones[0]:
num_done += 1
except KeyboardInterrupt:
pass
env.close()
| 24.458333 | 95 | 0.556218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.059625 |
382e952e0abc26313f1d85400cc6243d40c68ce8 | 238 | py | Python | 01_Python_Basico_Intermediario/Aula030/aula30.py | Joao-Inacio/Curso-de-Python3 | 179d85f43f77dced640ffb143a87214538254cf3 | [
"MIT"
] | 1 | 2021-07-19T12:31:49.000Z | 2021-07-19T12:31:49.000Z | 01_Python_Basico_Intermediario/Aula030/aula30.py | Joao-Inacio/Curso-de-Python3 | 179d85f43f77dced640ffb143a87214538254cf3 | [
"MIT"
] | null | null | null | 01_Python_Basico_Intermediario/Aula030/aula30.py | Joao-Inacio/Curso-de-Python3 | 179d85f43f77dced640ffb143a87214538254cf3 | [
"MIT"
] | null | null | null | """
Funções (def) - *args **kwargs
"""
# def func(a1, a2, a3, a4, a5, nome=None, a6=None):
# print(a1, a2, a3, a4, a5, nome, a6)
def func(*args, **kwargs):
print(args, kwargs)
lista = [1, 2, 3, 4, 5]
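# *lista unpacks the list into positional arguments (collected by *args);
# nome='João' is passed on as a keyword argument (collected by **kwargs).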
func(*lista, nome='João')
| 17 | 51 | 0.55042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.576763 |
382f2f78668adbeb3d9c2f747861ad2dbe227500 | 78,600 | py | Python | lib/rucio/daemons/conveyor/utils.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | [
"Apache-2.0"
] | null | null | null | lib/rucio/daemons/conveyor/utils.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | [
"Apache-2.0"
] | null | null | null | lib/rucio/daemons/conveyor/utils.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | [
"Apache-2.0"
] | null | null | null | # Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2012-2014
# - Mario Lassnig, <mario.lassnig@cern.ch>, 2013-2015
# - Cedric Serfon, <cedric.serfon@cern.ch>, 2013-2016
# - Wen Guan, <wen.guan@cern.ch>, 2014-2016
# - Joaquin Bogado, <jbogadog@cern.ch>, 2016
"""
Methods common to different conveyor submitter daemons.
"""
import math
import datetime
import json
import logging
import random
import time
import traceback
from dogpile.cache import make_region
from dogpile.cache.api import NoValue
from rucio.common.closeness_sorter import sort_sources
from rucio.common.exception import DataIdentifierNotFound, RSEProtocolNotSupported, InvalidRSEExpression, InvalidRequest
from rucio.common.rse_attributes import get_rse_attributes
from rucio.common.utils import construct_surl, chunks
from rucio.core import did, replica, request, rse as rse_core
from rucio.core.monitor import record_counter, record_timer, record_gauge
from rucio.core.rse_expression_parser import parse_expression
from rucio.db.sqla.constants import DIDType, RequestType, RequestState, RSEType
from rucio.db.sqla.session import read_session
from rucio.rse import rsemanager as rsemgr
REGION_SHORT = make_region().configure('dogpile.cache.memcached',
expiration_time=600,
arguments={'url': "127.0.0.1:11211", 'distributed_lock': True})
def get_rses(rses=None, include_rses=None, exclude_rses=None):
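    """
    Assemble the list of RSEs to work on from an explicit list of names plus
    include/exclude RSE expressions, and resolve each one via rsemgr.
    :param rses:         List of RSE names to start from.
    :param include_rses: RSE expression of RSEs to add.
    :param exclude_rses: RSE expression of RSEs to remove.
    :returns:            List of RSE info dictionaries.
    """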
working_rses = []
rses_list = rse_core.list_rses()
if rses:
working_rses = [rse for rse in rses_list if rse['rse'] in rses]
if include_rses:
try:
parsed_rses = parse_expression(include_rses, session=None)
        except InvalidRSEExpression as e:
            logging.error("Invalid RSE expression %s to include RSEs: %s" % (include_rses, e))
else:
for rse in parsed_rses:
if rse not in working_rses:
working_rses.append(rse)
if not (rses or include_rses):
working_rses = rses_list
if exclude_rses:
try:
parsed_rses = parse_expression(exclude_rses, session=None)
        except InvalidRSEExpression as e:
            logging.error("Invalid RSE expression %s to exclude RSEs: %s" % (exclude_rses, e))
else:
working_rses = [rse for rse in working_rses if rse not in parsed_rses]
working_rses = [rsemgr.get_rse_info(rse['rse']) for rse in working_rses]
return working_rses
def get_requests(rse_id=None,
process=0, total_processes=1, thread=0, total_threads=1,
mock=False, bulk=100, activity=None, activity_shares=None):
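    """
    Fetch the next bunch of queued TRANSFER, STAGEIN and STAGEOUT requests
    assigned to this process/thread and record the timing.
    :returns: List of request dictionaries as returned by request.get_next.
    """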
ts = time.time()
reqs = request.get_next(request_type=[RequestType.TRANSFER,
RequestType.STAGEIN,
RequestType.STAGEOUT],
state=RequestState.QUEUED,
limit=bulk,
rse=rse_id,
activity=activity,
process=process,
total_processes=total_processes,
thread=thread,
total_threads=total_threads,
activity_shares=activity_shares)
record_timer('daemons.conveyor.submitter.get_next', (time.time() - ts) * 1000)
return reqs
def get_sources(dest_rse, schemes, req, max_sources=4):
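    """
    Resolve the source replicas for a request. For STAGEIN requests only
    staging-buffer RSEs are considered; for TRANSFER requests the destination
    is excluded, source_replica_expression is honoured and mixed disk/tape
    sources are reduced to a single type. The result is sorted by closeness,
    truncated to max_sources and shuffled.
    :returns: Tuple (sources, metadata) where sources is a list of
              (rse, pfn, rse_id, ranking) tuples, or (None, None) on error.
    """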
allowed_rses = []
if req['request_type'] == RequestType.STAGEIN:
rses = rse_core.list_rses(filters={'staging_buffer': dest_rse['rse']})
allowed_rses = [x['rse'] for x in rses]
allowed_source_rses = []
if req['attributes']:
if type(req['attributes']) is dict:
req_attributes = json.loads(json.dumps(req['attributes']))
else:
req_attributes = json.loads(str(req['attributes']))
source_replica_expression = req_attributes["source_replica_expression"]
if source_replica_expression:
try:
parsed_rses = parse_expression(source_replica_expression, session=None)
            except InvalidRSEExpression as e:
                logging.error("Invalid RSE expression %s for request %s: %s" % (source_replica_expression,
                                                                                req['request_id'],
                                                                                e))
allowed_source_rses = []
else:
allowed_source_rses = [x['rse'] for x in parsed_rses]
tmpsrc = []
metadata = {}
try:
ts = time.time()
replications = replica.list_replicas(dids=[{'scope': req['scope'],
'name': req['name'],
'type': DIDType.FILE}],
schemes=schemes)
record_timer('daemons.conveyor.submitter.list_replicas', (time.time() - ts) * 1000)
# return gracefully if there are no replicas for a DID
if not replications:
return None, None
for source in replications:
try:
metadata['filesize'] = long(source['bytes'])
            except KeyError:
logging.error('source for %s:%s has no filesize set - skipping' % (source['scope'], source['name']))
continue
metadata['md5'] = source['md5']
metadata['adler32'] = source['adler32']
# TODO: Source protection
            # we need to know upfront whether the sources are a mix of DISK and TAPE
mixed_source = []
for source_rse in source['rses']:
mixed_source.append(rse_core.get_rse(source_rse).rse_type)
mixed_source = True if len(set(mixed_source)) > 1 else False
for source_rse in source['rses']:
if req['request_type'] == RequestType.STAGEIN:
if source_rse in allowed_rses:
for pfn in source['rses'][source_rse]:
# In case of staging request, we only use one source
tmpsrc = [(str(source_rse), str(pfn)), ]
elif req['request_type'] == RequestType.TRANSFER:
if source_rse == dest_rse['rse']:
logging.debug('Skip source %s for request %s because it is the destination' % (source_rse,
req['request_id']))
continue
if allowed_source_rses and not (source_rse in allowed_source_rses):
logging.debug('Skip source %s for request %s because of source_replica_expression %s' % (source_rse,
req['request_id'],
req['attributes']))
continue
# do not allow mixed source jobs, either all DISK or all TAPE
# do not use TAPE on the first try
if mixed_source:
if not req['previous_attempt_id'] and rse_core.get_rse(source_rse).rse_type == RSEType.TAPE and source_rse not in allowed_source_rses:
logging.debug('Skip tape source %s for request %s' % (source_rse,
req['request_id']))
continue
elif req['previous_attempt_id'] and rse_core.get_rse(source_rse).rse_type == RSEType.DISK and source_rse not in allowed_source_rses:
logging.debug('Skip disk source %s for retrial request %s' % (source_rse,
req['request_id']))
continue
filtered_sources = [x for x in source['rses'][source_rse] if x.startswith('gsiftp')]
if not filtered_sources:
filtered_sources = source['rses'][source_rse]
for pfn in filtered_sources:
tmpsrc.append((str(source_rse), str(pfn)))
except DataIdentifierNotFound:
record_counter('daemons.conveyor.submitter.lost_did')
logging.warn('DID %s:%s does not exist anymore - marking request %s as LOST' % (req['scope'],
req['name'],
req['request_id']))
return None, None
except:
record_counter('daemons.conveyor.submitter.unexpected')
logging.critical('Something unexpected happened: %s' % traceback.format_exc())
return None, None
sources = []
    if not tmpsrc:
record_counter('daemons.conveyor.submitter.nosource')
logging.warn('No source replicas found for DID %s:%s - deep check for unavailable replicas' % (req['scope'],
req['name']))
if sum(1 for tmp in replica.list_replicas([{'scope': req['scope'],
'name': req['name'],
'type': DIDType.FILE}],
schemes=schemes,
unavailable=True)):
logging.error('DID %s:%s lost! This should not happen!' % (req['scope'], req['name']))
return None, None
else:
used_sources = request.get_sources(req['request_id'])
for tmp in tmpsrc:
source_rse_info = rsemgr.get_rse_info(tmp[0])
rank = None
if used_sources:
for used_source in used_sources:
if used_source['rse_id'] == source_rse_info['id']:
# file already used
rank = used_source['ranking']
break
sources.append((tmp[0], tmp[1], source_rse_info['id'], rank))
if len(sources) > 1:
sources = sort_sources(sources, dest_rse['rse'])
if len(sources) > max_sources:
sources = sources[:max_sources]
random.shuffle(sources)
return sources, metadata
def get_destinations(rse_info, scheme, req, naming_convention):
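    """
    Compute the destination PFN(s) and space token for a request. For
    non-deterministic RSEs the path is constructed from the containing
    dataset and the naming convention and written back to the replica;
    retried or recovery transfers to tape get a timestamp suffix.
    :returns: Tuple (destinations, dest_spacetoken), or (None, None) if the
              'write' operation is not supported by the RSE.
    """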
dsn = 'other'
pfn = {}
if not rse_info['deterministic']:
ts = time.time()
# get rule scope and name
if req['attributes']:
if type(req['attributes']) is dict:
req_attributes = json.loads(json.dumps(req['attributes']))
else:
req_attributes = json.loads(str(req['attributes']))
if 'ds_name' in req_attributes:
dsn = req_attributes["ds_name"]
if dsn == 'other':
# select a containing dataset
for parent in did.list_parent_dids(req['scope'], req['name']):
if parent['type'] == DIDType.DATASET:
dsn = parent['name']
break
record_timer('daemons.conveyor.submitter.list_parent_dids', (time.time() - ts) * 1000)
# DQ2 path always starts with /, but prefix might not end with /
path = construct_surl(dsn, req['name'], naming_convention)
# retrial transfers to tape need a new filename - add timestamp
if req['request_type'] == RequestType.TRANSFER and rse_info['rse_type'] == 'TAPE':
if 'previous_attempt_id' in req and req['previous_attempt_id']:
path = '%s_%i' % (path, int(time.time()))
logging.debug('Retrial transfer request %s DID %s:%s to tape %s renamed to %s' % (req['request_id'],
req['scope'],
req['name'],
rse_info['rse'],
path))
elif req['activity'] and req['activity'] == 'Recovery':
path = '%s_%i' % (path, int(time.time()))
logging.debug('Recovery transfer request %s DID %s:%s to tape %s renamed to %s' % (req['request_id'],
req['scope'],
req['name'],
rse_info['rse'],
path))
# we must set the destination path for nondeterministic replicas explicitly
replica.update_replicas_paths([{'scope': req['scope'],
'name': req['name'],
'rse_id': req['dest_rse_id'],
'path': path}])
lfn = [{'scope': req['scope'], 'name': req['name'], 'path': path}]
else:
lfn = [{'scope': req['scope'], 'name': req['name']}]
ts = time.time()
try:
pfn = rsemgr.lfns2pfns(rse_info, lfns=lfn, operation='write', scheme=scheme)
except RSEProtocolNotSupported:
logging.error('Operation "write" not supported by %s' % (rse_info['rse']))
return None, None
record_timer('daemons.conveyor.submitter.lfns2pfns', (time.time() - ts) * 1000)
destinations = []
for k in pfn:
if isinstance(pfn[k], (str, unicode)):
destinations.append(pfn[k])
elif isinstance(pfn[k], (tuple, list)):
for url in pfn[k]:
destinations.append(pfn[k][url])
protocol = None
try:
protocol = rsemgr.select_protocol(rse_info, 'write', scheme=scheme)
except RSEProtocolNotSupported:
logging.error('Operation "write" not supported by %s' % (rse_info['rse']))
return None, None
# we need to set the spacetoken if we use SRM
dest_spacetoken = None
if protocol['extended_attributes'] and 'space_token' in protocol['extended_attributes']:
dest_spacetoken = protocol['extended_attributes']['space_token']
return destinations, dest_spacetoken
def get_transfer(rse, req, scheme, mock, max_sources=4):
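    """
    Build the transfer dictionary for a single request: resolve sources and
    destinations, space tokens, overwrite/bring_online/pin-lifetime settings
    and the FTS external host. For STAGEIN requests the selected source is
    also used as the destination.
    :returns: Transfer dictionary, or None if sources, destinations or the
              FTS attribute could not be resolved.
    """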
src_spacetoken = None
if req['request_type'] == RequestType.STAGEIN:
# for staging in, get the sources at first, then use the sources as destination
if not (rse['staging_area'] or rse['rse'].endswith("STAGING")):
raise InvalidRequest('Not a STAGING RSE for STAGE-IN request')
ts = time.time()
if scheme is None:
sources, metadata = get_sources(rse, None, req, max_sources=max_sources)
else:
if not isinstance(scheme, list):
scheme = scheme.split(',')
sources, metadata = get_sources(rse, scheme, req, max_sources=max_sources)
record_timer('daemons.conveyor.submitter.get_sources', (time.time() - ts) * 1000)
logging.debug('Sources for request %s: %s' % (req['request_id'], sources))
if sources is None:
logging.error("Request %s DID %s:%s RSE %s failed to get sources" % (req['request_id'],
req['scope'],
req['name'],
rse['rse']))
return None
filesize = metadata['filesize']
md5 = metadata['md5']
adler32 = metadata['adler32']
# Sources are properly set, so now we can finally force the source RSE to the destination RSE for STAGEIN
dest_rse = sources[0][0]
rse_attr = rse_core.list_rse_attributes(sources[0][0])
fts_hosts = rse_attr.get('fts', None)
naming_convention = rse_attr.get('naming_convention', None)
if len(sources) == 1:
destinations = [sources[0][1]]
else:
# TODO: need to check
return None
protocol = None
try:
            # for staging, dest_space_token should be the source space token
source_rse_info = rsemgr.get_rse_info(sources[0][0])
protocol = rsemgr.select_protocol(source_rse_info, 'write')
except RSEProtocolNotSupported:
logging.error('Operation "write" not supported by %s' % (source_rse_info['rse']))
return None
# we need to set the spacetoken if we use SRM
dest_spacetoken = None
if 'space_token' in protocol['extended_attributes']:
dest_spacetoken = protocol['extended_attributes']['space_token']
# Extend the metadata dictionary with request attributes
copy_pin_lifetime, overwrite, bring_online = -1, True, None
if req['attributes']:
if type(req['attributes']) is dict:
attr = json.loads(json.dumps(req['attributes']))
else:
attr = json.loads(str(req['attributes']))
copy_pin_lifetime = attr.get('lifetime')
overwrite = False
bring_online = 172800
else:
# for normal transfer, get the destination at first, then use the destination scheme to get sources
rse_attr = rse_core.list_rse_attributes(rse['rse'], rse['id'])
fts_hosts = rse_attr.get('fts', None)
naming_convention = rse_attr.get('naming_convention', None)
ts = time.time()
destinations, dest_spacetoken = get_destinations(rse, scheme, req, naming_convention)
record_timer('daemons.conveyor.submitter.get_destinations', (time.time() - ts) * 1000)
logging.debug('Destinations for request %s: %s' % (req['request_id'], destinations))
if destinations is None:
logging.error("Request %s DID %s:%s RSE %s failed to get destinations" % (req['request_id'],
req['scope'],
req['name'],
rse['rse']))
return None
schemes = []
for destination in destinations:
schemes.append(destination.split("://")[0])
if 'srm' in schemes and 'gsiftp' not in schemes:
schemes.append('gsiftp')
if 'gsiftp' in schemes and 'srm' not in schemes:
schemes.append('srm')
logging.debug('Schemes will be allowed for sources: %s' % (schemes))
ts = time.time()
sources, metadata = get_sources(rse, schemes, req, max_sources=max_sources)
record_timer('daemons.conveyor.submitter.get_sources', (time.time() - ts) * 1000)
logging.debug('Sources for request %s: %s' % (req['request_id'], sources))
if not sources:
logging.error("Request %s DID %s:%s RSE %s failed to get sources" % (req['request_id'],
req['scope'],
req['name'],
rse['rse']))
return None
dest_rse = rse['rse']
# exclude destination replica from source
new_sources = sources
for source in sources:
if source[0] == dest_rse:
logging.info('Excluding source %s for request %s: source is destination' % (source[0],
req['request_id']))
new_sources.remove(source)
sources = new_sources
filesize = metadata['filesize']
md5 = metadata['md5']
adler32 = metadata['adler32']
# Extend the metadata dictionary with request attributes
copy_pin_lifetime, overwrite, bring_online = -1, True, None
if rse_core.get_rse(sources[0][0]).rse_type == RSEType.TAPE:
bring_online = 172800
if rse_core.get_rse(None, rse_id=req['dest_rse_id']).rse_type == RSEType.TAPE:
overwrite = False
# make sure we only use one source when bring_online is needed
if bring_online and len(sources) > 1:
sources = [sources[0]]
logging.info('Only using first source %s for bring_online request %s' % (sources,
req['request_id']))
# Come up with mock sources if necessary
if mock:
tmp_sources = []
for s in sources:
tmp_sources.append((s[0], ':'.join(['mock'] + s[1].split(':')[1:]), s[2], s[3]))
sources = tmp_sources
source_surls = [s[1] for s in sources]
if not source_surls:
logging.error('All sources excluded - SKIP REQUEST %s' % req['request_id'])
return
tmp_metadata = {'request_id': req['request_id'],
'scope': req['scope'],
'name': req['name'],
'activity': req['activity'],
'src_rse': sources[0][0],
'dst_rse': dest_rse,
'dest_rse_id': req['dest_rse_id'],
'filesize': filesize,
'md5': md5,
'adler32': adler32}
if 'previous_attempt_id' in req and req['previous_attempt_id']:
tmp_metadata['previous_attempt_id'] = req['previous_attempt_id']
retry_count = req['retry_count']
if not retry_count:
retry_count = 0
if not fts_hosts:
logging.error('Destination RSE %s FTS attribute not defined - SKIP REQUEST %s' % (rse['rse'], req['request_id']))
return
fts_list = fts_hosts.split(",")
external_host = fts_list[retry_count % len(fts_list)]
transfer = {'request_id': req['request_id'],
'sources': sources,
# 'src_urls': source_surls,
'dest_urls': destinations,
'filesize': filesize,
'md5': md5,
'adler32': adler32,
'src_spacetoken': src_spacetoken,
'dest_spacetoken': dest_spacetoken,
'activity': req['activity'],
'overwrite': overwrite,
'bring_online': bring_online,
'copy_pin_lifetime': copy_pin_lifetime,
'external_host': external_host,
'file_metadata': tmp_metadata,
'rule_id': req['rule_id']}
return transfer
def get_transfers_from_requests(process=0, total_processes=1, thread=0, total_threads=1, rse_ids=None,
mock=False, bulk=100, activity=None, activity_shares=None, scheme=None, max_sources=4):
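    """
    Fetch queued requests and turn each of them into a transfer dictionary;
    requests for which no transfer can be built are marked LOST.
    :returns: Dictionary mapping request_id to transfer dictionary.
    """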
ts = time.time()
reqs = get_requests(process=process,
total_processes=total_processes,
thread=thread,
total_threads=total_threads,
mock=mock,
bulk=bulk,
activity=activity,
activity_shares=activity_shares)
record_timer('daemons.conveyor.submitter.get_requests', (time.time() - ts) * 1000)
if reqs:
logging.debug('%i:%i - Getting %i requests' % (process, thread, len(reqs)))
    if not reqs:
return {}
# get transfers
transfers = {}
for req in reqs:
try:
if rse_ids and req['dest_rse_id'] not in rse_ids:
# logging.info("Request dest %s is not in RSEs list, skip")
continue
else:
dest_rse = rse_core.get_rse(rse=None, rse_id=req['dest_rse_id'])
rse_info = rsemgr.get_rse_info(dest_rse['rse'])
ts = time.time()
transfer = get_transfer(rse_info, req, scheme, mock, max_sources=max_sources)
record_timer('daemons.conveyor.submitter.get_transfer', (time.time() - ts) * 1000)
logging.debug('Transfer for request %s: %s' % (req['request_id'], transfer))
if transfer is None:
logging.error("Request %s DID %s:%s RSE %s failed to get transfer" % (req['request_id'],
req['scope'],
req['name'],
rse_info['rse']))
request.set_request_state(req['request_id'], RequestState.LOST)
continue
transfers[req['request_id']] = transfer
        except Exception as e:
            logging.error("Failed to get transfer for request(%s): %s" % (req['request_id'], str(e)))
return transfers
def bulk_group_transfer(transfers, policy='rule', group_bulk=200, fts_source_strategy='auto', max_time_in_queue=None):
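    """
    Group transfers into bulk submissions: first by external (FTS) host, then
    by a job_key built from the shared job parameters, then by the grouping
    policy ('rule', 'dest', 'src_dest' or 'rule_src_dest'), and finally chunk
    every group into pieces of at most group_bulk files. Transfers with
    multiple sources are always submitted as single-file jobs.
    :returns: Dictionary mapping external host to a list of
              {'files': [...], 'job_params': {...}} job dictionaries.
    """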
grouped_transfers = {}
grouped_jobs = {}
for request_id in transfers:
transfer = transfers[request_id]
external_host = transfer['external_host']
if external_host not in grouped_transfers:
grouped_transfers[external_host] = {}
grouped_jobs[external_host] = []
file = {'sources': transfer['sources'],
'destinations': transfer['dest_urls'],
'metadata': transfer['file_metadata'],
'filesize': int(transfer['file_metadata']['filesize']),
'checksum': None,
'selection_strategy': fts_source_strategy,
'request_type': transfer['file_metadata'].get('request_type', None),
'activity': str(transfer['file_metadata']['activity'])}
if file['metadata'].get('verify_checksum', True):
if 'md5' in file['metadata'].keys() and file['metadata']['md5']:
file['checksum'] = 'MD5:%s' % str(file['metadata']['md5'])
if 'adler32' in file['metadata'].keys() and file['metadata']['adler32']:
file['checksum'] = 'ADLER32:%s' % str(file['metadata']['adler32'])
job_params = {'verify_checksum': True if file['checksum'] and file['metadata'].get('verify_checksum', True) else False,
'spacetoken': transfer['dest_spacetoken'] if transfer['dest_spacetoken'] else 'null',
'copy_pin_lifetime': transfer['copy_pin_lifetime'] if transfer['copy_pin_lifetime'] else -1,
'bring_online': transfer['bring_online'] if transfer['bring_online'] else None,
                      'job_metadata': {'issuer': 'rucio'}, # finally job_meta will look like this; currently job_meta equals file_meta to include request_id etc.
'source_spacetoken': transfer['src_spacetoken'] if transfer['src_spacetoken'] else None,
'overwrite': transfer['overwrite'],
'priority': 3}
if max_time_in_queue:
if transfer['file_metadata']['activity'] in max_time_in_queue:
job_params['max_time_in_queue'] = max_time_in_queue[transfer['file_metadata']['activity']]
elif 'default' in max_time_in_queue:
job_params['max_time_in_queue'] = max_time_in_queue['default']
# for multiple source replicas, no bulk submission
if len(transfer['sources']) > 1:
job_params['job_metadata']['multi_sources'] = True
grouped_jobs[external_host].append({'files': [file], 'job_params': job_params})
else:
job_params['job_metadata']['multi_sources'] = False
job_key = '%s,%s,%s,%s,%s,%s,%s,%s' % (job_params['verify_checksum'], job_params['spacetoken'], job_params['copy_pin_lifetime'],
job_params['bring_online'], job_params['job_metadata'], job_params['source_spacetoken'],
job_params['overwrite'], job_params['priority'])
if 'max_time_in_queue' in job_params:
job_key = job_key + ',%s' % job_params['max_time_in_queue']
if job_key not in grouped_transfers[external_host]:
grouped_transfers[external_host][job_key] = {}
if policy == 'rule':
policy_key = '%s' % (transfer['rule_id'])
if policy == 'dest':
policy_key = '%s' % (file['metadata']['dst_rse'])
if policy == 'src_dest':
policy_key = '%s,%s' % (file['metadata']['src_rse'], file['metadata']['dst_rse'])
if policy == 'rule_src_dest':
policy_key = '%s,%s,%s' % (transfer['rule_id'], file['metadata']['src_rse'], file['metadata']['dst_rse'])
# maybe here we need to hash the key if it's too long
if policy_key not in grouped_transfers[external_host][job_key]:
grouped_transfers[external_host][job_key][policy_key] = {'files': [file], 'job_params': job_params}
else:
grouped_transfers[external_host][job_key][policy_key]['files'].append(file)
    # jobs with different job_key values cannot be grouped into a single job.
for external_host in grouped_transfers:
for job_key in grouped_transfers[external_host]:
# for all policy groups in job_key, the job_params is the same.
for policy_key in grouped_transfers[external_host][job_key]:
job_params = grouped_transfers[external_host][job_key][policy_key]['job_params']
for xfers_files in chunks(grouped_transfers[external_host][job_key][policy_key]['files'], group_bulk):
# for the last small piece, just submit it.
grouped_jobs[external_host].append({'files': xfers_files, 'job_params': job_params})
return grouped_jobs
@read_session
def get_unavailable_read_rse_ids(session=None):
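    """
    Return the ids of all RSEs with availability_read set to False, cached in
    memcached via REGION_SHORT (600 seconds expiration).
    """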
key = 'unavailable_read_rse_ids'
result = REGION_SHORT.get(key)
if type(result) is NoValue:
try:
logging.debug("Refresh unavailable read rses")
unavailable_read_rses = rse_core.list_rses(filters={'availability_read': False}, session=session)
unavailable_read_rse_ids = [r['id'] for r in unavailable_read_rses]
REGION_SHORT.set(key, unavailable_read_rse_ids)
return unavailable_read_rse_ids
except:
logging.warning("Failed to refresh unavailable read rses, error: %s" % (traceback.format_exc()))
return []
return result
@read_session
def get_transfer_requests_and_source_replicas(process=None, total_processes=None, thread=None, total_threads=None,
limit=None, activity=None, older_than=None, rses=None, schemes=None,
bring_online=43200, retry_other_fts=False, failover_schemes=None, session=None):
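    """
    List transfer requests together with their source replicas and build the
    transfer dictionaries, applying source_replica_expression, link ranking
    and the disk/tape source selection rules.
    :returns: Tuple (transfers, reqs_no_source, reqs_scheme_mismatch,
              reqs_only_tape_source).
    """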
req_sources = request.list_transfer_requests_and_source_replicas(process=process, total_processes=total_processes, thread=thread, total_threads=total_threads,
limit=limit, activity=activity, older_than=older_than, rses=rses, session=session)
unavailable_read_rse_ids = get_unavailable_read_rse_ids(session=session)
bring_online_local = bring_online
transfers, rses_info, protocols, rse_attrs, reqs_no_source, reqs_only_tape_source, reqs_scheme_mismatch = {}, {}, {}, {}, [], [], []
for id, rule_id, scope, name, md5, adler32, bytes, activity, attributes, previous_attempt_id, dest_rse_id, source_rse_id, rse, deterministic, rse_type, path, retry_count, src_url, ranking, link_ranking in req_sources:
transfer_src_type = "DISK"
transfer_dst_type = "DISK"
allow_tape_source = True
try:
if rses and dest_rse_id not in rses:
continue
current_schemes = schemes
if previous_attempt_id and failover_schemes:
current_schemes = failover_schemes
if id not in transfers:
if id not in reqs_no_source:
reqs_no_source.append(id)
# source_rse_id will be None if no source replicas
# rse will be None if rse is staging area
if source_rse_id is None or rse is None:
continue
if link_ranking is None:
logging.debug("Request %s: no link from %s to %s" % (id, source_rse_id, dest_rse_id))
continue
if source_rse_id in unavailable_read_rse_ids:
continue
# Get destination rse information and protocol
if dest_rse_id not in rses_info:
dest_rse = rse_core.get_rse_name(rse_id=dest_rse_id, session=session)
rses_info[dest_rse_id] = rsemgr.get_rse_info(dest_rse, session=session)
if dest_rse_id not in rse_attrs:
rse_attrs[dest_rse_id] = get_rse_attributes(dest_rse_id, session=session)
attr = None
if attributes:
if type(attributes) is dict:
attr = json.loads(json.dumps(attributes))
else:
attr = json.loads(str(attributes))
# parse source expression
source_replica_expression = attr["source_replica_expression"] if (attr and "source_replica_expression" in attr) else None
if source_replica_expression:
try:
parsed_rses = parse_expression(source_replica_expression, session=session)
                    except InvalidRSEExpression as e:
                        logging.error("Invalid RSE expression %s: %s" % (source_replica_expression, e))
continue
else:
allowed_rses = [x['rse'] for x in parsed_rses]
if rse not in allowed_rses:
continue
                # parse the allow_tape_source expression; not the final version.
# allow_tape_source = attr["allow_tape_source"] if (attr and "allow_tape_source" in attr) else True
allow_tape_source = True
# Get protocol
if dest_rse_id not in protocols:
try:
protocols[dest_rse_id] = rsemgr.create_protocol(rses_info[dest_rse_id], 'write', current_schemes)
except RSEProtocolNotSupported:
logging.error('Operation "write" not supported by %s with schemes %s' % (rses_info[dest_rse_id]['rse'], current_schemes))
if id in reqs_no_source:
reqs_no_source.remove(id)
if id not in reqs_scheme_mismatch:
reqs_scheme_mismatch.append(id)
continue
# get dest space token
dest_spacetoken = None
if protocols[dest_rse_id].attributes and \
'extended_attributes' in protocols[dest_rse_id].attributes and \
protocols[dest_rse_id].attributes['extended_attributes'] and \
'space_token' in protocols[dest_rse_id].attributes['extended_attributes']:
dest_spacetoken = protocols[dest_rse_id].attributes['extended_attributes']['space_token']
# Compute the destination url
if rses_info[dest_rse_id]['deterministic']:
dest_url = protocols[dest_rse_id].lfns2pfns(lfns={'scope': scope, 'name': name}).values()[0]
else:
# compute dest url in case of non deterministic
# naming convention, etc.
dsn = 'other'
if attr and 'ds_name' in attr:
dsn = attr["ds_name"]
else:
# select a containing dataset
for parent in did.list_parent_dids(scope, name):
if parent['type'] == DIDType.DATASET:
dsn = parent['name']
break
# DQ2 path always starts with /, but prefix might not end with /
naming_convention = rse_attrs[dest_rse_id].get('naming_convention', None)
dest_path = construct_surl(dsn, name, naming_convention)
if rses_info[dest_rse_id]['rse_type'] == RSEType.TAPE or rses_info[dest_rse_id]['rse_type'] == 'TAPE':
if retry_count or activity == 'Recovery':
dest_path = '%s_%i' % (dest_path, int(time.time()))
dest_url = protocols[dest_rse_id].lfns2pfns(lfns={'scope': scope, 'name': name, 'path': dest_path}).values()[0]
# get allowed source scheme
src_schemes = []
dest_scheme = dest_url.split("://")[0]
if dest_scheme in ['srm', 'gsiftp']:
src_schemes = ['srm', 'gsiftp']
else:
src_schemes = [dest_scheme]
# Compute the sources: urls, etc
if source_rse_id not in rses_info:
# source_rse = rse_core.get_rse_name(rse_id=source_rse_id, session=session)
source_rse = rse
rses_info[source_rse_id] = rsemgr.get_rse_info(source_rse, session=session)
# Get protocol
source_rse_id_key = '%s_%s' % (source_rse_id, '_'.join(src_schemes))
if source_rse_id_key not in protocols:
try:
protocols[source_rse_id_key] = rsemgr.create_protocol(rses_info[source_rse_id], 'read', src_schemes)
except RSEProtocolNotSupported:
logging.error('Operation "read" not supported by %s with schemes %s' % (rses_info[source_rse_id]['rse'], src_schemes))
if id in reqs_no_source:
reqs_no_source.remove(id)
if id not in reqs_scheme_mismatch:
reqs_scheme_mismatch.append(id)
continue
source_url = protocols[source_rse_id_key].lfns2pfns(lfns={'scope': scope, 'name': name, 'path': path}).values()[0]
# Extend the metadata dictionary with request attributes
overwrite, bring_online = True, None
if rses_info[source_rse_id]['rse_type'] == RSEType.TAPE or rses_info[source_rse_id]['rse_type'] == 'TAPE':
bring_online = bring_online_local
transfer_src_type = "TAPE"
if not allow_tape_source:
if id not in reqs_only_tape_source:
reqs_only_tape_source.append(id)
if id in reqs_no_source:
reqs_no_source.remove(id)
continue
if rses_info[dest_rse_id]['rse_type'] == RSEType.TAPE or rses_info[dest_rse_id]['rse_type'] == 'TAPE':
overwrite = False
transfer_dst_type = "TAPE"
# get external_host
fts_hosts = rse_attrs[dest_rse_id].get('fts', None)
if not fts_hosts:
logging.error('Source RSE %s FTS attribute not defined - SKIP REQUEST %s' % (rse, id))
continue
if retry_count is None:
retry_count = 0
fts_list = fts_hosts.split(",")
external_host = fts_list[0]
if retry_other_fts:
external_host = fts_list[retry_count % len(fts_list)]
if id in reqs_no_source:
reqs_no_source.remove(id)
if id in reqs_only_tape_source:
reqs_only_tape_source.remove(id)
file_metadata = {'request_id': id,
'scope': scope,
'name': name,
'activity': activity,
'request_type': str(RequestType.TRANSFER).lower(),
'src_type': transfer_src_type,
'dst_type': transfer_dst_type,
'src_rse': rse,
'dst_rse': rses_info[dest_rse_id]['rse'],
'src_rse_id': source_rse_id,
'dest_rse_id': dest_rse_id,
'filesize': bytes,
'md5': md5,
'adler32': adler32,
'verify_checksum': rse_attrs[dest_rse_id].get('verify_checksum', True)}
if previous_attempt_id:
file_metadata['previous_attempt_id'] = previous_attempt_id
transfers[id] = {'request_id': id,
'schemes': src_schemes,
# 'src_urls': [source_url],
'sources': [(rse, source_url, source_rse_id, ranking if ranking is not None else 0, link_ranking)],
'dest_urls': [dest_url],
'src_spacetoken': None,
'dest_spacetoken': dest_spacetoken,
'overwrite': overwrite,
'bring_online': bring_online,
'copy_pin_lifetime': attr.get('lifetime', -1),
'external_host': external_host,
'selection_strategy': 'auto',
'rule_id': rule_id,
'file_metadata': file_metadata}
else:
schemes = transfers[id]['schemes']
# source_rse_id will be None if no source replicas
# rse will be None if rse is staging area
if source_rse_id is None or rse is None:
continue
if link_ranking is None:
logging.debug("Request %s: no link from %s to %s" % (id, source_rse_id, dest_rse_id))
continue
if source_rse_id in unavailable_read_rse_ids:
continue
attr = None
if attributes:
if type(attributes) is dict:
attr = json.loads(json.dumps(attributes))
else:
attr = json.loads(str(attributes))
# parse source expression
source_replica_expression = attr["source_replica_expression"] if (attr and "source_replica_expression" in attr) else None
if source_replica_expression:
try:
parsed_rses = parse_expression(source_replica_expression, session=session)
                    except InvalidRSEExpression as e:
                        logging.error("Invalid RSE expression %s: %s" % (source_replica_expression, e))
continue
else:
allowed_rses = [x['rse'] for x in parsed_rses]
if rse not in allowed_rses:
continue
                # parse the allow_tape_source expression; not the final version.
allow_tape_source = attr["allow_tape_source"] if (attr and "allow_tape_source" in attr) else True
# Compute the sources: urls, etc
if source_rse_id not in rses_info:
# source_rse = rse_core.get_rse_name(rse_id=source_rse_id, session=session)
source_rse = rse
rses_info[source_rse_id] = rsemgr.get_rse_info(source_rse, session=session)
if ranking is None:
ranking = 0
                # TAPE should not be mixed with DISK and should not be used as the first try
# If there is a source whose ranking is no less than the Tape ranking, Tape will not be used.
if rses_info[source_rse_id]['rse_type'] == RSEType.TAPE or rses_info[source_rse_id]['rse_type'] == 'TAPE':
# current src_rse is Tape
if not allow_tape_source:
continue
if not transfers[id]['bring_online']:
                        # the sources already found are disks.
avail_top_ranking = None
founded_sources = transfers[id]['sources']
for founded_source in founded_sources:
if avail_top_ranking is None:
avail_top_ranking = founded_source[3]
continue
if founded_source[3] is not None and founded_source[3] > avail_top_ranking:
avail_top_ranking = founded_source[3]
if avail_top_ranking >= ranking:
# current Tape source is not the highest ranking, will use disk sources
continue
else:
transfers[id]['sources'] = []
transfers[id]['bring_online'] = bring_online_local
transfer_src_type = "TAPE"
transfers[id]['file_metadata']['src_type'] = transfer_src_type
transfers[id]['file_metadata']['src_rse'] = rse
else:
                        # the sources already found are Tape too.
# multiple Tape source replicas are not allowed in FTS3.
if transfers[id]['sources'][0][3] > ranking or (transfers[id]['sources'][0][3] == ranking and transfers[id]['sources'][0][4] >= link_ranking):
continue
else:
transfers[id]['sources'] = []
transfers[id]['bring_online'] = bring_online_local
transfers[id]['file_metadata']['src_rse'] = rse
else:
# current src_rse is Disk
if transfers[id]['bring_online']:
                        # the sources found so far are Tape
avail_top_ranking = None
founded_sources = transfers[id]['sources']
for founded_source in founded_sources:
if avail_top_ranking is None:
avail_top_ranking = founded_source[3]
continue
if founded_source[3] is not None and founded_source[3] > avail_top_ranking:
avail_top_ranking = founded_source[3]
if ranking >= avail_top_ranking:
                            # the current disk replica has a higher ranking than the sources found so far
                            # remove the Tape sources already found
transfers[id]['sources'] = []
transfers[id]['bring_online'] = None
transfer_src_type = "DISK"
transfers[id]['file_metadata']['src_type'] = transfer_src_type
transfers[id]['file_metadata']['src_rse'] = rse
else:
continue
# Get protocol
source_rse_id_key = '%s_%s' % (source_rse_id, '_'.join(schemes))
if source_rse_id_key not in protocols:
try:
protocols[source_rse_id_key] = rsemgr.create_protocol(rses_info[source_rse_id], 'read', schemes)
except RSEProtocolNotSupported:
logging.error('Operation "read" not supported by %s with schemes %s' % (rses_info[source_rse_id]['rse'], schemes))
if id not in reqs_scheme_mismatch:
reqs_scheme_mismatch.append(id)
continue
source_url = protocols[source_rse_id_key].lfns2pfns(lfns={'scope': scope, 'name': name, 'path': path}).values()[0]
# transfers[id]['src_urls'].append((source_rse_id, source_url))
transfers[id]['sources'].append((rse, source_url, source_rse_id, ranking, link_ranking))
except:
logging.critical("Exception happened when trying to get transfer for request %s: %s" % (id, traceback.format_exc()))
break
return transfers, reqs_no_source, reqs_scheme_mismatch, reqs_only_tape_source
@read_session
def get_stagein_requests_and_source_replicas(process=None, total_processes=None, thread=None, total_threads=None, failover_schemes=None,
limit=None, activity=None, older_than=None, rses=None, mock=False, schemes=None,
bring_online=43200, retry_other_fts=False, session=None):
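    """
    List stage-in requests together with their source replicas and build the
    transfer dictionaries; the source URL is also used as the destination URL.
    :returns: Tuple (transfers, reqs_no_source).
    """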
req_sources = request.list_stagein_requests_and_source_replicas(process=process, total_processes=total_processes, thread=thread, total_threads=total_threads,
limit=limit, activity=activity, older_than=older_than, rses=rses, session=session)
transfers, rses_info, protocols, rse_attrs, reqs_no_source = {}, {}, {}, {}, []
for id, rule_id, scope, name, md5, adler32, bytes, activity, attributes, dest_rse_id, source_rse_id, rse, deterministic, rse_type, path, staging_buffer, retry_count, previous_attempt_id, src_url, ranking in req_sources:
try:
if rses and dest_rse_id not in rses:
continue
current_schemes = schemes
if previous_attempt_id and failover_schemes:
current_schemes = failover_schemes
if id not in transfers:
if id not in reqs_no_source:
reqs_no_source.append(id)
if not src_url:
# source_rse_id will be None if no source replicas
# rse will be None if rse is staging area
# staging_buffer will be None if rse has no key 'staging_buffer'
if source_rse_id is None or rse is None or staging_buffer is None:
continue
# Get destination rse information and protocol
if dest_rse_id not in rses_info:
dest_rse = rse_core.get_rse_name(rse_id=dest_rse_id, session=session)
rses_info[dest_rse_id] = rsemgr.get_rse_info(dest_rse, session=session)
if staging_buffer != rses_info[dest_rse_id]['rse']:
continue
attr = None
if attributes:
if type(attributes) is dict:
attr = json.loads(json.dumps(attributes))
else:
attr = json.loads(str(attributes))
source_replica_expression = attr["source_replica_expression"] if "source_replica_expression" in attr else None
if source_replica_expression:
try:
parsed_rses = parse_expression(source_replica_expression, session=session)
                        except InvalidRSEExpression as e:
                            logging.error("Invalid RSE expression %s: %s" % (source_replica_expression, e))
continue
else:
allowed_rses = [x['rse'] for x in parsed_rses]
if rse not in allowed_rses:
continue
if source_rse_id not in rses_info:
# source_rse = rse_core.get_rse_name(rse_id=source_rse_id, session=session)
source_rse = rse
rses_info[source_rse_id] = rsemgr.get_rse_info(source_rse, session=session)
if source_rse_id not in rse_attrs:
rse_attrs[source_rse_id] = get_rse_attributes(source_rse_id, session=session)
if source_rse_id not in protocols:
protocols[source_rse_id] = rsemgr.create_protocol(rses_info[source_rse_id], 'write', current_schemes)
# we need to set the spacetoken if we use SRM
dest_spacetoken = None
if protocols[source_rse_id].attributes and \
'extended_attributes' in protocols[source_rse_id].attributes and \
protocols[source_rse_id].attributes['extended_attributes'] and \
'space_token' in protocols[source_rse_id].attributes['extended_attributes']:
dest_spacetoken = protocols[source_rse_id].attributes['extended_attributes']['space_token']
source_url = protocols[source_rse_id].lfns2pfns(lfns={'scope': scope, 'name': name, 'path': path}).values()[0]
else:
# source_rse_id will be None if no source replicas
# rse will be None if rse is staging area
# staging_buffer will be None if rse has no key 'staging_buffer'
if source_rse_id is None or rse is None or staging_buffer is None:
continue
attr = None
if attributes:
if type(attributes) is dict:
attr = json.loads(json.dumps(attributes))
else:
attr = json.loads(str(attributes))
# to get space token and fts attribute
if source_rse_id not in rses_info:
# source_rse = rse_core.get_rse_name(rse_id=source_rse_id, session=session)
source_rse = rse
rses_info[source_rse_id] = rsemgr.get_rse_info(source_rse, session=session)
if source_rse_id not in rse_attrs:
rse_attrs[source_rse_id] = get_rse_attributes(source_rse_id, session=session)
if source_rse_id not in protocols:
protocols[source_rse_id] = rsemgr.create_protocol(rses_info[source_rse_id], 'write', current_schemes)
# we need to set the spacetoken if we use SRM
dest_spacetoken = None
if protocols[source_rse_id].attributes and \
'extended_attributes' in protocols[source_rse_id].attributes and \
protocols[source_rse_id].attributes['extended_attributes'] and \
'space_token' in protocols[source_rse_id].attributes['extended_attributes']:
dest_spacetoken = protocols[source_rse_id].attributes['extended_attributes']['space_token']
source_url = src_url
fts_hosts = rse_attrs[source_rse_id].get('fts', None)
if not fts_hosts:
logging.error('Source RSE %s FTS attribute not defined - SKIP REQUEST %s' % (rse, id))
continue
if not retry_count:
retry_count = 0
fts_list = fts_hosts.split(",")
external_host = fts_list[0]
if retry_other_fts:
external_host = fts_list[retry_count % len(fts_list)]
if id in reqs_no_source:
reqs_no_source.remove(id)
file_metadata = {'request_id': id,
'scope': scope,
'name': name,
'activity': activity,
'request_type': str(RequestType.STAGEIN).lower(),
'src_type': "TAPE",
'dst_type': "DISK",
'src_rse': rse,
'dst_rse': rse,
'src_rse_id': source_rse_id,
'dest_rse_id': dest_rse_id,
'filesize': bytes,
'md5': md5,
'adler32': adler32}
if previous_attempt_id:
file_metadata['previous_attempt_id'] = previous_attempt_id
transfers[id] = {'request_id': id,
# 'src_urls': [source_url],
'sources': [(rse, source_url, source_rse_id, ranking)],
'dest_urls': [source_url],
'src_spacetoken': None,
'dest_spacetoken': dest_spacetoken,
'overwrite': False,
'bring_online': bring_online,
'copy_pin_lifetime': attr.get('lifetime', -1) if attr else -1,
'external_host': external_host,
'selection_strategy': 'auto',
'rule_id': rule_id,
'file_metadata': file_metadata}
logging.debug("Transfer for request(%s): %s" % (id, transfers[id]))
except:
logging.critical("Exception happened when trying to get transfer for request %s: %s" % (id, traceback.format_exc()))
break
return transfers, reqs_no_source
def get_stagein_transfers(process=None, total_processes=None, thread=None, total_threads=None, failover_schemes=None,
limit=None, activity=None, older_than=None, rses=None, mock=False, schemes=None, bring_online=43200, retry_other_fts=False, session=None):
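    """
    Build the stage-in transfers and mark requests without sources as
    NO_SOURCES.
    :returns: Dictionary mapping request_id to transfer dictionary.
    """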
transfers, reqs_no_source = get_stagein_requests_and_source_replicas(process=process, total_processes=total_processes, thread=thread, total_threads=total_threads,
limit=limit, activity=activity, older_than=older_than, rses=rses, mock=mock, schemes=schemes,
bring_online=bring_online, retry_other_fts=retry_other_fts, failover_schemes=failover_schemes,
session=session)
request.set_requests_state(reqs_no_source, RequestState.NO_SOURCES)
return transfers
def handle_requests_with_scheme_mismatch(transfers=None, reqs_scheme_mismatch=None, schemes=None):
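    """
    Handle requests whose available sources do not match the destination
    scheme. Currently this only checks whether a usable source exists;
    forcing a different scheme is still a TODO (see below).
    """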
if not reqs_scheme_mismatch:
return transfers
for request_id in reqs_scheme_mismatch:
logging.debug("Request %s with schemes %s has mismatched sources, will handle it" % (request_id, schemes))
found_avail_source = 0
if request_id in transfers:
for source in transfers[request_id]['sources']:
ranking = source[3]
if ranking >= 0:
                    # if ranking is less than 0, it means it already failed at least once.
found_avail_source = 1
break
if not found_avail_source:
# todo
# try to force scheme to regenerate the dest_url and src_url
# transfer = get_transfer_from_request_id(request_id, scheme='srm') # if rsemgr can select protocol by order, we can change
# if transfer:
# transfers[request_id] = transfer
pass
return transfers
def mock_sources(sources):
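    """
    Replace the scheme of every source URL with 'mock', keeping the rest of
    each source tuple intact.
    """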
tmp_sources = []
for s in sources:
tmp_sources.append((s[0], ':'.join(['mock'] + s[1].split(':')[1:]), s[2], s[3]))
sources = tmp_sources
return tmp_sources
def sort_link_ranking(sources):
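    """
    Sort sources by link_ranking in descending order, shuffling sources that
    share the same link_ranking.
    """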
rank_sources = {}
ret_sources = []
for source in sources:
rse, source_url, source_rse_id, ranking, link_ranking = source
if link_ranking not in rank_sources:
rank_sources[link_ranking] = []
rank_sources[link_ranking].append(source)
rank_keys = rank_sources.keys()
rank_keys.sort(reverse=True)
for rank_key in rank_keys:
sources_list = rank_sources[rank_key]
random.shuffle(sources_list)
ret_sources = ret_sources + sources_list
return ret_sources
def sort_ranking(sources):
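    """
    Sort sources by ranking in descending order, breaking ties with
    sort_link_ranking. Each source is a (rse, source_url, source_rse_id,
    ranking, link_ranking) tuple; a missing ranking counts as 0.
    Illustrative sketch (hypothetical values): given
    [('RSE_B', 'gsiftp://b/f', 2, -1, 9), ('RSE_A', 'gsiftp://a/f', 1, 0, 5)]
    the RSE_A source is returned first, because ranking 0 beats -1.
    """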
logging.debug("Sources before sorting: %s" % sources)
rank_sources = {}
ret_sources = []
for source in sources:
        # ranking comes from the sources table; it reflects the retry count.
        # link_ranking comes from the distances table and is the link rank.
        # link_ranking should not be None (None means no link; the source will not be used).
rse, source_url, source_rse_id, ranking, link_ranking = source
if ranking is None:
ranking = 0
if ranking not in rank_sources:
rank_sources[ranking] = []
rank_sources[ranking].append(source)
rank_keys = rank_sources.keys()
rank_keys.sort(reverse=True)
for rank_key in rank_keys:
sources_list = sort_link_ranking(rank_sources[rank_key])
ret_sources = ret_sources + sources_list
logging.debug("Sources after sorting: %s" % ret_sources)
return ret_sources
def get_transfers(process=None, total_processes=None, thread=None, total_threads=None,
failover_schemes=None, limit=None, activity=None, older_than=None,
rses=None, schemes=None, mock=False, max_sources=4, bring_online=43200,
retry_other_fts=False, session=None):
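    """
    Build the transfer dictionaries for queued requests, flag unfulfillable
    requests (NO_SOURCES, ONLY_TAPE_SOURCES, MISMATCH_SCHEME), sort and trim
    the sources of every transfer and strip link_ranking from the final
    source tuples.
    :returns: Dictionary mapping request_id to transfer dictionary.
    """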
transfers, reqs_no_source, reqs_scheme_mismatch, reqs_only_tape_source = get_transfer_requests_and_source_replicas(process=process, total_processes=total_processes, thread=thread, total_threads=total_threads,
limit=limit, activity=activity, older_than=older_than, rses=rses, schemes=schemes,
bring_online=bring_online, retry_other_fts=retry_other_fts,
failover_schemes=failover_schemes, session=session)
request.set_requests_state(reqs_no_source, RequestState.NO_SOURCES)
request.set_requests_state(reqs_only_tape_source, RequestState.ONLY_TAPE_SOURCES)
request.set_requests_state(reqs_scheme_mismatch, RequestState.MISMATCH_SCHEME)
for request_id in transfers:
sources = transfers[request_id]['sources']
sources = sort_ranking(sources)
if len(sources) > max_sources:
sources = sources[:max_sources]
if not mock:
transfers[request_id]['sources'] = sources
else:
transfers[request_id]['sources'] = mock_sources(sources)
# remove link_ranking in the final sources
sources = transfers[request_id]['sources']
transfers[request_id]['sources'] = []
for source in sources:
rse, source_url, source_rse_id, ranking, link_ranking = source
transfers[request_id]['sources'].append((rse, source_url, source_rse_id, ranking))
transfers[request_id]['file_metadata']['src_rse'] = sources[0][0]
transfers[request_id]['file_metadata']['src_rse_id'] = sources[0][2]
logging.debug("Transfer for request(%s): %s" % (request_id, transfers[request_id]))
return transfers
def submit_transfer(external_host, job, submitter='submitter', cachedir=None, process=0, thread=0, timeout=None):
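    """
    Submit one bulk job to FTS: move the requests to the SUBMITTING state,
    submit all files in a single bulk call, then record SUBMITTED or
    SUBMISSION_FAILED per file; if the bookkeeping fails after a successful
    submission, the job is cancelled again.
    """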
    # prepare the requests for submission
xfers_ret = {}
try:
for file in job['files']:
file_metadata = file['metadata']
request_id = file_metadata['request_id']
log_str = '%s:%s PREPARING REQUEST %s DID %s:%s TO SUBMITTING STATE PREVIOUS %s FROM %s TO %s USING %s ' % (process, thread,
file_metadata['request_id'],
file_metadata['scope'],
file_metadata['name'],
file_metadata['previous_attempt_id'] if 'previous_attempt_id' in file_metadata else None,
file['sources'],
file['destinations'],
external_host)
xfers_ret[request_id] = {'state': RequestState.SUBMITTING, 'external_host': external_host, 'external_id': None, 'dest_url': file['destinations'][0]}
logging.info("%s" % (log_str))
xfers_ret[request_id]['file'] = file
logging.debug("%s:%s start to prepare transfer" % (process, thread))
request.prepare_request_transfers(xfers_ret)
logging.debug("%s:%s finished to prepare transfer" % (process, thread))
except:
logging.error("%s:%s Failed to prepare requests %s state to SUBMITTING(Will not submit jobs but return directly) with error: %s" % (process, thread, xfers_ret.keys(), traceback.format_exc()))
return
# submit the job
eid = None
try:
ts = time.time()
logging.info("%s:%s About to submit job to %s with timeout %s" % (process, thread, external_host, timeout))
eid = request.submit_bulk_transfers(external_host, files=job['files'], transfertool='fts3', job_params=job['job_params'], timeout=timeout)
duration = time.time() - ts
logging.info("%s:%s Submit job %s to %s in %s seconds" % (process, thread, eid, external_host, duration))
record_timer('daemons.conveyor.%s.submit_bulk_transfer.per_file' % submitter, (time.time() - ts) * 1000 / len(job['files']))
record_counter('daemons.conveyor.%s.submit_bulk_transfer' % submitter, len(job['files']))
record_timer('daemons.conveyor.%s.submit_bulk_transfer.files' % submitter, len(job['files']))
    except Exception as ex:
logging.error("Failed to submit a job with error %s: %s" % (str(ex), traceback.format_exc()))
    # register the resulting transfer states
xfers_ret = {}
try:
for file in job['files']:
file_metadata = file['metadata']
request_id = file_metadata['request_id']
log_str = '%s:%s COPYING REQUEST %s DID %s:%s USING %s' % (process, thread, file_metadata['request_id'], file_metadata['scope'], file_metadata['name'], external_host)
if eid:
xfers_ret[request_id] = {'scope': file_metadata['scope'],
'name': file_metadata['name'],
'state': RequestState.SUBMITTED,
'external_host': external_host,
'external_id': eid,
'request_type': file.get('request_type', None),
'dst_rse': file_metadata.get('dst_rse', None),
'src_rse': file_metadata.get('src_rse', None),
'src_rse_id': file_metadata['src_rse_id'],
'metadata': file_metadata}
                log_str += ' with state(%s) with eid(%s)' % (RequestState.SUBMITTED, eid)
                logging.info(log_str)
else:
xfers_ret[request_id] = {'scope': file_metadata['scope'],
'name': file_metadata['name'],
'state': RequestState.SUBMISSION_FAILED,
'external_host': external_host,
'external_id': None,
'request_type': file.get('request_type', None),
'dst_rse': file_metadata.get('dst_rse', None),
'src_rse': file_metadata.get('src_rse', None),
'src_rse_id': file_metadata['src_rse_id'],
'metadata': file_metadata}
                log_str += ' with state(%s) with eid(%s)' % (RequestState.SUBMISSION_FAILED, None)
                logging.warning(log_str)
logging.debug("%s:%s start to register transfer state" % (process, thread))
request.set_request_transfers_state(xfers_ret, datetime.datetime.utcnow())
logging.debug("%s:%s finished to register transfer state" % (process, thread))
    except Exception:
logging.error("%s:%s Failed to register transfer state with error: %s" % (process, thread, traceback.format_exc()))
try:
if eid:
logging.info("%s:%s Cancel transfer %s on %s" % (process, thread, eid, external_host))
request.cancel_request_external_id(eid, external_host)
    except Exception:
logging.error("%s:%s Failed to cancel transfers %s on %s with error: %s" % (process, thread, eid, external_host, traceback.format_exc()))
def schedule_requests():
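    """Throttler: aggregate per-activity/RSE request statistics, then set, update, or remove transfer limits and release waiting requests where capacity allows."""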
try:
logging.info("Throttler retrieve requests statistics")
results = request.get_stats_by_activity_dest_state(state=[RequestState.QUEUED, RequestState.SUBMITTING, RequestState.SUBMITTED, RequestState.WAITING])
result_dict = {}
for activity, dest_rse_id, account, state, counter in results:
threshold = request.get_config_limit(activity, dest_rse_id)
if threshold or (counter and (state == RequestState.WAITING)):
if activity not in result_dict:
result_dict[activity] = {}
if dest_rse_id not in result_dict[activity]:
result_dict[activity][dest_rse_id] = {'waiting': 0, 'transfer': 0, 'threshold': threshold, 'accounts': {}}
if account not in result_dict[activity][dest_rse_id]['accounts']:
result_dict[activity][dest_rse_id]['accounts'][account] = {'waiting': 0, 'transfer': 0}
if state == RequestState.WAITING:
result_dict[activity][dest_rse_id]['accounts'][account]['waiting'] += counter
result_dict[activity][dest_rse_id]['waiting'] += counter
else:
result_dict[activity][dest_rse_id]['accounts'][account]['transfer'] += counter
result_dict[activity][dest_rse_id]['transfer'] += counter
for activity in result_dict:
for dest_rse_id in result_dict[activity]:
threshold = result_dict[activity][dest_rse_id]['threshold']
transfer = result_dict[activity][dest_rse_id]['transfer']
waiting = result_dict[activity][dest_rse_id]['waiting']
logging.debug("Request status for %s at %s: %s" % (activity, activity, result_dict[activity][dest_rse_id]))
if threshold is None:
logging.debug("Throttler remove limits(threshold: %s) and release all waiting requests for acitivity %s, rse_id %s" % (threshold, activity, dest_rse_id))
rse_core.delete_rse_transfer_limits(rse=None, activity=activity, rse_id=dest_rse_id)
request.release_waiting_requests(rse=None, activity=activity, rse_id=dest_rse_id)
rse_name = rse_core.get_rse_name(rse_id=dest_rse_id)
record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.%s.%s' % (activity, rse_name))
elif transfer + waiting > threshold:
logging.debug("Throttler set limits for acitivity %s, rse_id %s" % (activity, dest_rse_id))
rse_core.set_rse_transfer_limits(rse=None, activity=activity, rse_id=dest_rse_id, max_transfers=threshold, transfers=transfer, waitings=waiting)
rse_name = rse_core.get_rse_name(rse_id=dest_rse_id)
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.max_transfers' % (activity, rse_name), threshold)
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.transfers' % (activity, rse_name), transfer)
record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.waitings' % (activity, rse_name), waiting)
if transfer < 0.8 * threshold:
# release requests on account
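                        # Fair-share release: split the free slots (threshold - transfer)
                        # evenly across accounts. An account already transferring more than
                        # its share releases nothing; one with fewer waiting requests than
                        # its share releases all of them; leftover slots are redistributed
                        # among the remaining accounts.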
nr_accounts = len(result_dict[activity][dest_rse_id]['accounts'])
if nr_accounts < 1:
nr_accounts = 1
to_release = threshold - transfer
threshold_per_account = math.ceil(threshold / nr_accounts)
to_release_per_account = math.ceil(to_release / nr_accounts)
accounts = result_dict[activity][dest_rse_id]['accounts']
for account in accounts:
if nr_accounts == 1:
logging.debug("Throttler release %s waiting requests for acitivity %s, rse_id %s, account %s " % (to_release, activity, dest_rse_id, account))
request.release_waiting_requests(rse=None, activity=activity, rse_id=dest_rse_id, account=account, count=to_release)
record_gauge('daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s' % (activity, rse_name, account), to_release)
elif accounts[account]['transfer'] > threshold_per_account:
logging.debug("Throttler will not release waiting requests for acitivity %s, rse_id %s, account %s: It queued more transfers than its share " %
(accounts[account]['waiting'], activity, dest_rse_id, account))
nr_accounts -= 1
to_release_per_account = math.ceil(to_release / nr_accounts)
elif accounts[account]['waiting'] < to_release_per_account:
logging.debug("Throttler release %s waiting requests for acitivity %s, rse_id %s, account %s " % (accounts[account]['waiting'], activity, dest_rse_id, account))
request.release_waiting_requests(rse=None, activity=activity, rse_id=dest_rse_id, account=account, count=accounts[account]['waiting'])
record_gauge('daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s' % (activity, rse_name, account), accounts[account]['waiting'])
to_release = to_release - accounts[account]['waiting']
nr_accounts -= 1
to_release_per_account = math.ceil(to_release / nr_accounts)
else:
logging.debug("Throttler release %s waiting requests for acitivity %s, rse_id %s, account %s " % (to_release_per_account, activity, dest_rse_id, account))
request.release_waiting_requests(rse=None, activity=activity, rse_id=dest_rse_id, account=account, count=to_release_per_account)
record_gauge('daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s' % (activity, rse_name, account), to_release_per_account)
to_release = to_release - to_release_per_account
nr_accounts -= 1
elif waiting > 0:
logging.debug("Throttler remove limits(threshold: %s) and release all waiting requests for acitivity %s, rse_id %s" % (threshold, activity, dest_rse_id))
rse_core.delete_rse_transfer_limits(rse=None, activity=activity, rse_id=dest_rse_id)
request.release_waiting_requests(rse=None, activity=activity, rse_id=dest_rse_id)
rse_name = rse_core.get_rse_name(rse_id=dest_rse_id)
record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.%s.%s' % (activity, rse_name))
    except Exception:
        logging.warning("Failed to schedule requests, error: %s" % (traceback.format_exc()))
| 54.926625 | 223 | 0.537494 | 0 | 0 | 0 | 0 | 28,227 | 0.359122 | 0 | 0 | 16,959 | 0.215763 |
3830df09c72d6bc67a2ca0f6763f909dda7df0af | 574 | py | Python | list_images.py | mrowacz/digital_api_scripts | bc8758862a6fccc982c9e5ae05e6590e88dc9059 | [
"MIT"
] | null | null | null | list_images.py | mrowacz/digital_api_scripts | bc8758862a6fccc982c9e5ae05e6590e88dc9059 | [
"MIT"
] | null | null | null | list_images.py | mrowacz/digital_api_scripts | bc8758862a6fccc982c9e5ae05e6590e88dc9059 | [
"MIT"
] | null | null | null | import os
import json
import requests
url = "https://api.digitalocean.com/v2/images?per_page=999"
headers = {"Authorization" : "Bearer " + os.environ['DIGITALOCEAN_ACCESS_TOKEN']}
r = requests.get(url, headers=headers)
distros = {}
for entry in r.json()["images"]:
s = "(" + str(entry["id"]) + ") " + entry["name"]
if entry["distribution"] in distros:
distros[entry["distribution"]].append(s)
else:
distros[entry["distribution"]] = [s]
for key in distros:
    print(key + ":")
    for list_elem in distros[key]:
        print("\t" + list_elem)
| 26.090909 | 81 | 0.634146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.310105 |
3833598a358309dbc5740aefcbd3d175d6018996 | 8,457 | py | Python | custom_components/journey/__init__.py | intrinseca/journey-custom-component | e29723418c9c6f50ee7c72b0e0e4488e1872a2a2 | [
"MIT"
] | null | null | null | custom_components/journey/__init__.py | intrinseca/journey-custom-component | e29723418c9c6f50ee7c72b0e0e4488e1872a2a2 | [
"MIT"
] | 8 | 2021-12-21T22:11:10.000Z | 2022-02-08T07:46:38.000Z | custom_components/journey/__init__.py | intrinseca/journey-custom-component | e29723418c9c6f50ee7c72b0e0e4488e1872a2a2 | [
"MIT"
] | null | null | null | """
Custom integration to integrate Journey with Home Assistant.
For more details about this integration, please refer to
https://github.com/intrinseca/journey
"""
import asyncio
from dataclasses import dataclass
from datetime import timedelta
import logging
import math
from OSMPythonTools.nominatim import NominatimResult
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import Config, Event, HomeAssistant
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .api import JourneyApiClient
from .const import (
CONF_DESTINATION,
CONF_GMAPS_TOKEN,
CONF_ORIGIN,
CONF_OSM_USERNAME,
DOMAIN,
PLATFORMS,
STARTUP_MESSAGE,
)
from .helpers import get_location_entity, get_location_from_attributes
SCAN_INTERVAL = timedelta(minutes=5)
_LOGGER: logging.Logger = logging.getLogger(__package__)
# pylint: disable=unused-argument
async def async_setup(hass: HomeAssistant, config: Config):
"""Set up this integration using YAML is not supported."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up this integration using UI."""
if hass.data.get(DOMAIN) is None:
hass.data.setdefault(DOMAIN, {})
_LOGGER.info(STARTUP_MESSAGE)
username = entry.data.get(CONF_OSM_USERNAME)
password = entry.data.get(CONF_GMAPS_TOKEN)
origin = entry.data.get(CONF_ORIGIN)
destination = entry.data.get(CONF_DESTINATION)
client = JourneyApiClient(username, password)
coordinator = JourneyDataUpdateCoordinator(
hass, client=client, origin=origin, destination=destination
)
hass.data[DOMAIN][entry.entry_id] = coordinator
for platform in PLATFORMS:
if entry.options.get(platform, True):
hass.async_add_job(
hass.config_entries.async_forward_entry_setup(entry, platform) # type: ignore
)
entry.add_update_listener(async_reload_entry)
return True
@dataclass
class JourneyTravelTime:
"""Container for Journey time data"""
travel_time: dict
destination: str
@property
def travel_time_values(self) -> dict:
"""Flatten the travel time results dictionary"""
if self.travel_time is None:
return {}
return {k: v["value"] for k, v in self.travel_time.items() if k != "status"}
@property
def duration(self):
"""Get the nominal duration of the journey in seconds"""
if self.travel_time_values is None:
return float("nan")
return self.travel_time_values.get("duration", float("nan"))
@property
def duration_min(self):
"""Get the nominal duration of the journey in minutes"""
return round(self.duration / 60) if not math.isnan(self.duration) else None
@property
def duration_in_traffic(self):
"""Get the current duration of the journey in seconds"""
if self.travel_time_values is None:
return float("nan")
return self.travel_time_values.get("duration_in_traffic", self.duration)
@property
def duration_in_traffic_min(self):
"""Get the current duration of the journey in minutes"""
return (
round(self.duration_in_traffic / 60)
if not math.isnan(self.duration_in_traffic)
else None
)
@property
def delay(self):
"""Get the delay to the journey in seconds"""
return self.duration_in_traffic - self.duration
@property
def delay_min(self):
"""Get the delay to the journey in minutes"""
return round(self.delay / 60) if not math.isnan(self.delay) else None
@property
def delay_factor(self):
"""Get the delay to the journey as a percentage"""
return round(100 * self.delay / self.duration) if self.duration > 0 else 0
@dataclass
class JourneyData:
"""Hold the journey data pulled from the APIs"""
origin_reverse_geocode: NominatimResult
travel_time: JourneyTravelTime
@property
def origin_address(self) -> str:
"""Get the suitable address string from the reverse geocoding lookup"""
if self.origin_reverse_geocode is not None:
for key in ["village", "suburb", "town", "city", "state", "country"]:
if key in self.origin_reverse_geocode.address():
return self.origin_reverse_geocode.address()[key]
return "Unknown"
class JourneyDataUpdateCoordinator(DataUpdateCoordinator[JourneyData]): # type: ignore
"""Class to manage fetching data from the API."""
def __init__(
self,
hass: HomeAssistant,
client: JourneyApiClient,
origin: str,
destination: str,
) -> None:
"""Initialize."""
self.api = client
self._origin_entity_id = origin
self._destination_entity_id = destination
async_track_state_change_event(
hass, self._origin_entity_id, self._handle_origin_state_change
)
async_track_state_change_event(
hass, self._destination_entity_id, self._handle_destination_state_change
)
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=SCAN_INTERVAL,
update_method=self.update,
request_refresh_debouncer=Debouncer(
hass, _LOGGER, cooldown=1800, immediate=True
),
)
    async def _handle_origin_state_change(self, event: Event):
        # old_state is absent when the entity is first created
        old_state = event.data.get("old_state")
        new_state = event.data.get("new_state")
        if old_state is not None and new_state is not None and old_state.state == new_state.state:
            _LOGGER.debug("Origin updated without state change, requesting refresh")
            await self.async_request_refresh()
        else:
            _LOGGER.debug("Origin updated *with* state change, forcing refresh")
            await self.async_refresh()
async def _handle_destination_state_change(self, event: Event):
await self.async_refresh()
async def update(self):
"""Update data via library."""
try:
origin_entity = get_location_entity(self.hass, self._origin_entity_id)
origin = get_location_from_attributes(origin_entity)
if origin is not None:
address = await self.api.async_get_address(origin)
else:
_LOGGER.error("Unable to get origin coordinates")
address = None
destination_entity = get_location_entity(
self.hass, self._destination_entity_id
)
if destination_entity is None:
_LOGGER.error("Unable to get destination coordinates")
traveltime = JourneyTravelTime(None, None)
elif origin_entity.entity_id == destination_entity.entity_id:
_LOGGER.info("origin is equal to destination zone")
traveltime = JourneyTravelTime(
{"duration": {"value": 0}, "duration_in_traffic": {"value": 0}},
origin_entity.name,
)
else:
destination = get_location_from_attributes(destination_entity)
destination_name = destination_entity.name
traveltime = JourneyTravelTime(
travel_time=await self.api.async_get_traveltime(
origin, destination
),
destination=destination_name,
)
return JourneyData(address, traveltime)
except Exception as exception:
raise UpdateFailed() from exception
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Handle removal of an entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
unloaded = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
if platform in coordinator.platforms
]
)
)
if unloaded:
hass.data[DOMAIN].pop(entry.entry_id)
return unloaded
async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Reload config entry."""
await async_unload_entry(hass, entry)
await async_setup_entry(hass, entry)
| 32.526923 | 94 | 0.65366 | 5,586 | 0.660518 | 0 | 0 | 2,460 | 0.290883 | 3,902 | 0.461393 | 1,447 | 0.171101 |
3834a1c892c6add4c60a560b94fbde6a1d618040 | 251 | py | Python | acpki/models/Contract.py | sigurd120/acpki | 5994fe50f97adaddece4f961fe4e888dd7d014f4 | [
"BSD-Source-Code"
] | null | null | null | acpki/models/Contract.py | sigurd120/acpki | 5994fe50f97adaddece4f961fe4e888dd7d014f4 | [
"BSD-Source-Code"
] | null | null | null | acpki/models/Contract.py | sigurd120/acpki | 5994fe50f97adaddece4f961fe4e888dd7d014f4 | [
"BSD-Source-Code"
] | null | null | null | class Contract:
"""
Model class representing a Cisco ACI Contract
"""
def __init__(self, uid, name, dn):
self.uid = uid
self.name = name
self.dn = dn
def equals(self, con):
return self.dn == con.dn
| 20.916667 | 49 | 0.553785 | 250 | 0.996016 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.243028 |
3834fa4b5731010d90ed1b11936d38cd049a51f6 | 1,015 | py | Python | setup.py | debugtalk/hdiff | 6e5c67bf020316d52962f48a6ae81cfb5d0ce8c0 | [
"MIT"
] | 1 | 2017-07-06T07:32:48.000Z | 2017-07-06T07:32:48.000Z | setup.py | debugtalk/hdiff | 6e5c67bf020316d52962f48a6ae81cfb5d0ce8c0 | [
"MIT"
] | null | null | null | setup.py | debugtalk/hdiff | 6e5c67bf020316d52962f48a6ae81cfb5d0ce8c0 | [
"MIT"
] | 1 | 2019-12-10T05:20:09.000Z | 2019-12-10T05:20:09.000Z | #encoding: utf-8
import os
import re
from setuptools import setup, find_packages
# parse version from xdiff/__init__.py
with open(os.path.join(os.path.dirname(__file__), 'xdiff', '__init__.py')) as f:
version = re.compile(r"__version__\s+=\s+'(.*)'", re.I).match(f.read()).group(1)
with open('README.md') as f:
long_description = f.read()
setup(
name='xdiff',
version=version,
description='A CLI tool to compare data structures, files, folders, http responses, etc.',
long_description=long_description,
author='Leo Lee',
author_email='mail@debugtalk.com',
url='https://github.com/debugtalk/xdiff.git',
license='MIT',
packages=find_packages(exclude=['tests']),
install_requires=[
'termcolor',
'PyYAML',
'future'
],
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
entry_points={
'console_scripts': [
'xdiff=xdiff.cli:main'
]
}
)
| 26.710526 | 94 | 0.625616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 419 | 0.412808 |
3836b50753707e664310c41fdfbe621c72c591ad | 86 | py | Python | recommendations/kujikatsu/249/c.py | takelifetime/competitive-programming | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | [
"BSD-2-Clause"
] | null | null | null | recommendations/kujikatsu/249/c.py | takelifetime/competitive-programming | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | [
"BSD-2-Clause"
] | 1 | 2021-01-02T06:36:51.000Z | 2021-01-02T06:36:51.000Z | recommendations/kujikatsu/249/c.py | takelifetime/competitive-programming | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | [
"BSD-2-Clause"
] | null | null | null | a, b, n = map(int, input().split())
x = min(b - 1, n)
print(a * x // b - a * (x // b)) | 28.666667 | 35 | 0.430233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3837ed605953a3a84f61221e934440108abfdf2f | 2,790 | py | Python | docs/source/conf.py | ducouloa/ml4ir | 75aeecaff11682a7bd71c5521e59c449c43c3f9f | [
"Apache-2.0"
] | 70 | 2020-02-05T00:42:29.000Z | 2022-03-07T09:33:01.000Z | docs/source/conf.py | ducouloa/ml4ir | 75aeecaff11682a7bd71c5521e59c449c43c3f9f | [
"Apache-2.0"
] | 102 | 2020-01-31T21:12:55.000Z | 2022-03-28T17:04:43.000Z | docs/source/conf.py | ducouloa/ml4ir | 75aeecaff11682a7bd71c5521e59c449c43c3f9f | [
"Apache-2.0"
] | 23 | 2020-02-05T00:43:07.000Z | 2022-02-13T13:33:51.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from typing import List
from recommonmark.transform import AutoStructify
# Set python project root
sys.path.insert(0, os.path.abspath("../../python/"))
# The master toctree document
master_doc = "index"
# -- Project information -----------------------------------------------------
project = "ml4ir"
copyright = "2020, Search Relevance (Salesforce.com, Inc.)"
author = "Search Relevance (Salesforce.com, Inc.)"
# The full version, including alpha/beta/rc tags
release = "0.2.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions: List = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"recommonmark",
] # noqa: E501
# Add any paths that contain templates here, relative to this directory.
templates_path: List = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: List = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme: str = "default"
# Title appended to <title> tag of individual pages
html_title: str = "ml4ir"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path: List = ["_static"]
# Overriding default theme with custom CSS for text wrapping bug on tables
html_context = {
"css_files": ["_static/theme_overrides.css"],
}
def setup(app):
app.add_config_value(
"recommonmark_config", {"enable_eval_rst": True}, True
) # noqa E501
app.add_transform(AutoStructify)
# Use both class definition doc and constructor doc for
# generating sphinx docs for python classes
autoclass_content = "both"
autodoc_member_order = "bysource"
| 32.44186 | 79 | 0.684946 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,190 | 0.784946 |
383841710ce3e85bb05270e42f4b8e80b35e6697 | 1,193 | py | Python | manage.py | ishiland/ArcGIS-License-Tracker | b998bbf9229434f0101f0e211638f0a815c89b73 | [
"MIT"
] | 3 | 2021-01-30T07:08:58.000Z | 2022-01-31T13:36:19.000Z | manage.py | ishiland/flexlm-flask | b998bbf9229434f0101f0e211638f0a815c89b73 | [
"MIT"
] | 2 | 2020-03-16T18:39:41.000Z | 2021-06-24T13:01:55.000Z | manage.py | ishiland/flexlm-flask | b998bbf9229434f0101f0e211638f0a815c89b73 | [
"MIT"
] | 2 | 2018-06-11T20:13:52.000Z | 2019-12-11T21:11:45.000Z | import unittest
from flask_script import Manager, Shell, Server
from app import app, db
from app.fake_populate import populate
manager = Manager(app)
def make_shell_context():
return dict(app=app)
@manager.command
def recreate_db():
"""
Create the SQL database.
"""
db.drop_all()
db.create_all()
db.session.commit()
print("recreated the database")
@manager.command
def fake_populate():
"""
Load dummy data into db
"""
recreate_db()
populate()
print("populated database with dummy data")
@manager.command
def test():
"""
run unit tests
:return: result, successful or not
"""
tests = unittest.TestLoader().discover('tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
@manager.command
def read_once():
"""
a one-time read from the license server.
"""
from app.read_licenses import read
read()
print('Read completed.')
manager.add_command('runserver', Server(threaded=True))
manager.add_command('shell', Shell(make_context=make_shell_context))
if __name__ == '__main__':
manager.run()
| 20.929825 | 71 | 0.673931 | 0 | 0 | 0 | 0 | 809 | 0.678122 | 0 | 0 | 326 | 0.273261 |
3839d2bcd20e33a947c357a2d7aab8f090791caf | 454 | py | Python | learning_journal/routes.py | hcodydibble/pyramid-learning-journal | eb7a59526885a420b6818fcb888497c21674cf76 | [
"MIT"
] | null | null | null | learning_journal/routes.py | hcodydibble/pyramid-learning-journal | eb7a59526885a420b6818fcb888497c21674cf76 | [
"MIT"
] | 3 | 2019-12-26T16:39:40.000Z | 2021-06-01T21:57:09.000Z | learning_journal/routes.py | hcodydibble/pyramid-learning-journal | eb7a59526885a420b6818fcb888497c21674cf76 | [
"MIT"
] | null | null | null | """Routes and URIs."""
def includeme(config):
"""Add routes and their URIs."""
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('home', '/')
config.add_route('about', '/about')
    config.add_route('details', r'/journal/{id:\d+}')
    config.add_route('create', '/journal/new-entry')
    config.add_route('update', r'/journal/{id:\d+}/edit-entry')
    config.add_route('delete', r'/journal/{id:\d+}/delete')
| 34.923077 | 66 | 0.640969 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.488987 |
383a7046c35dad93a416c03cbb5e74d2633b5cd2 | 2,231 | py | Python | moto/logs/metric_filters.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 5,460 | 2015-01-01T01:11:17.000Z | 2022-03-31T23:45:38.000Z | moto/logs/metric_filters.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 4,475 | 2015-01-05T19:37:30.000Z | 2022-03-31T13:55:12.000Z | moto/logs/metric_filters.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 1,831 | 2015-01-14T00:00:44.000Z | 2022-03-31T20:30:04.000Z | def find_metric_transformation_by_name(metric_transformations, metric_name):
for metric in metric_transformations:
if metric["metricName"] == metric_name:
return metric
def find_metric_transformation_by_namespace(metric_transformations, metric_namespace):
for metric in metric_transformations:
if metric["metricNamespace"] == metric_namespace:
return metric
class MetricFilters:
def __init__(self):
self.metric_filters = []
def add_filter(
self, filter_name, filter_pattern, log_group_name, metric_transformations
):
self.metric_filters.append(
{
"filterName": filter_name,
"filterPattern": filter_pattern,
"logGroupName": log_group_name,
"metricTransformations": metric_transformations,
}
)
def get_matching_filters(
self, prefix=None, log_group_name=None, metric_name=None, metric_namespace=None
):
result = []
for f in self.metric_filters:
prefix_matches = prefix is None or f["filterName"].startswith(prefix)
log_group_matches = (
log_group_name is None or f["logGroupName"] == log_group_name
)
metric_name_matches = (
metric_name is None
or find_metric_transformation_by_name(
f["metricTransformations"], metric_name
)
)
namespace_matches = (
metric_namespace is None
or find_metric_transformation_by_namespace(
f["metricTransformations"], metric_namespace
)
)
if (
prefix_matches
and log_group_matches
and metric_name_matches
and namespace_matches
):
result.append(f)
return result
def delete_filter(self, filter_name=None, log_group_name=None):
for f in self.metric_filters:
if f["filterName"] == filter_name and f["logGroupName"] == log_group_name:
self.metric_filters.remove(f)
return self.metric_filters
| 33.80303 | 87 | 0.593904 | 1,820 | 0.815778 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.085612 |
383b08a07ce0a7e72dd5bd0304a317d08bcb959e | 1,840 | py | Python | bin/helper/evaluation_config_mixin.py | rubienr/dirindex | ced18a6ed9e42cab2dc870b61ddfa3ac8a6ceacb | [
"Apache-2.0"
] | 1 | 2020-05-15T17:50:45.000Z | 2020-05-15T17:50:45.000Z | bin/helper/evaluation_config_mixin.py | rubienr/dirindex | ced18a6ed9e42cab2dc870b61ddfa3ac8a6ceacb | [
"Apache-2.0"
] | null | null | null | bin/helper/evaluation_config_mixin.py | rubienr/dirindex | ced18a6ed9e42cab2dc870b61ddfa3ac8a6ceacb | [
"Apache-2.0"
] | null | null | null | from configparser import ConfigParser
from .databases_config_mixin import get_configured_db_file_path
##################################################################################################
class EvaluationConfigMixin(object):
##################################################################################################
SECTION_NAME = "evaluation"
EVALUATION_DB_CONFIG_FIELD_NAME = "evaluation_database_file_path"
DEFAULT_EVALUATION_DB_NAME = "evaluation_database.sqlite"
##################################################################################################
def __init__(self, config_parser: ConfigParser):
self._parser = config_parser # type: ConfigParser
self._evaluation_database_file_path = "" # type: str
##################################################################################################
def read_evaluation_config(self):
self.__handle_evaluation_database_path()
print("[{}]".format(EvaluationConfigMixin.SECTION_NAME))
print("\t{} = '{}'".format(EvaluationConfigMixin.EVALUATION_DB_CONFIG_FIELD_NAME,
self._evaluation_database_file_path))
##################################################################################################
def get_evaluation_database_path(self):
return self._evaluation_database_file_path
##################################################################################################
def __handle_evaluation_database_path(self):
self._evaluation_database_file_path = get_configured_db_file_path(
self._parser,
EvaluationConfigMixin.SECTION_NAME,
EvaluationConfigMixin.EVALUATION_DB_CONFIG_FIELD_NAME,
EvaluationConfigMixin.DEFAULT_EVALUATION_DB_NAME)
| 44.878049 | 102 | 0.492391 | 1,635 | 0.888587 | 0 | 0 | 0 | 0 | 0 | 0 | 711 | 0.386413 |
383c4dbc8788963ff0f2227ca34717a2ddb9d2f7 | 334 | py | Python | backend/api/room/urls.py | jeraldlyh/HoloRPG | e835eb1f7a6b18c87007ecf8168d959b4e176a23 | [
"MIT"
] | null | null | null | backend/api/room/urls.py | jeraldlyh/HoloRPG | e835eb1f7a6b18c87007ecf8168d959b4e176a23 | [
"MIT"
] | null | null | null | backend/api/room/urls.py | jeraldlyh/HoloRPG | e835eb1f7a6b18c87007ecf8168d959b4e176a23 | [
"MIT"
] | null | null | null | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import DungeonViewSet, RoomViewSet
router = DefaultRouter()
router.register(r"dungeon", DungeonViewSet, basename="dungeon")
router.register(r"room", RoomViewSet, basename="room")
urlpatterns = [
path("", include(router.urls)),
] | 27.833333 | 63 | 0.775449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.101796 |
383cd794bb8386729f21cbb926a0d719527719f4 | 1,761 | py | Python | makememe/generator/prompts/types/waiting.py | joshbickett/makememe_ai | 7f66d31564515ea9b4490838d4e33d85ba0a260a | [
"MIT"
] | 37 | 2021-12-25T19:55:37.000Z | 2022-03-23T08:28:49.000Z | makememe/generator/prompts/types/waiting.py | joshbickett/makememe_ai | 7f66d31564515ea9b4490838d4e33d85ba0a260a | [
"MIT"
] | null | null | null | makememe/generator/prompts/types/waiting.py | joshbickett/makememe_ai | 7f66d31564515ea9b4490838d4e33d85ba0a260a | [
"MIT"
] | 5 | 2021-12-29T11:42:28.000Z | 2022-03-08T20:49:49.000Z | from makememe.generator.prompts.prompt import Prompt
import datetime
from PIL import Image
from makememe.generator.design.image_manager import Image_Manager
class Waiting(Prompt):
name = "Waiting"
description = "waiting"
def __init__(self):
self.instruction = """
###
Message:I've been waiting for SpaceX to launch the starship for ever
Meme:{"subject": "SpaceX Startship"}
###
Message:I can't wait for makememe.ai to launch, but it's taking a little while
Meme:{"subject": "makememe.ai"}
###
Message:Drakes new album is going to be fire. Why do I have to wait
Meme:{"subject": "Drakes new album"}
###
Message:I want to create an NFT, but opensea.com is taking a while to load
Meme:{"subject": "opensea.com"}
###
"""
def create(self, meme_text):
with Image.open(f"makememe/static/meme_pics/{self.name.lower()}.jpg").convert(
"RGBA"
) as base:
overlay_image = Image_Manager.add_text(
base=base,
text=meme_text["subject"],
position=(600, 950),
font_size=40,
wrapped_width=20,
)
watermark = Image_Manager.add_text(
base=base, text="makememe.ai", position=(30, 1100), font_size=20
)
base = Image.alpha_composite(base, watermark)
out = Image.alpha_composite(base, overlay_image)
if out.mode in ("RGBA", "P"):
out = out.convert("RGB")
            # timestamped filename without characters that are invalid on some filesystems
            date = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
            image_name = f"{date}.jpg"
file_location = f"makememe/static/creations/{image_name}"
out.save(file_location)
return image_name
| 32.611111 | 86 | 0.599091 | 1,601 | 0.909143 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.366269 |
383e3af7fe9618bbfe62c21b134b2caa1c790a2d | 167 | py | Python | .kodi/addons/plugin.audio.radioreference/__init__.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | null | null | null | .kodi/addons/plugin.audio.radioreference/__init__.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | null | null | null | .kodi/addons/plugin.audio.radioreference/__init__.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | 2 | 2018-04-17T17:34:39.000Z | 2020-07-26T03:43:33.000Z | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# broadcastify
#------------------------------------------------------------
| 33.4 | 62 | 0.131737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.976048 |
383febaf1ca2d83f51aaa28742d1adb8350698d6 | 1,212 | py | Python | ubermag/tests/test_basic_logging.py | ubermag/ubermag | 7e53f62ae643842cd117f9031ab54380b8c00cb0 | [
"BSD-3-Clause"
] | 20 | 2019-06-14T07:09:44.000Z | 2022-03-02T17:37:29.000Z | ubermag/tests/test_basic_logging.py | ubermag/ubermag | 7e53f62ae643842cd117f9031ab54380b8c00cb0 | [
"BSD-3-Clause"
] | 42 | 2019-06-13T11:51:40.000Z | 2022-03-16T14:47:19.000Z | ubermag/tests/test_basic_logging.py | ubermag/ubermag | 7e53f62ae643842cd117f9031ab54380b8c00cb0 | [
"BSD-3-Clause"
] | 5 | 2019-06-20T08:18:30.000Z | 2021-08-10T10:55:09.000Z | import ubermag
import logging
def check_levels(level=logging.WARNING, per_package=None):
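    """Assert that each ubermag package logger is set to the expected level."""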
packages = [
'discretisedfield',
'mag2exp',
'micromagneticdata',
'micromagneticmodel',
'micromagnetictests',
'oommfc',
'ubermagtable',
'ubermagutil',
]
# Root log level should not be modified
assert logging.getLogger('').level == 30
for p in packages:
if per_package is None:
assert logging.getLogger(p).level == level
else:
assert logging.getLogger(p).level == per_package.get(p, level)
def test_setup_logging_default():
ubermag.setup_logging()
check_levels()
def test_setup_logging_levels():
for level in [10, "DEBUG", logging.DEBUG]:
ubermag.setup_logging(level)
check_levels(logging.DEBUG)
def test_setup_logging_per_package():
package_levels = {
'discretisedfield': 0,
'mag2exp': 10,
'micromagneticdata': 20,
'micromagneticmodel': 30,
        'micromagnetictests': 40,
'oommfc': 50,
}
ubermag.setup_logging(level=logging.INFO, package_levels=package_levels)
check_levels(logging.INFO, package_levels)
| 24.734694 | 76 | 0.637789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 262 | 0.216172 |
384039dd253dbb45af5fe08ff152328cbf2dabfd | 8,076 | py | Python | python_framework/api/test/api/src/annotation/GlobalExceptionAnnotationTest.py | SamuelJansen/python_framework | a3e57def47c13edd67319f9bbca32be2bbb00f43 | [
"MIT"
] | 5 | 2020-09-02T20:05:44.000Z | 2022-03-04T21:02:13.000Z | python_framework/api/test/api/src/annotation/GlobalExceptionAnnotationTest.py | SamuelJansen/python_framework | a3e57def47c13edd67319f9bbca32be2bbb00f43 | [
"MIT"
] | 1 | 2021-05-23T22:55:58.000Z | 2021-05-24T15:33:50.000Z | python_framework/api/test/api/src/annotation/GlobalExceptionAnnotationTest.py | SamuelJansen/python_framework | a3e57def47c13edd67319f9bbca32be2bbb00f43 | [
"MIT"
] | 3 | 2020-11-01T01:13:09.000Z | 2022-02-22T15:01:19.000Z | from python_helper import log, Test, SettingHelper, RandomHelper, ObjectHelper, TestHelper, ReflectionHelper, Constant
from python_framework import EncapsulateItWithGlobalException, GlobalException, ExceptionHandler, HttpStatus
LOG_HELPER_SETTINGS = {
log.LOG : False,
log.INFO : True,
log.SUCCESS : True,
log.SETTING : True,
log.DEBUG : True,
log.WARNING : True,
log.WRAPPER : True,
log.FAILURE : True,
log.ERROR : True,
log.TEST : False
}
FULL_LOG_HELPER_SETTINGS = {
log.LOG : True,
log.INFO : True,
log.SUCCESS : True,
log.SETTING : True,
log.DEBUG : True,
log.WARNING : True,
log.WRAPPER : True,
log.FAILURE : True,
log.ERROR : True,
log.TEST : False
}
SUCCESS = '__SUCCESS__'
FAILURE = '__FAILURE__'
RAISED_EXCEPTION = Exception(FAILURE)
@EncapsulateItWithGlobalException()
def externalFuncionDoesNotThrowsException():
return SUCCESS
@EncapsulateItWithGlobalException()
def externalFuncionDoesThrowsException():
raise RAISED_EXCEPTION
@Test(environmentVariables={
SettingHelper.ACTIVE_ENVIRONMENT : SettingHelper.LOCAL_ENVIRONMENT,
**FULL_LOG_HELPER_SETTINGS
}
)
def encapsulateItWithGlobalException_noParameters_unknownException() :
#arrange
@EncapsulateItWithGlobalException()
def internalFuncionDoesNotThrowsException():
return SUCCESS
@EncapsulateItWithGlobalException()
def internalFuncionDoesThrowsException():
raise RAISED_EXCEPTION
    #act
externalSuccess = externalFuncionDoesNotThrowsException()
internalSuccess = internalFuncionDoesNotThrowsException()
externalFailure = TestHelper.getRaisedException(externalFuncionDoesThrowsException)
internalFailure = TestHelper.getRaisedException(internalFuncionDoesThrowsException)
# print(externalFailure.logResource)
# print(externalFailure.logResourceMethod)
# print(internalFailure.logResource)
# print(internalFailure.logResourceMethod)
#assert
assert SUCCESS == externalSuccess
assert SUCCESS == internalSuccess
assert not RAISED_EXCEPTION == externalFailure, f'not {RAISED_EXCEPTION} == {externalFailure}: {not RAISED_EXCEPTION == externalFailure}'
assert not RAISED_EXCEPTION == internalFailure, f'not {RAISED_EXCEPTION} == {internalFailure}: {not RAISED_EXCEPTION == internalFailure}'
assert GlobalException == ReflectionHelper.getClass(externalFailure)
assert GlobalException == ReflectionHelper.getClass(internalFailure)
    assert ExceptionHandler.DEFAULT_MESSAGE == externalFailure.message, f'{ExceptionHandler.DEFAULT_MESSAGE} == {externalFailure.message}: {ExceptionHandler.DEFAULT_MESSAGE == externalFailure.message}'
assert ExceptionHandler.DEFAULT_MESSAGE == internalFailure.message
assert HttpStatus.INTERNAL_SERVER_ERROR == externalFailure.status
assert HttpStatus.INTERNAL_SERVER_ERROR == internalFailure.status
assert FAILURE == externalFailure.logMessage
assert FAILURE == internalFailure.logMessage
assert ExceptionHandler.DEFAULT_LOG_RESOURCE == externalFailure.logResource, f'{ExceptionHandler.DEFAULT_LOG_RESOURCE} == {externalFailure.logResource}: {ExceptionHandler.DEFAULT_LOG_RESOURCE == externalFailure.logResource}'
assert externalFuncionDoesThrowsException.__name__ == externalFailure.logResourceMethod.__name__, f'{externalFuncionDoesThrowsException} == {externalFailure.logResourceMethod}: {externalFuncionDoesThrowsException == externalFailure.logResourceMethod}'
assert ExceptionHandler.DEFAULT_LOG_RESOURCE == internalFailure.logResource, f'{ExceptionHandler.DEFAULT_LOG_RESOURCE} == {internalFailure.logResource}: {ExceptionHandler.DEFAULT_LOG_RESOURCE == internalFailure.logResource}'
assert type(internalFuncionDoesThrowsException) == type(internalFailure.logResourceMethod)
@Test(environmentVariables={
SettingHelper.ACTIVE_ENVIRONMENT : SettingHelper.LOCAL_ENVIRONMENT,
**FULL_LOG_HELPER_SETTINGS
}
)
def encapsulateItWithGlobalException_noParameters_GlobalException() :
#arrange
class MyClass:
def myMethod(self):
...
resource = MyClass()
ERROR_MESSAGE = 'ERROR_MESSAGE'
LOG_ERROR_MESSAGE = 'LOG_ERROR_MESSAGE'
EXCEPTION_STATUS = HttpStatus.BAD_REQUEST
simpleException = GlobalException(
status = EXCEPTION_STATUS,
message = ERROR_MESSAGE,
logMessage = LOG_ERROR_MESSAGE,
logResource = resource,
logResourceMethod = resource.myMethod
)
@EncapsulateItWithGlobalException()
def internalFuncionDoesThrowsException():
raise simpleException
    #act
internalFailure = TestHelper.getRaisedException(internalFuncionDoesThrowsException)
# print(internalFailure)
# print(internalFailure.logResource)
# print(internalFailure.logResourceMethod)
#assert
assert not RAISED_EXCEPTION == internalFailure, f'not {RAISED_EXCEPTION} == {internalFailure}: {not RAISED_EXCEPTION == internalFailure}'
assert GlobalException == ReflectionHelper.getClass(internalFailure)
    assert ERROR_MESSAGE == internalFailure.message, f'"{ERROR_MESSAGE}" and "{internalFailure.message}" should be equal'
assert EXCEPTION_STATUS == internalFailure.status
assert LOG_ERROR_MESSAGE == internalFailure.logMessage, f'"{LOG_ERROR_MESSAGE} == {internalFailure.logMessage}": {LOG_ERROR_MESSAGE == internalFailure.logMessage}'
assert resource == internalFailure.logResource, f'"{resource} == {internalFailure.logResource}": {resource == internalFailure.logResource}'
assert resource.myMethod == internalFailure.logResourceMethod, f'"{resource.myMethod} == {internalFailure.logResourceMethod}": {resource.myMethod == internalFailure.logResourceMethod}'
@Test(environmentVariables={
SettingHelper.ACTIVE_ENVIRONMENT : SettingHelper.LOCAL_ENVIRONMENT,
**FULL_LOG_HELPER_SETTINGS
}
)
def encapsulateItWithGlobalException_withParameters_GlobalException() :
#arrange
class MyClass:
def myMethod(self):
...
resource = MyClass()
ERROR_MESSAGE = 'ERROR_MESSAGE'
LOG_ERROR_MESSAGE = 'LOG_ERROR_MESSAGE'
EXCEPTION_STATUS = HttpStatus.BAD_REQUEST
simpleException = Exception(ERROR_MESSAGE)
PERSONALIZED_MESSAGE = 'PERSONALIZED_MESSAGE'
PERSONALIZED_STATUS = HttpStatus.UNAUTHORIZED
@EncapsulateItWithGlobalException(message=PERSONALIZED_MESSAGE, status=PERSONALIZED_STATUS)
def internalFuncionDoesThrowsException():
raise simpleException
#act
internalFailure = TestHelper.getRaisedException(internalFuncionDoesThrowsException)
print(internalFailure)
print(internalFailure.logResource)
print(internalFailure.logResourceMethod)
#assert
assert not RAISED_EXCEPTION == internalFailure, f'not {RAISED_EXCEPTION} == {internalFailure}: {not RAISED_EXCEPTION == internalFailure}'
assert GlobalException == ReflectionHelper.getClass(internalFailure)
assert PERSONALIZED_MESSAGE == internalFailure.message, f'{PERSONALIZED_MESSAGE} == {internalFailure.message}: {PERSONALIZED_MESSAGE == internalFailure.message}'
assert PERSONALIZED_STATUS == internalFailure.status
assert ERROR_MESSAGE == internalFailure.logMessage, f'{ERROR_MESSAGE} == {internalFailure.logMessage}: {ERROR_MESSAGE == internalFailure.logMessage}'
assert ExceptionHandler.DEFAULT_LOG_RESOURCE == internalFailure.logResource
assert type(internalFuncionDoesThrowsException) == type(internalFailure.logResourceMethod)
| 48.071429 | 255 | 0.773279 | 116 | 0.014364 | 0 | 0 | 7,236 | 0.895988 | 0 | 0 | 2,321 | 0.287395 |
38410ea065efac093f59cf59aebf388e93e666a8 | 4,007 | py | Python | src/Pybind11Wraps/Neighbor/TreeNeighbor.py | markguozhiming/spheral | bbb982102e61edb8a1d00cf780bfa571835e1b61 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 1 | 2020-10-21T01:56:55.000Z | 2020-10-21T01:56:55.000Z | src/Pybind11Wraps/Neighbor/TreeNeighbor.py | markguozhiming/spheral | bbb982102e61edb8a1d00cf780bfa571835e1b61 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | null | null | null | src/Pybind11Wraps/Neighbor/TreeNeighbor.py | markguozhiming/spheral | bbb982102e61edb8a1d00cf780bfa571835e1b61 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | null | null | null | #-------------------------------------------------------------------------------
# TreeNeighbor
#-------------------------------------------------------------------------------
from PYB11Generator import *
from Neighbor import *
from NeighborAbstractMethods import *
@PYB11template("Dimension")
class TreeNeighbor(Neighbor):
PYB11typedefs = """
typedef typename TreeNeighbor<%(Dimension)s>::LevelKey LevelKey;
typedef typename TreeNeighbor<%(Dimension)s>::CellKey CellKey;
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef NodeList<%(Dimension)s> NodeListType;
typedef GridCellIndex<%(Dimension)s> GridCellIndexType;
typedef GeomPlane<%(Dimension)s> Plane;
"""
#...........................................................................
# Constructors
def pyinit(self,
nodeList = "NodeListType&",
searchType = ("const NeighborSearchType", "NeighborSearchType::GatherScatter"),
kernelExtent = ("const double", "2.0"),
xmin = "const Vector&",
xmax = "const Vector&"):
"Construct a TreeNeighbor"
#...........................................................................
# Methods
@PYB11virtual
def reinitialize(self,
xmin = "const Vector&",
xmax = "const Vector&",
htarget = "const Scalar"):
"Reinitialize to possibly more efficient based on the specified box (xmin,xmax) and htarget size"
return "void"
@PYB11const
def gridLevel(self, h="const double&"):
"Find the tree level appropriate for h (units of length)"
return "unsigned"
    # Second C++ overload: it needs a distinct Python-side name, exported as "gridLevel"
    @PYB11pycppname("gridLevel")
    @PYB11const
    def gridLevel1(self, H="const SymTensor&"):
"Find the tree level appropriate for H (units of 1/length)"
return "unsigned"
@PYB11const
def dumpTree(self, globalTree="const bool"):
"Return a dump of the tree structure as a string"
return "std::string"
@PYB11const
def dumpTreeStatistics(self, globalTree="const bool"):
"Return a string describing the overall statistics of the tree"
return "std::string"
@PYB11const
def serialize(self, buffer="std::vector<char>&"):
"Serialize the current tree state to a buffer"
return "void"
@PYB11const
def setTreeMasterList(self,
levelID = "const LevelKey",
cellID = "const CellKey",
masterList = "std::vector<int>&",
coarseNeighbors = "std::vector<int>&"):
"For our parallel algorithm it is useful to be able to set the master/coarse information based on the given (level, cell)."
return "void"
@PYB11virtual
@PYB11const
def valid(self):
"Test if the Neighbor is valid, i.e., ready to be queried for connectivity information."
return "bool"
#...........................................................................
# Properties
xmin = PYB11property("const Vector&", "xmin", doc="The minimum coordinate for the simulation bounding box")
xmax = PYB11property("const Vector&", "xmax", doc="The maximum coordinate for the simulation bounding box")
boxLength = PYB11property("double", "boxLength", doc="The maximum current cardinal coordinate distance across the bounding box")
occupiedCells = PYB11property("std::vector<std::vector<CellKey>>", "occupiedCells", doc="The encoded cell key hashes for the cells currently occupied by nodes")
#-------------------------------------------------------------------------------
# Add the virtual interface
#-------------------------------------------------------------------------------
PYB11inject(NeighborAbstractMethods, TreeNeighbor, virtual=True)
| 42.62766 | 164 | 0.550786 | 3,454 | 0.861992 | 0 | 0 | 3,482 | 0.868979 | 0 | 0 | 2,503 | 0.624657 |
384212c693fe183ca43529aa8c41250ed2908324 | 656 | py | Python | hardhat/recipes/tkdiff.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/tkdiff.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | hardhat/recipes/tkdiff.py | stangelandcl/hardhat | 1ad0c5dec16728c0243023acb9594f435ef18f9c | [
"MIT"
] | null | null | null | from .base import GnuRecipe
class TkDiffRecipe(GnuRecipe):
def __init__(self, *args, **kwargs):
super(TkDiffRecipe, self).__init__(*args, **kwargs)
self.sha256 = '734bb417184c10072eb64e8d27424533' \
'8e41b7fdeff661b5ef30e89f3e3aa357'
self.name = 'tkdiff'
self.version = '4.2'
self.depends = ['tcl', 'tk']
self.url = 'https://downloads.sourceforge.net/project/tkdiff/tkdiff/' \
'$version/tkdiff-$version.tar.gz'
self.install_args = ['cp', 'tkdiff', '%s/bin' % self.prefix_dir]
def configure(self):
pass
def compile(self):
pass
| 29.818182 | 79 | 0.592988 | 625 | 0.952744 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.306402 |
38442d4e520a2a7cf2672966f74e1bda1da32461 | 199 | py | Python | profiles_app/serializers.py | foropolo/task | bf44361537f97db4bd8efe189b16958b6c8f8491 | [
"MIT"
] | null | null | null | profiles_app/serializers.py | foropolo/task | bf44361537f97db4bd8efe189b16958b6c8f8491 | [
"MIT"
] | 7 | 2020-06-06T00:01:49.000Z | 2022-02-10T11:31:54.000Z | profiles_app/serializers.py | foropolo/task | bf44361537f97db4bd8efe189b16958b6c8f8491 | [
"MIT"
] | null | null | null | from rest_framework import serializers
class HelloSerializer(serializers.Serializer):
"""Serializes a name field for testing out APIView"""
city_name = serializers.CharField(max_length=30)
| 28.428571 | 57 | 0.788945 | 157 | 0.788945 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.266332 |
384588ab19b7b9445eb8d0e620a6a304d88dad1c | 62 | py | Python | tests/__init__.py | johnsca/charm-helpers | e1157a1edb7ef2cc478af176086998d68de0b193 | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | johnsca/charm-helpers | e1157a1edb7ef2cc478af176086998d68de0b193 | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | johnsca/charm-helpers | e1157a1edb7ef2cc478af176086998d68de0b193 | [
"Apache-2.0"
] | null | null | null | import sys
import mock
sys.modules['yum'] = mock.MagicMock()
| 12.4 | 37 | 0.725806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.080645 |
3847bfe54774d87addfadb9073d4d4d898715bfd | 1,552 | py | Python | steps/app/migrations/0006_auto_20180330_1816.py | VishalCR7/steps | 521e9317b0973795e9f2b98df9d41908ae95e042 | [
"Apache-2.0"
] | null | null | null | steps/app/migrations/0006_auto_20180330_1816.py | VishalCR7/steps | 521e9317b0973795e9f2b98df9d41908ae95e042 | [
"Apache-2.0"
] | null | null | null | steps/app/migrations/0006_auto_20180330_1816.py | VishalCR7/steps | 521e9317b0973795e9f2b98df9d41908ae95e042 | [
"Apache-2.0"
] | 3 | 2018-10-06T11:40:53.000Z | 2018-10-07T18:49:06.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-30 12:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0005_auto_20180330_1813'),
]
operations = [
migrations.AlterField(
model_name='incubator',
name='followers',
field=models.ManyToManyField(blank=True, related_name='incubator_follows', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='incubator',
name='incubated_startup',
field=models.ManyToManyField(blank=True, related_name='incubators', through='app.IncubatorStartup', to='app.Startup'),
),
migrations.AlterField(
model_name='incubator',
name='members',
field=models.ManyToManyField(blank=True, related_name='incubator_members', through='app.IncubatorMember', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='incubator',
name='ratings',
field=models.ManyToManyField(blank=True, related_name='rated_incubators', through='app.IncubatorRating', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='startup',
name='members',
field=models.ManyToManyField(blank=True, related_name='startup_members', through='app.StartupMember', to=settings.AUTH_USER_MODEL),
),
]
| 36.952381 | 147 | 0.646907 | 1,363 | 0.878222 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.251289 |
697365290d19012dbb2289c34b92ed5cceafd229 | 2,854 | py | Python | users.py | mavi0/onos-reroute-api | e3a155ce860a944563297c076e6ef4d8ac5a563e | [
"MIT"
] | null | null | null | users.py | mavi0/onos-reroute-api | e3a155ce860a944563297c076e6ef4d8ac5a563e | [
"MIT"
] | null | null | null | users.py | mavi0/onos-reroute-api | e3a155ce860a944563297c076e6ef4d8ac5a563e | [
"MIT"
] | null | null | null | import json, base64
import logging, coloredlogs
import hashlib, copy
from flask_table import Table, Col
logger = logging.getLogger(__name__)
coloredlogs.install(level='INFO')
class Users:
def __init__(self):
self.__users = self.__load_users("json/users.json")
self.__generate_hash_keys()
def __load_json(self, filename):
with open(filename) as f:
return json.load(f)
def __load_users(self, config):
try:
config = self.__load_json(config)
except:
logger.critical("Could not find configuration file: " + config)
return config
def __generate_hash_keys(self):
for user in self.__users.get("users"):
hashed = hashlib.sha256(user.get("api_key").encode())
user["hashed_api_key"] = hashed.hexdigest()
def get_key(self, username):
for user in self.__users.get("users"):
if user.get("username") == username:
return user.get("api_key")
return ""
def get_user(self, key):
for user in self.__users.get("users"):
if user.get("api_key") == key:
return user.get("username")
return ""
def get_hashed_key(self, username):
for user in self.__users.get("users"):
if user.get("username") == username:
return user.get("hashed_api_key")
return ""
def authenticate(self, key):
for user in self.__users.get("users"):
if user.get("api_key") == key:
return True
return False
    def get_level(self, key):
        # Match on api_key only; falling back to a username match would let a
        # caller read a level without presenting a valid key
        for user in self.__users.get("users"):
            if user.get("api_key") == key:
                return user.get("level")
        return 0
def get_users(self):
users = copy.deepcopy(self.__users)
for user in users.get("users"):
del user["api_key"]
return users
def get_user_table(self):
items = []
for user in self.get_users().get("users"):
items.append(Item(user.get("username"), user.get("level"), user.get("hashed_api_key")))
table = ItemTable(items)
return table.__html__()
# Declare your table
class ItemTable(Table):
classes = ['table table-dark']
name = Col('Username')
level = Col('Level')
hashed_pass = Col('Hashed API Key')
# Get some objects
class Item(object):
def __init__(self, name, level, hashed_pass):
self.name = name
self.level = level
self.hashed_pass = hashed_pass
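# Example usage (sketch; `api_key` is a hypothetical client-supplied key):
# users = Users()
# if users.authenticate(api_key):
#     print(users.get_user(api_key), users.get_level(api_key))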
| 29.122449 | 99 | 0.575333 | 2,627 | 0.920463 | 0 | 0 | 0 | 0 | 0 | 0 | 578 | 0.202523 |
6974fd265bd84e76fffa8f0a1c0eaed99e207bb7 | 841 | py | Python | tests/test_win_app.py | koyoki/Airtest | ea8391bd4819d9231e7b35f18c14662e6109fad0 | [
"Apache-2.0"
] | 6,140 | 2018-01-24T03:27:48.000Z | 2022-03-31T14:37:54.000Z | tests/test_win_app.py | koyoki/Airtest | ea8391bd4819d9231e7b35f18c14662e6109fad0 | [
"Apache-2.0"
] | 993 | 2018-02-02T11:21:40.000Z | 2022-03-31T20:41:41.000Z | tests/test_win_app.py | koyoki/Airtest | ea8391bd4819d9231e7b35f18c14662e6109fad0 | [
"Apache-2.0"
] | 1,022 | 2018-03-05T07:45:22.000Z | 2022-03-31T04:29:57.000Z | # encoding=utf-8
from airtest.core.win import Windows
import unittest
import numpy
import time
from testconf import try_remove
SNAPSHOT = "win_snapshot.png"
class TestWin(unittest.TestCase):
@classmethod
def setUpClass(cls):
w = Windows()
w.start_app("calc")
time.sleep(1)
        cls.windows = Windows(title_re=u".*计算器.*")  # u-literal works on both Python 2 and 3
def test_snapshot(self):
try_remove(SNAPSHOT)
result = self.windows.snapshot(filename=SNAPSHOT)
self.assertIsInstance(result, numpy.ndarray)
try_remove(SNAPSHOT)
def test_touch(self):
self.windows.touch((11, 11))
def test_swipe(self):
self.windows.swipe((11, 11), (100, 100))
@classmethod
def tearDownClass(cls):
cls.windows.app.kill()
if __name__ == '__main__':
unittest.main()
| 21.025 | 65 | 0.649227 | 636 | 0.750885 | 0 | 0 | 252 | 0.297521 | 0 | 0 | 72 | 0.085006 |
6975386566f8d048910ebad189266d0d942c5edc | 2,382 | py | Python | testing_app.py | michal090497/python-fruits-recognition | f9dc8f4d5e682bd68e7df8079c2648dc462f1cad | [
"MIT"
] | 1 | 2022-02-21T18:45:03.000Z | 2022-02-21T18:45:03.000Z | testing_app.py | tomasz-jankowski/python-fruits-recognition | 2ca96e412dee4a586eb8ba5c6b33d0169e5e9231 | [
"MIT"
] | null | null | null | testing_app.py | tomasz-jankowski/python-fruits-recognition | 2ca96e412dee4a586eb8ba5c6b33d0169e5e9231 | [
"MIT"
] | null | null | null | # Import required libraries
import cv2
from os.path import os, dirname
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import random
# List of categories (directories names)
CATEGORIES = ["bad_apple", "bad_grape", "bad_pear", "cherry", "good_apple", "good_avocado", "good_grape", "good_pear", "ripe_avocado"]
# TF log level 2: suppress INFO and WARNING messages, show errors only
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Hide all GPUs to force CPU execution; comment this line out to use the GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Source folder path
main_dir = dirname(os.path.abspath(__file__))
# Paths to image database (train, test, all)
training_dir = os.path.join(main_dir, 'database', 'training')
testing_dir = os.path.join(main_dir, 'database', 'testing')
all_dir = os.path.join(main_dir, 'database', 'all')
# Currently used path
DATADIR = testing_dir
# Load one sample image (from the first category) as a quick sanity check
# that the dataset path is readable
for category in CATEGORIES:
    path = os.path.join(DATADIR, category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path, img))
        break
    break
# Variable to store training data
testing_data = []
# Read every image in every category from disk and pair it with its class index
def create_testing_data():
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for img in tqdm(os.listdir(path)):
            try:
                img_array = cv2.imread(os.path.join(path, img))
                if img_array is None:  # cv2.imread returns None for unreadable files
                    continue
                testing_data.append([img_array, class_num])
            except Exception:
                # Skip files that cannot be decoded as images
                pass
# Call the function
create_testing_data()
# Shuffle test data
random.shuffle(testing_data)
# Create array variables to store objects and labels
X = []
y = []
# Save objects and labels to arrays
for features, label in testing_data:
X.append(features)
y.append(label)
# Convert arrays to NumPy matrices
X = np.array(X)
y = np.array(y)
# Change the value range from 0-255 to 0-1
X = X / 255.0
# Load the trained model from given path
keras_model_path = os.path.join(main_dir, 'models', 'test')
model = tf.keras.models.load_model(keras_model_path)
# Display model summary
model.summary()
# Display information about the effectiveness of test data classification
loss, acc = model.evaluate(X, y, verbose=2)
print('Accuracy: {:5.2f}%'.format(100 * acc))
print('Loss: {:5.2f}'.format(loss)) | 27.697674 | 134 | 0.70445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 978 | 0.410579 |
6975897181723160fe0f7a9044875607ac1022c2 | 23,657 | py | Python | scylla/datadog_checks/scylla/metrics.py | tzach/integrations-core | ac9daf60630bea4739947fe1d8df72c20bfcbc22 | [
"BSD-3-Clause"
] | null | null | null | scylla/datadog_checks/scylla/metrics.py | tzach/integrations-core | ac9daf60630bea4739947fe1d8df72c20bfcbc22 | [
"BSD-3-Clause"
] | null | null | null | scylla/datadog_checks/scylla/metrics.py | tzach/integrations-core | ac9daf60630bea4739947fe1d8df72c20bfcbc22 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# metrics namespaced under 'scylla'
SCYLLA_ALIEN = {
'scylla_alien_receive_batch_queue_length': 'alien.receive_batch_queue_length',
'scylla_alien_total_received_messages': 'alien.total_received_messages',
'scylla_alien_total_sent_messages': 'alien.total_sent_messages',
}
SCYLLA_BATCHLOG = {
'scylla_batchlog_manager_total_write_replay_attempts': 'batchlog_manager.total_write_replay_attempts',
}
SCYLLA_CACHE = {
'scylla_cache_active_reads': 'cache.active_reads',
'scylla_cache_bytes_total': 'cache.bytes_total',
'scylla_cache_bytes_used': 'cache.bytes_used',
'scylla_cache_concurrent_misses_same_key': 'cache.concurrent_misses_same_key',
'scylla_cache_mispopulations': 'cache.mispopulations',
'scylla_cache_partition_evictions': 'cache.partition_evictions',
'scylla_cache_partition_hits': 'cache.partition_hits',
'scylla_cache_partition_insertions': 'cache.partition_insertions',
'scylla_cache_partition_merges': 'cache.partition_merges',
'scylla_cache_partition_misses': 'cache.partition_misses',
'scylla_cache_partition_removals': 'cache.partition_removals',
'scylla_cache_partitions': 'cache.partitions',
'scylla_cache_pinned_dirty_memory_overload': 'cache.pinned_dirty_memory_overload',
'scylla_cache_reads': 'cache.reads',
'scylla_cache_reads_with_misses': 'cache.reads_with_misses',
'scylla_cache_row_evictions': 'cache.row_evictions',
'scylla_cache_row_hits': 'cache.row_hits',
'scylla_cache_row_insertions': 'cache.row_insertions',
'scylla_cache_row_misses': 'cache.row_misses',
'scylla_cache_row_removals': 'cache.row_removals',
'scylla_cache_rows': 'cache.rows',
'scylla_cache_rows_dropped_from_memtable': 'cache.rows_dropped_from_memtable',
'scylla_cache_rows_merged_from_memtable': 'cache.rows_merged_from_memtable',
'scylla_cache_rows_processed_from_memtable': 'cache.rows_processed_from_memtable',
'scylla_cache_sstable_partition_skips': 'cache.sstable_partition_skips',
'scylla_cache_sstable_reader_recreations': 'cache.sstable_reader_recreations',
'scylla_cache_sstable_row_skips': 'cache.sstable_row_skips',
'scylla_cache_static_row_insertions': 'cache.static_row_insertions',
}
SCYLLA_COMMITLOG = {
'scylla_commitlog_alloc': 'commitlog.alloc',
'scylla_commitlog_allocating_segments': 'commitlog.allocating_segments',
'scylla_commitlog_bytes_written': 'commitlog.bytes_written',
'scylla_commitlog_cycle': 'commitlog.cycle',
'scylla_commitlog_disk_total_bytes': 'commitlog.disk_total_bytes',
'scylla_commitlog_flush': 'commitlog.flush',
'scylla_commitlog_flush_limit_exceeded': 'commitlog.flush_limit_exceeded',
'scylla_commitlog_memory_buffer_bytes': 'commitlog.memory_buffer_bytes',
'scylla_commitlog_pending_allocations': 'commitlog.pending_allocations',
'scylla_commitlog_pending_flushes': 'commitlog.pending_flushes',
'scylla_commitlog_requests_blocked_memory': 'commitlog.requests_blocked_memory',
'scylla_commitlog_segments': 'commitlog.segments',
'scylla_commitlog_slack': 'commitlog.slack',
'scylla_commitlog_unused_segments': 'commitlog.unused_segments',
}
SCYLLA_COMPACTION = {
'scylla_compaction_manager_compactions': 'compaction_manager.compactions',
}
SCYLLA_CQL = {
'scylla_cql_authorized_prepared_statements_cache_evictions': 'cql.authorized_prepared_statements_cache_evictions',
'scylla_cql_authorized_prepared_statements_cache_size': 'cql.authorized_prepared_statements_cache_size',
'scylla_cql_batches': 'cql.batches',
'scylla_cql_batches_pure_logged': 'cql.batches_pure_logged',
'scylla_cql_batches_pure_unlogged': 'cql.batches_pure_unlogged',
'scylla_cql_batches_unlogged_from_logged': 'cql.batches_unlogged_from_logged',
'scylla_cql_deletes': 'cql.deletes',
'scylla_cql_filtered_read_requests': 'cql.filtered_read_requests',
'scylla_cql_filtered_rows_dropped_total': 'cql.filtered_rows_dropped_total',
'scylla_cql_filtered_rows_matched_total': 'cql.filtered_rows_matched_total',
'scylla_cql_filtered_rows_read_total': 'cql.filtered_rows_read_total',
'scylla_cql_inserts': 'cql.inserts',
'scylla_cql_prepared_cache_evictions': 'cql.prepared_cache_evictions',
'scylla_cql_prepared_cache_memory_footprint': 'cql.prepared_cache_memory_footprint',
'scylla_cql_prepared_cache_size': 'cql.prepared_cache_size',
'scylla_cql_reads': 'cql.reads',
'scylla_cql_reverse_queries': 'cql.reverse_queries',
'scylla_cql_rows_read': 'cql.rows_read',
'scylla_cql_secondary_index_creates': 'cql.secondary_index_creates',
'scylla_cql_secondary_index_drops': 'cql.secondary_index_drops',
'scylla_cql_secondary_index_reads': 'cql.secondary_index_reads',
'scylla_cql_secondary_index_rows_read': 'cql.secondary_index_rows_read',
'scylla_cql_statements_in_batches': 'cql.statements_in_batches',
'scylla_cql_unpaged_select_queries': 'cql.unpaged_select_queries',
'scylla_cql_updates': 'cql.updates',
'scylla_cql_user_prepared_auth_cache_footprint': 'cql.user_prepared_auth_cache_footprint',
}
SCYLLA_DATABASE = {
'scylla_database_active_reads': 'database.active_reads',
'scylla_database_active_reads_memory_consumption': 'database.active_reads_memory_consumption',
'scylla_database_clustering_filter_count': 'database.clustering_filter_count',
'scylla_database_clustering_filter_fast_path_count': 'database.clustering_filter_fast_path_count',
'scylla_database_clustering_filter_sstables_checked': 'database.clustering_filter_sstables_checked',
'scylla_database_clustering_filter_surviving_sstables': 'database.clustering_filter_surviving_sstables',
'scylla_database_counter_cell_lock_acquisition': 'database.counter_cell_lock_acquisition',
'scylla_database_counter_cell_lock_pending': 'database.counter_cell_lock_pending',
'scylla_database_dropped_view_updates': 'database.dropped_view_updates',
'scylla_database_large_partition_exceeding_threshold': 'database.large_partition_exceeding_threshold',
'scylla_database_multishard_query_failed_reader_saves': 'database.multishard_query_failed_reader_saves',
'scylla_database_multishard_query_failed_reader_stops': 'database.multishard_query_failed_reader_stops',
'scylla_database_multishard_query_unpopped_bytes': 'database.multishard_query_unpopped_bytes',
'scylla_database_multishard_query_unpopped_fragments': 'database.multishard_query_unpopped_fragments',
'scylla_database_paused_reads': 'database.paused_reads',
'scylla_database_paused_reads_permit_based_evictions': 'database.paused_reads_permit_based_evictions',
'scylla_database_querier_cache_drops': 'database.querier_cache_drops',
'scylla_database_querier_cache_lookups': 'database.querier_cache_lookups',
'scylla_database_querier_cache_memory_based_evictions': 'database.querier_cache_memory_based_evictions',
'scylla_database_querier_cache_misses': 'database.querier_cache_misses',
'scylla_database_querier_cache_population': 'database.querier_cache_population',
'scylla_database_querier_cache_resource_based_evictions': 'database.querier_cache_resource_based_evictions',
'scylla_database_querier_cache_time_based_evictions': 'database.querier_cache_time_based_evictions',
'scylla_database_queued_reads': 'database.queued_reads',
'scylla_database_requests_blocked_memory': 'database.requests_blocked_memory',
'scylla_database_requests_blocked_memory_current': 'database.requests_blocked_memory_current',
'scylla_database_short_data_queries': 'database.short_data_queries',
'scylla_database_short_mutation_queries': 'database.short_mutation_queries',
'scylla_database_sstable_read_queue_overloads': 'database.sstable_read_queue_overloads',
'scylla_database_total_reads': 'database.total_reads',
'scylla_database_total_reads_failed': 'database.total_reads_failed',
'scylla_database_total_result_bytes': 'database.total_result_bytes',
'scylla_database_total_view_updates_failed_local': 'database.total_view_updates_failed_local',
'scylla_database_total_view_updates_failed_remote': 'database.total_view_updates_failed_remote',
'scylla_database_total_view_updates_pushed_local': 'database.total_view_updates_pushed_local',
'scylla_database_total_view_updates_pushed_remote': 'database.total_view_updates_pushed_remote',
'scylla_database_total_writes': 'database.total_writes',
'scylla_database_total_writes_failed': 'database.total_writes_failed',
'scylla_database_total_writes_timedout': 'database.total_writes_timedout',
'scylla_database_view_building_paused': 'database.view_building_paused',
'scylla_database_view_update_backlog': 'database.view_update_backlog',
}
SCYLLA_EXECUTION = {
'scylla_execution_stages_function_calls_enqueued': 'execution_stages.function_calls_enqueued',
'scylla_execution_stages_function_calls_executed': 'execution_stages.function_calls_executed',
'scylla_execution_stages_tasks_preempted': 'execution_stages.tasks_preempted',
'scylla_execution_stages_tasks_scheduled': 'execution_stages.tasks_scheduled',
}
SCYLLA_GOSSIP = {
'scylla_gossip_heart_beat': 'gossip.heart_beat',
}
SCYLLA_HINTS = {
'scylla_hints_for_views_manager_corrupted_files': 'hints.for_views_manager_corrupted_files',
'scylla_hints_for_views_manager_discarded': 'hints.for_views_manager_discarded',
'scylla_hints_for_views_manager_dropped': 'hints.for_views_manager_dropped',
'scylla_hints_for_views_manager_errors': 'hints.for_views_manager_errors',
'scylla_hints_for_views_manager_sent': 'hints.for_views_manager_sent',
'scylla_hints_for_views_manager_size_of_hints_in_progress': 'hints.for_views_manager_size_of_hints_in_progress',
'scylla_hints_for_views_manager_written': 'hints.for_views_manager_written',
'scylla_hints_manager_corrupted_files': 'hints.manager_corrupted_files',
'scylla_hints_manager_discarded': 'hints.manager_discarded',
'scylla_hints_manager_dropped': 'hints.manager_dropped',
'scylla_hints_manager_errors': 'hints.manager_errors',
'scylla_hints_manager_sent': 'hints.manager_sent',
'scylla_hints_manager_size_of_hints_in_progress': 'hints.manager_size_of_hints_in_progress',
'scylla_hints_manager_written': 'hints.manager_written',
}
SCYLLA_HTTPD = {
'scylla_httpd_connections_current': 'httpd.connections_current',
'scylla_httpd_connections_total': 'httpd.connections_total',
'scylla_httpd_read_errors': 'httpd.read_errors',
'scylla_httpd_reply_errors': 'httpd.reply_errors',
'scylla_httpd_requests_served': 'httpd.requests_served',
}
SCYLLA_IO = {
'scylla_io_queue_delay': 'io_queue.delay',
'scylla_io_queue_queue_length': 'io_queue.queue_length',
'scylla_io_queue_shares': 'io_queue.shares',
'scylla_io_queue_total_bytes': 'io_queue.total_bytes',
'scylla_io_queue_total_operations': 'io_queue.total_operations',
}
SCYLLA_LSA = {
'scylla_lsa_free_space': 'lsa.free_space',
'scylla_lsa_large_objects_total_space_bytes': 'lsa.large_objects_total_space_bytes',
'scylla_lsa_memory_allocated': 'lsa.memory_allocated',
'scylla_lsa_memory_compacted': 'lsa.memory_compacted',
'scylla_lsa_non_lsa_used_space_bytes': 'lsa.non_lsa_used_space_bytes',
'scylla_lsa_occupancy': 'lsa.occupancy',
'scylla_lsa_segments_compacted': 'lsa.segments_compacted',
'scylla_lsa_segments_migrated': 'lsa.segments_migrated',
'scylla_lsa_small_objects_total_space_bytes': 'lsa.small_objects_total_space_bytes',
'scylla_lsa_small_objects_used_space_bytes': 'lsa.small_objects_used_space_bytes',
'scylla_lsa_total_space_bytes': 'lsa.total_space_bytes',
'scylla_lsa_used_space_bytes': 'lsa.used_space_bytes',
}
SCYLLA_MEMORY = {
'scylla_memory_allocated_memory': 'memory.allocated_memory',
'scylla_memory_cross_cpu_free_operations': 'memory.cross_cpu_free_operations',
'scylla_memory_dirty_bytes': 'memory.dirty_bytes',
'scylla_memory_free_memory': 'memory.free_memory',
'scylla_memory_free_operations': 'memory.free_operations',
'scylla_memory_malloc_live_objects': 'memory.malloc_live_objects',
'scylla_memory_malloc_operations': 'memory.malloc_operations',
'scylla_memory_reclaims_operations': 'memory.reclaims_operations',
'scylla_memory_regular_dirty_bytes': 'memory.regular_dirty_bytes',
'scylla_memory_regular_virtual_dirty_bytes': 'memory.regular_virtual_dirty_bytes',
'scylla_memory_streaming_dirty_bytes': 'memory.streaming_dirty_bytes',
'scylla_memory_streaming_virtual_dirty_bytes': 'memory.streaming_virtual_dirty_bytes',
'scylla_memory_system_dirty_bytes': 'memory.system_dirty_bytes',
'scylla_memory_system_virtual_dirty_bytes': 'memory.system_virtual_dirty_bytes',
'scylla_memory_total_memory': 'memory.total_memory',
'scylla_memory_virtual_dirty_bytes': 'memory.virtual_dirty_bytes',
}
SCYLLA_MEMTABLES = {
'scylla_memtables_pending_flushes': 'memtables.pending_flushes',
'scylla_memtables_pending_flushes_bytes': 'memtables.pending_flushes_bytes',
}
SCYLLA_NODE = {
'scylla_node_operation_mode': 'node.operation_mode',
}
SCYLLA_QUERY = {
'scylla_query_processor_queries': 'query_processor.queries',
'scylla_query_processor_statements_prepared': 'query_processor.statements_prepared',
}
SCYLLA_REACTOR = {
'scylla_reactor_aio_bytes_read': 'reactor.aio_bytes_read',
'scylla_reactor_aio_bytes_write': 'reactor.aio_bytes_write',
'scylla_reactor_aio_errors': 'reactor.aio_errors',
'scylla_reactor_aio_reads': 'reactor.aio_reads',
'scylla_reactor_aio_writes': 'reactor.aio_writes',
'scylla_reactor_cpp_exceptions': 'reactor.cpp_exceptions',
'scylla_reactor_cpu_busy_ms': 'reactor.cpu_busy_ms',
'scylla_reactor_cpu_steal_time_ms': 'reactor.cpu_steal_time_ms',
'scylla_reactor_fstream_read_bytes': 'reactor.fstream_read_bytes',
'scylla_reactor_fstream_read_bytes_blocked': 'reactor.fstream_read_bytes_blocked',
'scylla_reactor_fstream_reads': 'reactor.fstream_reads',
'scylla_reactor_fstream_reads_ahead_bytes_discarded': 'reactor.fstream_reads_ahead_bytes_discarded',
'scylla_reactor_fstream_reads_aheads_discarded': 'reactor.fstream_reads_aheads_discarded',
'scylla_reactor_fstream_reads_blocked': 'reactor.fstream_reads_blocked',
'scylla_reactor_fsyncs': 'reactor.fsyncs',
'scylla_reactor_io_queue_requests': 'reactor.io_queue_requests',
'scylla_reactor_io_threaded_fallbacks': 'reactor.io_threaded_fallbacks',
'scylla_reactor_logging_failures': 'reactor.logging_failures',
'scylla_reactor_polls': 'reactor.polls',
'scylla_reactor_tasks_pending': 'reactor.tasks_pending',
'scylla_reactor_tasks_processed': 'reactor.tasks_processed',
'scylla_reactor_timers_pending': 'reactor.timers_pending',
'scylla_reactor_utilization': 'reactor.utilization',
}
SCYLLA_SCHEDULER = {
'scylla_scheduler_queue_length': 'scheduler.queue_length',
'scylla_scheduler_runtime_ms': 'scheduler.runtime_ms',
'scylla_scheduler_shares': 'scheduler.shares',
'scylla_scheduler_tasks_processed': 'scheduler.tasks_processed',
'scylla_scheduler_time_spent_on_task_quota_violations_ms': 'scheduler.time_spent_on_task_quota_violations_ms',
}
SCYLLA_SSTABLES = {
'scylla_sstables_capped_local_deletion_time': 'sstables.capped_local_deletion_time',
'scylla_sstables_capped_tombstone_deletion_time': 'sstables.capped_tombstone_deletion_time',
'scylla_sstables_cell_tombstone_writes': 'sstables.cell_tombstone_writes',
'scylla_sstables_cell_writes': 'sstables.cell_writes',
'scylla_sstables_index_page_blocks': 'sstables.index_page_blocks',
'scylla_sstables_index_page_hits': 'sstables.index_page_hits',
'scylla_sstables_index_page_misses': 'sstables.index_page_misses',
'scylla_sstables_partition_reads': 'sstables.partition_reads',
'scylla_sstables_partition_seeks': 'sstables.partition_seeks',
'scylla_sstables_partition_writes': 'sstables.partition_writes',
'scylla_sstables_range_partition_reads': 'sstables.range_partition_reads',
'scylla_sstables_range_tombstone_writes': 'sstables.range_tombstone_writes',
'scylla_sstables_row_reads': 'sstables.row_reads',
'scylla_sstables_row_writes': 'sstables.row_writes',
'scylla_sstables_single_partition_reads': 'sstables.single_partition_reads',
'scylla_sstables_sstable_partition_reads': 'sstables.sstable_partition_reads',
'scylla_sstables_static_row_writes': 'sstables.static_row_writes',
'scylla_sstables_tombstone_writes': 'sstables.tombstone_writes',
}
SCYLLA_STORAGE = {
# Scylla 3.1
'scylla_storage_proxy_coordinator_background_read_repairs': 'storage.proxy.coordinator_background_read_repairs',
'scylla_storage_proxy_coordinator_background_reads': 'storage.proxy.coordinator_background_reads',
'scylla_storage_proxy_coordinator_background_replica_writes_failed_local_node': 'storage.proxy.coordinator_background_replica_writes_failed_local_node', # noqa E501
'scylla_storage_proxy_coordinator_background_write_bytes': 'storage.proxy.coordinator_background_write_bytes',
'scylla_storage_proxy_coordinator_background_writes': 'storage.proxy.coordinator_background_writes',
'scylla_storage_proxy_coordinator_background_writes_failed': 'storage.proxy.coordinator_background_writes_failed',
'scylla_storage_proxy_coordinator_canceled_read_repairs': 'storage.proxy.coordinator_canceled_read_repairs',
'scylla_storage_proxy_coordinator_completed_reads_local_node': 'storage.proxy.coordinator_completed_reads_local_node', # noqa E501
'scylla_storage_proxy_coordinator_current_throttled_base_writes': 'storage.proxy.coordinator_current_throttled_base_writes', # noqa E501
'scylla_storage_proxy_coordinator_current_throttled_writes': 'storage.proxy.coordinator_current_throttled_writes',
'scylla_storage_proxy_coordinator_foreground_read_repair': 'storage.proxy.coordinator_foreground_read_repair',
'scylla_storage_proxy_coordinator_foreground_reads': 'storage.proxy.coordinator_foreground_reads',
'scylla_storage_proxy_coordinator_foreground_writes': 'storage.proxy.coordinator_foreground_writes',
'scylla_storage_proxy_coordinator_last_mv_flow_control_delay': 'storage.proxy.coordinator_last_mv_flow_control_delay', # noqa E501
'scylla_storage_proxy_coordinator_queued_write_bytes': 'storage.proxy.coordinator_queued_write_bytes',
'scylla_storage_proxy_coordinator_range_timeouts': 'storage.proxy.coordinator_range_timeouts',
'scylla_storage_proxy_coordinator_range_unavailable': 'storage.proxy.coordinator_range_unavailable',
'scylla_storage_proxy_coordinator_read_errors_local_node': 'storage.proxy.coordinator_read_errors_local_node',
'scylla_storage_proxy_coordinator_read_latency': 'storage.proxy.coordinator_read_latency',
'scylla_storage_proxy_coordinator_read_repair_write_attempts_local_node': 'storage.proxy.coordinator_read_repair_write_attempts_local_node', # noqa E501
'scylla_storage_proxy_coordinator_read_retries': 'storage.proxy.coordinator_read_retries',
'scylla_storage_proxy_coordinator_read_timeouts': 'storage.proxy.coordinator_read_timeouts',
'scylla_storage_proxy_coordinator_read_unavailable': 'storage.proxy.coordinator_read_unavailable',
'scylla_storage_proxy_coordinator_reads_local_node': 'storage.proxy.coordinator_reads_local_node',
'scylla_storage_proxy_coordinator_speculative_data_reads': 'storage.proxy.coordinator_speculative_data_reads',
'scylla_storage_proxy_coordinator_speculative_digest_reads': 'storage.proxy.coordinator_speculative_digest_reads',
'scylla_storage_proxy_coordinator_throttled_writes': 'storage.proxy.coordinator_throttled_writes',
'scylla_storage_proxy_coordinator_total_write_attempts_local_node': 'storage.proxy.coordinator_total_write_attempts_local_node', # noqa E501
'scylla_storage_proxy_coordinator_write_errors_local_node': 'storage.proxy.coordinator_write_errors_local_node',
'scylla_storage_proxy_coordinator_write_latency': 'storage.proxy.coordinator_write_latency',
'scylla_storage_proxy_coordinator_write_timeouts': 'storage.proxy.coordinator_write_timeouts',
'scylla_storage_proxy_coordinator_write_unavailable': 'storage.proxy.coordinator_write_unavailable',
'scylla_storage_proxy_replica_cross_shard_ops': 'storage.proxy.replica_cross_shard_ops',
'scylla_storage_proxy_replica_forwarded_mutations': 'storage.proxy.replica_forwarded_mutations',
'scylla_storage_proxy_replica_forwarding_errors': 'storage.proxy.replica_forwarding_errors',
'scylla_storage_proxy_replica_reads': 'storage.proxy.replica_reads',
'scylla_storage_proxy_replica_received_counter_updates': 'storage.proxy.replica_received_counter_updates',
'scylla_storage_proxy_replica_received_mutations': 'storage.proxy.replica_received_mutations',
# Scylla 3.2 - renamed
'scylla_storage_proxy_coordinator_foreground_read_repairs': 'storage.proxy.coordinator_foreground_read_repair',
}
SCYLLA_STREAMING = {
'scylla_streaming_total_incoming_bytes': 'streaming.total_incoming_bytes',
'scylla_streaming_total_outgoing_bytes': 'streaming.total_outgoing_bytes',
}
SCYLLA_THRIFT = {
'scylla_thrift_current_connections': 'thrift.current_connections',
'scylla_thrift_served': 'thrift.served',
'scylla_thrift_thrift_connections': 'thrift.thrift_connections',
}
SCYLLA_TRACING = {
'scylla_tracing_active_sessions': 'tracing.active_sessions',
'scylla_tracing_cached_records': 'tracing.cached_records',
'scylla_tracing_dropped_records': 'tracing.dropped_records',
'scylla_tracing_dropped_sessions': 'tracing.dropped_sessions',
'scylla_tracing_flushing_records': 'tracing.flushing_records',
'scylla_tracing_keyspace_helper_bad_column_family_errors': 'tracing.keyspace_helper_bad_column_family_errors',
'scylla_tracing_keyspace_helper_tracing_errors': 'tracing.keyspace_helper_tracing_errors',
'scylla_tracing_pending_for_write_records': 'tracing.pending_for_write_records',
'scylla_tracing_trace_errors': 'tracing.trace_errors',
'scylla_tracing_trace_records_count': 'tracing.trace_records_count',
}
SCYLLA_TRANSPORT = {
'scylla_transport_cql_connections': 'transport.cql_connections',
'scylla_transport_current_connections': 'transport.current_connections',
'scylla_transport_requests_blocked_memory': 'transport.requests_blocked_memory',
'scylla_transport_requests_blocked_memory_current': 'transport.requests_blocked_memory_current',
'scylla_transport_requests_served': 'transport.requests_served',
'scylla_transport_requests_serving': 'transport.requests_serving',
}
INSTANCE_DEFAULT_METRICS = [
SCYLLA_CACHE,
SCYLLA_COMPACTION,
SCYLLA_GOSSIP,
SCYLLA_NODE,
SCYLLA_REACTOR,
SCYLLA_STORAGE,
SCYLLA_STREAMING,
SCYLLA_TRANSPORT,
]
ADDITIONAL_METRICS_MAP = {
'scylla.alien': SCYLLA_ALIEN,
'scylla.batchlog': SCYLLA_BATCHLOG,
'scylla.commitlog': SCYLLA_COMMITLOG,
'scylla.cql': SCYLLA_CQL,
'scylla.database': SCYLLA_DATABASE,
'scylla.execution': SCYLLA_EXECUTION,
'scylla.hints': SCYLLA_HINTS,
'scylla.httpd': SCYLLA_HTTPD,
'scylla.io': SCYLLA_IO,
'scylla.lsa': SCYLLA_LSA,
'scylla.memory': SCYLLA_MEMORY,
'scylla.memtables': SCYLLA_MEMTABLES,
'scylla.query': SCYLLA_QUERY,
'scylla.scheduler': SCYLLA_SCHEDULER,
'scylla.sstables': SCYLLA_SSTABLES,
'scylla.thrift': SCYLLA_THRIFT,
'scylla.tracing': SCYLLA_TRACING,
}
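
# Hedged helper (added for illustration; the check's real wiring lives in the
# integration code, not in this module): merge the always-collected metric
# groups with any optional groups enabled by name.
def build_metric_map(extra_groups=()):
    metrics = {}
    for group in INSTANCE_DEFAULT_METRICS:
        metrics.update(group)
    for name in extra_groups:
        metrics.update(ADDITIONAL_METRICS_MAP.get(name, {}))
    return metrics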
| 59.739899 | 169 | 0.820518 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20,250 | 0.855983 |
69773b443e3d861e0a9c9951bc4171e69df9c2e7 | 4,820 | py | Python | simple_rest_client/resource.py | Manre/python-simple-rest-client | d8f0a6fa8ea27fcbe17d38059345649c0eabff3b | [
"MIT"
] | 1 | 2021-08-19T00:35:28.000Z | 2021-08-19T00:35:28.000Z | simple_rest_client/resource.py | Manre/python-simple-rest-client | d8f0a6fa8ea27fcbe17d38059345649c0eabff3b | [
"MIT"
] | null | null | null | simple_rest_client/resource.py | Manre/python-simple-rest-client | d8f0a6fa8ea27fcbe17d38059345649c0eabff3b | [
"MIT"
] | null | null | null | import logging
from types import MethodType
from urllib.parse import urljoin
import aiohttp
import requests
from json_encoder import json
from .exceptions import ActionNotFound, ActionURLMatchError
from .models import Request
from .request import make_request, make_async_request
logger = logging.getLogger(__name__)
class BaseResource:
    """Shared configuration and action/URL resolution for sync and async resources."""

    actions = {}
def __init__(self, api_root_url=None, resource_name=None, params=None,
headers=None, timeout=None, append_slash=False,
json_encode_body=False):
self.api_root_url = api_root_url
self.resource_name = resource_name
self.params = params or {}
self.headers = headers or {}
self.timeout = timeout or 3
self.append_slash = append_slash
self.json_encode_body = json_encode_body
self.actions = self.actions or self.default_actions
@property
def default_actions(self):
return {
'list': {
'method': 'GET',
'url': self.resource_name
},
'create': {
'method': 'POST',
'url': self.resource_name
},
'retrieve': {
'method': 'GET',
'url': self.resource_name + '/{}',
},
'update': {
'method': 'PUT',
'url': self.resource_name + '/{}',
},
'partial_update': {
'method': 'PATCH',
'url': self.resource_name + '/{}',
},
'destroy': {
'method': 'DELETE',
'url': self.resource_name + '/{}',
},
}
def get_action(self, action_name):
try:
return self.actions[action_name]
except KeyError:
raise ActionNotFound('action "{}" not found'.format(action_name))
    def get_action_full_url(self, action_name, *parts):
        action = self.get_action(action_name)
        try:
            url = action['url'].format(*parts)
        except IndexError:
            raise ActionURLMatchError('No url match for "{}"'.format(action_name))
        if self.append_slash and not url.endswith('/'):
            url += '/'
        # Build the join locally instead of mutating self.api_root_url as a
        # side effect of every call
        api_root_url = self.api_root_url
        if not api_root_url.endswith('/'):
            api_root_url += '/'
        return api_root_url + url
def get_action_method(self, action_name):
action = self.get_action(action_name)
return action['method']
class Resource(BaseResource):
    """Synchronous resource; each configured action becomes a bound method backed by requests."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.session = requests.Session()
for action_name in self.actions.keys():
self.add_action(action_name)
def add_action(self, action_name):
def action_method(self, *args, body=None, params=None, headers=None,
action_name=action_name, **kwargs):
url = self.get_action_full_url(action_name, *args)
method = self.get_action_method(action_name)
if self.json_encode_body and body:
body = json.dumps(body)
request = Request(
url=url,
method=method,
params=params or {},
body=body,
headers=headers or {},
timeout=self.timeout,
kwargs=kwargs
)
request.params.update(self.params)
request.headers.update(self.headers)
return make_request(self.session, request)
setattr(self, action_name, MethodType(action_method, self))
class AsyncResource(BaseResource):
    """Asynchronous resource; each configured action becomes an awaitable method backed by aiohttp."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for action_name in self.actions.keys():
self.add_action(action_name)
def add_action(self, action_name):
async def action_method(self, *args, body=None, params=None,
headers=None, action_name=action_name,
**kwargs):
url = self.get_action_full_url(action_name, *args)
method = self.get_action_method(action_name)
if self.json_encode_body and body:
body = json.dumps(body)
request = Request(
url=url,
method=method,
params=params or {},
body=body,
headers=headers or {},
timeout=self.timeout,
kwargs=kwargs
)
request.params.update(self.params)
request.headers.update(self.headers)
async with aiohttp.ClientSession() as session:
return await make_async_request(session, request)
setattr(self, action_name, MethodType(action_method, self))
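
# Hedged usage sketch (added for illustration; the root URL and 'users'
# resource are hypothetical): the default actions become bound methods.
#
#   users = Resource(api_root_url='https://api.example.com', resource_name='users')
#   users.list()                      # GET  https://api.example.com/users
#   users.retrieve(1)                 # GET  https://api.example.com/users/1
#   users.create(body={'name': 'x'})  # POST https://api.example.com/users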
| 33.706294 | 82 | 0.556017 | 4,491 | 0.931743 | 0 | 0 | 802 | 0.16639 | 874 | 0.181328 | 262 | 0.054357 |