blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
34cc1bf223f7f56c695b00d0565cca6f6dc989a5 | 8ee8fe3c2acea497a85428bfb3dfde19e58b2bc3 | /test-examples/nD_mixed.py | 83eb382991ac4b297956c5f27d493014c7047df8 | [
"BSD-3-Clause"
] | permissive | sofroniewn/image-demos | a6e46f08fd4ce621aa96d6b6378b50f63ac2b381 | 2eeeb23f34a47798ae7be0987182724ee3799eb8 | refs/heads/master | 2022-11-02T23:50:23.098830 | 2022-10-30T04:38:19 | 2022-10-30T04:38:19 | 179,378,745 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | """
Slide through 3D Volume series in 4D data using the add_volume API
"""
from skimage import data
import numpy as np
import napari
# Run inside the Qt event loop so the viewer window stays open for the demo.
with napari.gui_qt():
    # Build a 4D stack: 10 independent 3D binary-blob volumes, cast to float.
    blobs = np.asarray(
        [
            data.binary_blobs(length=64, volume_fraction=0.1, n_dim=3).astype(
                float
            )
            for i in range(10)
        ]
    )
    viewer = napari.Viewer()
    # add the volume
    volume = viewer.add_image(blobs, ndisplay=3)  # rendered as a 3D volume
    # NOTE(review): `slice` shadows the builtin name; presumably intentional in a demo script
    slice = viewer.add_image(blobs)
    # assumes `dims.sliced` picks which axis is browsed with the slider -- TODO confirm against napari API
    slice.dims.sliced = 0
| [
"sofroniewn@gmail.com"
] | sofroniewn@gmail.com |
db2d32e8cc23e04e50efca093c02779c6dae5ac3 | 2ee5578fb824633d7d8f3500726bf2935caff977 | /Windows_Python_packages_for_KLayout/python/shapely/tests/test_creation.py | 88a54c72f1eb04c3886ba39926aa98e46cbed6f0 | [
"MIT"
] | permissive | SiEPIC/SiEPIC-Tools | d214a10e213bc8265eb0b0ab979d1f7a232cdccc | ec841c1c22b8c59982d8759e073fbc54fbdc8c54 | refs/heads/master | 2023-05-23T11:03:13.516258 | 2023-05-15T19:04:55 | 2023-05-15T19:04:55 | 113,897,603 | 103 | 59 | NOASSERTION | 2023-09-13T23:10:11 | 2017-12-11T19:10:13 | Python | UTF-8 | Python | false | false | 16,408 | py | import numpy as np
import pytest
import shapely
# Note: Point is not imported because it is overridden for testing
from shapely import (
GeometryCollection,
GeometryType,
LinearRing,
LineString,
MultiLineString,
MultiPoint,
MultiPolygon,
Polygon,
)
from shapely.testing import assert_geometries_equal
from .common import (
empty_polygon,
geometry_collection,
line_string,
linear_ring,
multi_line_string,
multi_point,
multi_polygon,
point,
polygon,
)
def box_tpl(x1, y1, x2, y2):
    """Return the closed, counter-clockwise ring of an axis-aligned box.

    The ring starts and ends at (x2, y1) so the first and last coordinates
    coincide, as required for a valid linear ring.
    """
    corners = [(x2, y1), (x2, y2), (x1, y2), (x1, y1)]
    corners.append(corners[0])  # close the ring
    return tuple(corners)
def test_points_from_coords():
actual = shapely.points([[0, 0], [2, 2]])
assert_geometries_equal(actual, [shapely.Point(0, 0), shapely.Point(2, 2)])
def test_points_from_xy():
actual = shapely.points(2, [0, 1])
assert_geometries_equal(actual, [shapely.Point(2, 0), shapely.Point(2, 1)])
def test_points_from_xyz():
actual = shapely.points(1, 1, [0, 1])
assert_geometries_equal(actual, [shapely.Point(1, 1, 0), shapely.Point(1, 1, 1)])
def test_points_invalid_ndim():
with pytest.raises(ValueError, match="dimension should be 2 or 3, got 4"):
shapely.points([0, 1, 2, 3])
with pytest.raises(ValueError, match="dimension should be 2 or 3, got 1"):
shapely.points([0])
@pytest.mark.skipif(shapely.geos_version < (3, 10, 0), reason="GEOS < 3.10")
def test_points_nan_becomes_empty():
actual = shapely.points(np.nan, np.nan)
assert_geometries_equal(actual, shapely.Point())
def test_linestrings_from_coords():
actual = shapely.linestrings([[[0, 0], [1, 1]], [[0, 0], [2, 2]]])
assert_geometries_equal(
actual,
[
LineString([(0, 0), (1, 1)]),
LineString([(0, 0), (2, 2)]),
],
)
def test_linestrings_from_xy():
actual = shapely.linestrings([0, 1], [2, 3])
assert_geometries_equal(actual, LineString([(0, 2), (1, 3)]))
def test_linestrings_from_xy_broadcast():
x = [0, 1] # the same X coordinates for both linestrings
y = [2, 3], [4, 5] # each linestring has a different set of Y coordinates
actual = shapely.linestrings(x, y)
assert_geometries_equal(
actual,
[
LineString([(0, 2), (1, 3)]),
LineString([(0, 4), (1, 5)]),
],
)
def test_linestrings_from_xyz():
actual = shapely.linestrings([0, 1], [2, 3], 0)
assert_geometries_equal(actual, LineString([(0, 2, 0), (1, 3, 0)]))
@pytest.mark.parametrize("dim", [2, 3])
def test_linestrings_buffer(dim):
coords = np.random.randn(10, 3, dim)
coords1 = np.asarray(coords, order="C")
result1 = shapely.linestrings(coords1)
coords2 = np.asarray(coords1, order="F")
result2 = shapely.linestrings(coords2)
assert_geometries_equal(result1, result2)
# creating (.., 8, 8*3) strided array so it uses copyFromArrays
coords3 = np.asarray(np.swapaxes(np.swapaxes(coords, 0, 2), 1, 0), order="F")
coords3 = np.swapaxes(np.swapaxes(coords3, 0, 2), 1, 2)
result3 = shapely.linestrings(coords3)
assert_geometries_equal(result1, result3)
def test_linestrings_invalid_shape_scalar():
with pytest.raises(ValueError):
shapely.linestrings((1, 1))
@pytest.mark.parametrize(
"shape",
[
(2, 1, 2), # 2 linestrings of 1 2D point
(1, 1, 2), # 1 linestring of 1 2D point
(1, 2), # 1 linestring of 1 2D point (scalar)
],
)
def test_linestrings_invalid_shape(shape):
with pytest.raises(shapely.GEOSException):
shapely.linestrings(np.ones(shape))
def test_linestrings_invalid_ndim():
msg = r"The ordinate \(last\) dimension should be 2 or 3, got {}"
coords = np.ones((10, 2, 4), order="C")
with pytest.raises(ValueError, match=msg.format(4)):
shapely.linestrings(coords)
coords = np.ones((10, 2, 4), order="F")
with pytest.raises(ValueError, match=msg.format(4)):
shapely.linestrings(coords)
coords = np.swapaxes(np.swapaxes(np.ones((10, 2, 4)), 0, 2), 1, 0)
coords = np.swapaxes(np.swapaxes(np.asarray(coords, order="F"), 0, 2), 1, 2)
with pytest.raises(ValueError, match=msg.format(4)):
shapely.linestrings(coords)
# too few ordinates
coords = np.ones((10, 2, 1))
with pytest.raises(ValueError, match=msg.format(1)):
shapely.linestrings(coords)
def test_linearrings():
actual = shapely.linearrings(box_tpl(0, 0, 1, 1))
assert_geometries_equal(
actual, LinearRing([(1, 0), (1, 1), (0, 1), (0, 0), (1, 0)])
)
def test_linearrings_from_xy():
actual = shapely.linearrings([0, 1, 2, 0], [3, 4, 5, 3])
assert_geometries_equal(actual, LinearRing([(0, 3), (1, 4), (2, 5), (0, 3)]))
def test_linearrings_unclosed():
actual = shapely.linearrings(box_tpl(0, 0, 1, 1)[:-1])
assert_geometries_equal(
actual, LinearRing([(1, 0), (1, 1), (0, 1), (0, 0), (1, 0)])
)
def test_linearrings_unclosed_all_coords_equal():
actual = shapely.linearrings([(0, 0), (0, 0), (0, 0)])
assert_geometries_equal(actual, LinearRing([(0, 0), (0, 0), (0, 0), (0, 0)]))
def test_linearrings_invalid_shape_scalar():
with pytest.raises(ValueError):
shapely.linearrings((1, 1))
@pytest.mark.parametrize(
"shape",
[
(2, 1, 2), # 2 linearrings of 1 2D point
(1, 1, 2), # 1 linearring of 1 2D point
(1, 2), # 1 linearring of 1 2D point (scalar)
(2, 2, 2), # 2 linearrings of 2 2D points
(1, 2, 2), # 1 linearring of 2 2D points
(2, 2), # 1 linearring of 2 2D points (scalar)
],
)
def test_linearrings_invalid_shape(shape):
coords = np.ones(shape)
with pytest.raises(ValueError):
shapely.linearrings(coords)
# make sure the first coordinate != second coordinate
coords[..., 1] += 1
with pytest.raises(ValueError):
shapely.linearrings(coords)
def test_linearrings_invalid_ndim():
    """Linear rings must have 2 or 3 ordinates per coordinate."""
    msg = r"The ordinate \(last\) dimension should be 2 or 3, got {}"

    # too many ordinates (4)
    coords1 = np.random.randn(10, 3, 4)
    with pytest.raises(ValueError, match=msg.format(4)):
        shapely.linearrings(coords1)

    # manually closing the rings does not make 4 ordinates valid either
    coords2 = np.hstack((coords1, coords1[:, [0], :]))
    with pytest.raises(ValueError, match=msg.format(4)):
        shapely.linearrings(coords2)

    # too few ordinates
    coords3 = np.random.randn(10, 3, 1)
    with pytest.raises(ValueError, match=msg.format(1)):
        # was shapely.linestrings -- copy-paste slip; this test targets linearrings
        shapely.linearrings(coords3)
def test_linearrings_all_nan():
coords = np.full((4, 2), np.nan)
with pytest.raises(shapely.GEOSException):
shapely.linearrings(coords)
@pytest.mark.parametrize("dim", [2, 3])
@pytest.mark.parametrize("order", ["C", "F"])
def test_linearrings_buffer(dim, order):
coords1 = np.random.randn(10, 4, dim)
coords1 = np.asarray(coords1, order=order)
result1 = shapely.linearrings(coords1)
# with manual closure -> can directly copy from buffer if C order
coords2 = np.hstack((coords1, coords1[:, [0], :]))
coords2 = np.asarray(coords2, order=order)
result2 = shapely.linearrings(coords2)
assert_geometries_equal(result1, result2)
# create scalar -> can also directly copy from buffer if F order
coords3 = np.asarray(coords2[0], order=order)
result3 = shapely.linearrings(coords3)
assert_geometries_equal(result3, result1[0])
def test_polygon_from_linearring():
actual = shapely.polygons(shapely.linearrings(box_tpl(0, 0, 1, 1)))
assert_geometries_equal(actual, Polygon([(1, 0), (1, 1), (0, 1), (0, 0), (1, 0)]))
def test_polygons_none():
assert_geometries_equal(shapely.polygons(None), empty_polygon)
assert_geometries_equal(shapely.polygons(None, holes=[linear_ring]), empty_polygon)
def test_polygons():
actual = shapely.polygons(box_tpl(0, 0, 1, 1))
assert_geometries_equal(actual, Polygon([(1, 0), (1, 1), (0, 1), (0, 0), (1, 0)]))
def test_polygon_no_hole_list_raises():
with pytest.raises(ValueError):
shapely.polygons(box_tpl(0, 0, 10, 10), box_tpl(1, 1, 2, 2))
def test_polygon_no_hole_wrong_type():
with pytest.raises((TypeError, shapely.GEOSException)):
shapely.polygons(point)
def test_polygon_with_hole_wrong_type():
with pytest.raises((TypeError, shapely.GEOSException)):
shapely.polygons(point, [linear_ring])
def test_polygon_wrong_hole_type():
with pytest.raises((TypeError, shapely.GEOSException)):
shapely.polygons(linear_ring, [point])
def test_polygon_with_1_hole():
actual = shapely.polygons(box_tpl(0, 0, 10, 10), [box_tpl(1, 1, 2, 2)])
assert shapely.area(actual) == 99.0
def test_polygon_with_2_holes():
actual = shapely.polygons(
box_tpl(0, 0, 10, 10), [box_tpl(1, 1, 2, 2), box_tpl(3, 3, 4, 4)]
)
assert shapely.area(actual) == 98.0
def test_polygon_with_none_hole():
actual = shapely.polygons(
shapely.linearrings(box_tpl(0, 0, 10, 10)),
[
shapely.linearrings(box_tpl(1, 1, 2, 2)),
None,
shapely.linearrings(box_tpl(3, 3, 4, 4)),
],
)
assert shapely.area(actual) == 98.0
def test_2_polygons_with_same_hole():
actual = shapely.polygons(
[box_tpl(0, 0, 10, 10), box_tpl(0, 0, 5, 5)], [box_tpl(1, 1, 2, 2)]
)
assert shapely.area(actual).tolist() == [99.0, 24.0]
def test_2_polygons_with_2_same_holes():
actual = shapely.polygons(
[box_tpl(0, 0, 10, 10), box_tpl(0, 0, 5, 5)],
[box_tpl(1, 1, 2, 2), box_tpl(3, 3, 4, 4)],
)
assert shapely.area(actual).tolist() == [98.0, 23.0]
def test_2_polygons_with_different_holes():
actual = shapely.polygons(
[box_tpl(0, 0, 10, 10), box_tpl(0, 0, 5, 5)],
[[box_tpl(1, 1, 3, 3)], [box_tpl(1, 1, 2, 2)]],
)
assert shapely.area(actual).tolist() == [96.0, 24.0]
def test_polygons_not_enough_points_in_shell_scalar():
with pytest.raises(ValueError):
shapely.polygons((1, 1))
@pytest.mark.parametrize(
"shape",
[
(2, 1, 2), # 2 linearrings of 1 2D point
(1, 1, 2), # 1 linearring of 1 2D point
(1, 2), # 1 linearring of 1 2D point (scalar)
(2, 2, 2), # 2 linearrings of 2 2D points
(1, 2, 2), # 1 linearring of 2 2D points
(2, 2), # 1 linearring of 2 2D points (scalar)
],
)
def test_polygons_not_enough_points_in_shell(shape):
coords = np.ones(shape)
with pytest.raises(ValueError):
shapely.polygons(coords)
# make sure the first coordinate != second coordinate
coords[..., 1] += 1
with pytest.raises(ValueError):
shapely.polygons(coords)
def test_polygons_not_enough_points_in_holes_scalar():
with pytest.raises(ValueError):
shapely.polygons(np.ones((1, 4, 2)), (1, 1))
@pytest.mark.parametrize(
"shape",
[
(2, 1, 2), # 2 linearrings of 1 2D point
(1, 1, 2), # 1 linearring of 1 2D point
(1, 2), # 1 linearring of 1 2D point (scalar)
(2, 2, 2), # 2 linearrings of 2 2D points
(1, 2, 2), # 1 linearring of 2 2D points
(2, 2), # 1 linearring of 2 2D points (scalar)
],
)
def test_polygons_not_enough_points_in_holes(shape):
coords = np.ones(shape)
with pytest.raises(ValueError):
shapely.polygons(np.ones((1, 4, 2)), coords)
# make sure the first coordinate != second coordinate
coords[..., 1] += 1
with pytest.raises(ValueError):
shapely.polygons(np.ones((1, 4, 2)), coords)
@pytest.mark.parametrize(
"func,expected",
[
(shapely.multipoints, MultiPoint()),
(shapely.multilinestrings, MultiLineString()),
(shapely.multipolygons, MultiPolygon()),
(shapely.geometrycollections, GeometryCollection()),
],
)
def test_create_collection_only_none(func, expected):
actual = func(np.array([None], dtype=object))
assert_geometries_equal(actual, expected)
@pytest.mark.parametrize(
"func,sub_geom",
[
(shapely.multipoints, point),
(shapely.multilinestrings, line_string),
(shapely.multilinestrings, linear_ring),
(shapely.multipolygons, polygon),
(shapely.geometrycollections, point),
(shapely.geometrycollections, line_string),
(shapely.geometrycollections, linear_ring),
(shapely.geometrycollections, polygon),
(shapely.geometrycollections, multi_point),
(shapely.geometrycollections, multi_line_string),
(shapely.geometrycollections, multi_polygon),
(shapely.geometrycollections, geometry_collection),
],
)
def test_create_collection(func, sub_geom):
actual = func([sub_geom, sub_geom])
assert shapely.get_num_geometries(actual) == 2
@pytest.mark.parametrize(
"func,sub_geom",
[
(shapely.multipoints, point),
(shapely.multilinestrings, line_string),
(shapely.multipolygons, polygon),
(shapely.geometrycollections, polygon),
],
)
def test_create_collection_skips_none(func, sub_geom):
actual = func([sub_geom, None, None, sub_geom])
assert shapely.get_num_geometries(actual) == 2
@pytest.mark.parametrize(
"func,sub_geom",
[
(shapely.multipoints, line_string),
(shapely.multipoints, geometry_collection),
(shapely.multipoints, multi_point),
(shapely.multilinestrings, point),
(shapely.multilinestrings, polygon),
(shapely.multilinestrings, multi_line_string),
(shapely.multipolygons, linear_ring),
(shapely.multipolygons, multi_point),
(shapely.multipolygons, multi_polygon),
],
)
def test_create_collection_wrong_geom_type(func, sub_geom):
with pytest.raises(TypeError):
func([sub_geom])
@pytest.mark.parametrize(
"coords,ccw,expected",
[
((0, 0, 1, 1), True, Polygon([(1, 0), (1, 1), (0, 1), (0, 0), (1, 0)])),
(
(0, 0, 1, 1),
False,
Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)]),
),
],
)
def test_box(coords, ccw, expected):
actual = shapely.box(*coords, ccw=ccw)
assert_geometries_equal(actual, expected)
@pytest.mark.parametrize(
"coords,ccw,expected",
[
(
(0, 0, [1, 2], [1, 2]),
True,
[
Polygon([(1, 0), (1, 1), (0, 1), (0, 0), (1, 0)]),
Polygon([(2, 0), (2, 2), (0, 2), (0, 0), (2, 0)]),
],
),
(
(0, 0, [1, 2], [1, 2]),
[True, False],
[
Polygon([(1, 0), (1, 1), (0, 1), (0, 0), (1, 0)]),
Polygon([(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)]),
],
),
],
)
def test_box_array(coords, ccw, expected):
actual = shapely.box(*coords, ccw=ccw)
assert_geometries_equal(actual, expected)
@pytest.mark.parametrize(
"coords",
[
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0, 1, 1],
[0, np.nan, 1, 1],
[0, 0, np.nan, 1],
[0, 0, 1, np.nan],
],
)
def test_box_nan(coords):
assert shapely.box(*coords) is None
def test_prepare():
    """prepare() prepares geometries in place, skips None, and is idempotent."""
    arr = np.array([shapely.points(1, 1), None, shapely.box(0, 0, 1, 1)])
    # freshly created geometries carry no prepared representation
    assert arr[0]._geom_prepared == 0
    assert arr[2]._geom_prepared == 0
    shapely.prepare(arr)
    # after preparing, the non-None entries hold a prepared-geometry pointer
    assert arr[0]._geom_prepared != 0
    assert arr[1] is None
    assert arr[2]._geom_prepared != 0

    # preparing again actually does nothing
    original = arr[0]._geom_prepared
    shapely.prepare(arr)
    assert arr[0]._geom_prepared == original
def test_destroy_prepared():
arr = np.array([shapely.points(1, 1), None, shapely.box(0, 0, 1, 1)])
shapely.prepare(arr)
assert arr[0]._geom_prepared != 0
assert arr[2]._geom_prepared != 0
shapely.destroy_prepared(arr)
assert arr[0]._geom_prepared == 0
assert arr[1] is None
assert arr[2]._geom_prepared == 0
shapely.destroy_prepared(arr) # does not error
@pytest.mark.parametrize("geom_type", [None, GeometryType.MISSING, -1])
def test_empty_missing(geom_type):
actual = shapely.empty((2,), geom_type=geom_type)
assert shapely.is_missing(actual).all()
@pytest.mark.parametrize("geom_type", range(8))
def test_empty(geom_type):
actual = shapely.empty((2,), geom_type=geom_type)
assert (~shapely.is_missing(actual)).all()
assert shapely.is_empty(actual).all()
assert (shapely.get_type_id(actual) == geom_type).all()
| [
"lukasc@ece.ubc.ca"
] | lukasc@ece.ubc.ca |
fc5bba0bfd961caa2e101550321cb8be6eff8fab | 5897e3072b9f5c80c7640504cd893c0b18148ebe | /obj_sql/expressions/__init__.py | 09ef5f5b9bae47f425d809c9b322ef7e0222647f | [] | no_license | nttlong/python-libs | bc12446698b32aceaaf979aa5a95400fb2120e51 | 32e8dd67cd88a12ec808ec3cd178b3fa5a8928c1 | refs/heads/main | 2023-01-12T15:07:11.411690 | 2020-11-21T06:23:16 | 2020-11-21T06:23:16 | 309,013,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | from .fields import field
from .fields_expression import field_expression
from .fields_member import field_member | [
"zugeliang2000@gmail.com"
] | zugeliang2000@gmail.com |
2ef8ad9520e99842ad0aba6bd17b8b532e925ffa | 3cffa92ed70f487a3ea28b73a8f544bbc347e450 | /tests/test_oli.py | 8978a3db7caebc6920c1007df2d339b04994b0e4 | [] | no_license | vahtras/dalmisc | 63962ca86d00c59ea019db2b0e5eebba488f70b5 | 2e344132eaac0d163cdb41a5737baca8e875fc49 | refs/heads/master | 2023-01-31T06:42:38.964477 | 2020-12-15T10:38:29 | 2020-12-15T10:38:29 | 321,618,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import os
from util.full import unit, init, matrix
from dalmisc import oli
from .common_tests import assert_
def setup():
    """Point the module-global `suppdir` at this test's support directory.

    The support directory is the test file's own path with the extension
    replaced by ".d" (e.g. test_oli.py -> test_oli.d).
    """
    global suppdir
    # e (the extension) is intentionally unused; only the stem is needed
    n, e = os.path.splitext(__file__)
    suppdir = n + ".d"
def test_e2n_S():
    """oli.e2n applied to each unit vector reproduces the reference matrix."""
    # reference values precomputed for the 2x2 case
    refe2 = init([
        [0.78643356, -0.50624296],
        [-0.50624296, 0.78643356]
    ])
    # evaluate e2n column-by-column over the 2D unit vectors, using the
    # support directory populated by setup()
    e2 = [oli.e2n(n, tmpdir=suppdir) for n in unit(2)]
    assert_(e2, refe2)
def test_s2n_S():
    """oli.s2n applied to each unit vector reproduces the reference diagonal."""
    refs2 = matrix.diag([2., -2.])
    # evaluate s2n column-by-column over the 2D unit vectors
    s2 = [oli.s2n(n, tmpdir=suppdir) for n in unit(2)]
    assert_(s2, refs2)
if __name__ == "__main__":
setup()
test_e2n_S()
| [
"vahtras@kth.se"
] | vahtras@kth.se |
f54b6de297ca25b541adb6bd4b12906d8bc8fcfd | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs_with_max_replays_5/interreplay_135_r_4/interactive_replay_config.py | ce9454be78e968ff18bc201ae28a9357ab12050d | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import InteractiveReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger

# Simulation setup: a single Pyretic controller (c1) driving a 3-switch mesh.
# NOTE(review): this file looks auto-generated by the STS replay tooling --
# edit the generating experiment rather than this file where possible.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pyretic.py -m p0 pyretic.examples.firewall_for_sts_no_close', label='c1', address='127.0.0.1', cwd='../pyretic', kill_cmd='ps aux | grep -e pox -e pyretic | grep -v simulator | cut -c 9-15 | xargs kill -9')],
                                     topology_class=MeshTopology,
                                     topology_params="num_switches=3",
                                     patch_panel_class=BufferedPatchPanel,
                                     multiplex_sockets=False,
                                     kill_controllers_on_exit=True)

# Replay the recorded event trace interactively against the simulation above.
control_flow = InteractiveReplayer(simulation_config, "experiments/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs/interreplay_135_r_4/events.trace")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'None'
| [
"cs@cs.berkeley.edu"
] | cs@cs.berkeley.edu |
f6a0fef8c7c0962752e3f0be0a812c8744fff280 | efd8628adc042ae2d58fa89cc31a5c1c80aa94f6 | /bi_conv_lstm/src/convlstm_cell.py | 2fce2759c2fffbd8344d862bd762e4273427b43e | [] | no_license | Xharlie/stochast_dynamic_for_video_infilling | d7e0bfaf8b71cf3f39170793e5a1a50b289aee40 | a825de4c5178f7084925817f0631ac331073866f | refs/heads/master | 2021-03-27T09:11:15.478067 | 2019-10-23T17:59:25 | 2019-10-23T17:59:25 | 110,137,739 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,946 | py | import tensorflow as tf
class ConvLSTMCell(tf.nn.rnn_cell.RNNCell):
    """A LSTM cell with convolutions instead of multiplications.

    Reference:
      Xingjian, S. H. I., et al. "Convolutional LSTM network: A machine learning approach for precipitation nowcasting." Advances in Neural Information Processing Systems. 2015.
    """

    def __init__(self, shape, filters, kernel, forget_bias=1.0, activation=tf.tanh, normalize=True, peephole=True, data_format='channels_last', reuse=None):
        """Configure the cell.

        Args:
            shape: spatial dimensions of the input feature map (list of ints).
            filters: number of output channels of the cell state / output.
            kernel: convolution kernel size (list of ints).
            forget_bias: bias added to the forget gate before the sigmoid.
            normalize: apply layer normalization to gates and cell state
                (when True, the conv bias is omitted since LN has its own).
            peephole: add peephole connections from the cell state to gates.
            data_format: 'channels_last' or 'channels_first'.
        """
        super(ConvLSTMCell, self).__init__(_reuse=reuse)
        self._kernel = kernel
        self._filters = filters
        self._forget_bias = forget_bias
        self._activation = activation
        self._normalize = normalize
        self._peephole = peephole
        if data_format == 'channels_last':
            self._size = tf.TensorShape(shape + [self._filters])
            self._feature_axis = self._size.ndims
            self._data_format = None
        elif data_format == 'channels_first':
            self._size = tf.TensorShape([self._filters] + shape)
            self._feature_axis = 0
            self._data_format = 'NC'
        else:
            raise ValueError('Unknown data_format')

    @property
    def state_size(self):
        # state is a (cell, hidden) pair, both with the full spatial shape
        return tf.nn.rnn_cell.LSTMStateTuple(self._size, self._size)

    @property
    def output_size(self):
        return self._size

    def call(self, x, state):
        """One LSTM step: returns (output, new LSTMStateTuple)."""
        c, h = state

        # single convolution over [input, hidden] computes all four gates at once
        x = tf.concat([x, h], axis=self._feature_axis)
        n = x.shape[-1].value
        m = 4 * self._filters if self._filters > 1 else 4
        W = tf.get_variable('kernel', self._kernel + [n, m])
        y = tf.nn.convolution(x, W, 'SAME', data_format=self._data_format)
        if not self._normalize:
            # layer norm has its own bias; only add one when not normalizing
            y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
        # j: candidate, i: input gate, f: forget gate, o: output gate
        j, i, f, o = tf.split(y, 4, axis=self._feature_axis)

        if self._peephole:
            # peepholes let input/forget gates see the previous cell state
            i += tf.get_variable('W_ci', c.shape[1:]) * c
            f += tf.get_variable('W_cf', c.shape[1:]) * c

        if self._normalize:
            j = tf.contrib.layers.layer_norm(j)
            i = tf.contrib.layers.layer_norm(i)
            f = tf.contrib.layers.layer_norm(f)

        f = tf.sigmoid(f + self._forget_bias)
        i = tf.sigmoid(i)
        # standard LSTM cell-state update
        c = c * f + i * self._activation(j)

        if self._peephole:
            # output-gate peephole uses the *updated* cell state
            o += tf.get_variable('W_co', c.shape[1:]) * c

        if self._normalize:
            o = tf.contrib.layers.layer_norm(o)
            c = tf.contrib.layers.layer_norm(c)

        o = tf.sigmoid(o)
        h = o * self._activation(c)

        # TODO
        #tf.summary.histogram('forget_gate', f)
        #tf.summary.histogram('input_gate', i)
        #tf.summary.histogram('output_gate', o)
        #tf.summary.histogram('cell_state', c)

        state = tf.nn.rnn_cell.LSTMStateTuple(c, h)

        return h, state
class ConvGRUCell(tf.nn.rnn_cell.RNNCell):
    """A GRU cell with convolutions instead of multiplications."""

    def __init__(self, shape, filters, kernel, activation=tf.tanh, normalize=True, data_format='channels_last', reuse=None):
        """Configure the cell.

        Args:
            shape: spatial dimensions of the input feature map (list of ints).
            filters: number of output channels of the hidden state.
            kernel: convolution kernel size (list of ints).
            normalize: apply layer normalization to gates and candidate.
            data_format: 'channels_last' or 'channels_first'.
        """
        super(ConvGRUCell, self).__init__(_reuse=reuse)
        self._filters = filters
        self._kernel = kernel
        self._activation = activation
        self._normalize = normalize
        if data_format == 'channels_last':
            self._size = tf.TensorShape(shape + [self._filters])
            self._feature_axis = self._size.ndims
            self._data_format = None
        elif data_format == 'channels_first':
            self._size = tf.TensorShape([self._filters] + shape)
            self._feature_axis = 0
            self._data_format = 'NC'
        else:
            raise ValueError('Unknown data_format')

    @property
    def state_size(self):
        return self._size

    @property
    def output_size(self):
        return self._size

    def call(self, x, h):
        """One GRU step: returns (new hidden state, new hidden state)."""
        channels = x.shape[self._feature_axis].value

        with tf.variable_scope('gates'):
            # one convolution over [input, hidden] yields reset + update gates
            inputs = tf.concat([x, h], axis=self._feature_axis)
            n = channels + self._filters
            m = 2 * self._filters if self._filters > 1 else 2
            W = tf.get_variable('kernel', self._kernel + [n, m])
            y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
            if self._normalize:
                r, u = tf.split(y, 2, axis=self._feature_axis)
                r = tf.contrib.layers.layer_norm(r)
                u = tf.contrib.layers.layer_norm(u)
            else:
                # bias initialized to 1 biases both gates open at the start
                y += tf.get_variable('bias', [m], initializer=tf.ones_initializer())
                r, u = tf.split(y, 2, axis=self._feature_axis)
            r, u = tf.sigmoid(r), tf.sigmoid(u)

            # TODO
            #tf.summary.histogram('reset_gate', r)
            #tf.summary.histogram('update_gate', u)

        with tf.variable_scope('candidate'):
            # candidate state sees the input and the *reset-gated* hidden state
            inputs = tf.concat([x, r * h], axis=self._feature_axis)
            n = channels + self._filters
            m = self._filters
            W = tf.get_variable('kernel', self._kernel + [n, m])
            y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
            if self._normalize:
                y = tf.contrib.layers.layer_norm(y)
            else:
                y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
            # convex blend of old state and candidate, weighted by update gate
            h = u * h + (1 - u) * self._activation(y)

        return h, h
"charlie.xu007@yahoo.com"
] | charlie.xu007@yahoo.com |
82ab7c61bcd07b03c93eb2c5495b8867ec737671 | 9b3e46ef2ffd65cccace3e3e3d93438c077e4f9e | /main/dialog/template.py | 12e827125435f1610370d68c18651b38ebcd624b | [
"BSD-2-Clause"
] | permissive | wizadr/cports | 1dd043045fc63b061f803d1992a9ccdc995850ad | be5f4695305d9c00de9d4e252e67db8081690c3e | refs/heads/master | 2023-08-21T11:35:16.710064 | 2021-10-25T00:38:04 | 2021-10-25T00:38:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | pkgname = "dialog"
_mver = "1.3"
_date = "20210621"
pkgver = f"{_mver}.{_date}"
pkgrel = 0
build_style = "gnu_configure"
configure_args = ["--with-ncursesw", "--disable-nls"]
makedepends = ["ncurses-devel"]
pkgdesc = "Tool to display dialog boxes from shell scripts"
maintainer = "q66 <q66@chimera-linux.org>"
license = "LGPL-2.1-only"
url = "https://invisible-island.net/dialog"
source = f"https://invisible-mirror.net/archives/{pkgname}/{pkgname}-{_mver}-{_date}.tgz"
sha256 = "c3af22ccfcd9baca384062108dd9354e86990929ee270c239eef69518c5da7c8"
def post_install(self):
    """Drop the installed library directory; only the dialog binary is shipped."""
    libdir = self.destdir / "usr/lib"
    self.rm(libdir, recursive = True, force = True)
| [
"q66@chimera-linux.org"
] | q66@chimera-linux.org |
d039d23102a35433edadb4a67f3a22b65f11d99c | 36901e58fbdeabc7380ae2c0278010b2c51fe54d | /gatheros_event/helpers/event_business.py | 55a6a5bce19c24a740033883ffbe597b2be365cc | [] | no_license | hugoseabra/congressy | e7c43408cea86ce56e3138d8ee9231d838228959 | ac1e9b941f1fac8b7a13dee8a41982716095d3db | refs/heads/master | 2023-07-07T04:44:26.424590 | 2021-08-11T15:47:02 | 2021-08-11T15:47:02 | 395,027,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | """
Esses helpers tem como objetivo identificar o tipo do evento:
- Grátis
- Pago
Além de também mudar o tipo do evento e aplicar as configuração das features
disponiveis de acordo com o tipo em si
"""
from gatheros_event.event_state import EventState, EventPayable
from gatheros_event.models import Event
from gatheros_event.event_specifications import Saleable
def is_free_event(event: Event):
    """Return True when the event's state reports it as free.

    Raises a generic Exception when `event` is not an Event instance.
    """
    if isinstance(event, Event):
        return EventState(event).is_free()
    raise Exception("{} '{}' não é uma instancia de Event".format(
        event,
        event.__class__,
    ))
def is_paid_event(event: Event):
    """Return True when the event's state reports it as payable.

    Raises a generic Exception when `event` is not an Event instance.
    """
    if isinstance(event, Event):
        return EventState(event).is_payable()
    raise Exception("{} '{}' não é uma instancia de Event".format(
        event,
        event.__class__,
    ))
def removing_saleable_cause_feature_change(event: Event, candidate,
                                           candidate_type) -> bool:
    """Return True when excluding `candidate` makes the event stop being payable.

    `candidate` must satisfy the Saleable specification; otherwise a generic
    Exception is raised. The payability check is delegated to EventPayable
    with the candidate excluded from consideration.
    """
    if not Saleable().is_satisfied_by(candidate):
        raise Exception(
            "{} '{}' não é uma instancia capaz de ser vendida".format(
                candidate,
                candidate.__class__,
            ))
    # event still payable without the candidate -> no feature change; negate it
    return not EventPayable(exclude=candidate,
                            exclude_type=candidate_type).is_satisfied_by(event)
| [
"nathan.eua@gmail.com"
] | nathan.eua@gmail.com |
fef0aa9b88d267ea1543943ccc5df703e7c1d7d2 | 45c7693fabf2bf6aa142ea08ed7ec45b9a6aee79 | /apps/belt_app/views.py | 576b7dfeec8067fb6ad1415728705a1827018ea8 | [] | no_license | stuffyUdaya/quote | 95cedd352b493eb29f127cc9be78456652e1cca8 | ae979eeb92b490fa12ee95d4276bef9517286c5a | refs/heads/master | 2021-01-09T06:11:50.343620 | 2017-02-04T14:43:09 | 2017-02-04T14:43:09 | 80,923,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,662 | py | from django.shortcuts import render,redirect
from django.urls import reverse
from django.contrib import messages
from .models import User,Quote,Fav
from django.db.models import Count
def index(request):
    """Render the landing page with the registration and login forms."""
    return render(request, 'belt_app/index.html')
def process(request):
    """Handle the registration form: log the new user in or flash errors."""
    # results is assumed to be (has_errors, errors_or_user) -- confirm in the
    # User manager's userValidator
    results = User.objects.userValidator(request.POST['name'],request.POST['alias'],request.POST['email'],request.POST['password'],request.POST['confpassword'],request.POST['dateofbirth'])
    if results[0]:
        # validation failed: flash every error and fall through to the index
        for err in results[1]:
            print err
            messages.error(request,err)
    else:
        # validation passed: results[1] is the created user
        request.session ['loggedin'] = results[1].id
        return redirect('/success')
    return redirect('/')
def login(request):
    """Handle the login form: set the session on success, flash the error otherwise."""
    postData ={
        'email': request.POST['email'],
        'password': request.POST['password']
    }
    # results is assumed to be (ok, user_or_error_message) -- confirm in the
    # User manager's loginValidator
    results = User.objects.loginValidator(postData)
    if results[0]:
        request.session['loggedin'] = results[1].id
        return redirect('/success')
    else:
        messages.error(request,results[1])
        return redirect('/')
def success(request):
    """Dashboard: the logged-in user, their favorites, and all other quotes.

    Quotes already favorited by the user are excluded from the general list
    so each quote appears in exactly one of the two lists.
    """
    favorites = Fav.objects.filter(user_id=request.session['loggedin'])
    quote_other = Quote.objects.all()
    for f in favorites:
        quote_other = quote_other.exclude(id=f.quote_id)
    context = {
        'user': User.objects.get(id=request.session['loggedin']),
        'fav': favorites,
        'quote': quote_other,
    }
    return render(request, 'belt_app/success.html', context)
def addquote(request,id):
    """Create a quote posted by user `id`; flash validation errors if any."""
    postee_id = id
    # user = User.objects.get(id = id)
    # results is assumed to be (has_errors, errors) -- confirm in the Quote
    # manager's quoteValidator
    results = Quote.objects.quoteValidator(request.POST['qname'], request.POST['message'],postee_id)
    if results[0]:
        # validation failed: flash every error before redirecting
        for err in results[1]:
            print err
            messages.error(request,err)
        return redirect('/success')
    else:
        return redirect('/success')
    # context = {
    #     'add': Quote.objects.create(qname = request.POST['qname'], message = request.POST['message'], postee = user)
    # }
def fav(request, id, uid):
    """Mark quote `id` as a favorite of user `uid`, then return to the dashboard."""
    # The original had a trailing comma that wrapped the created row in a
    # throwaway 1-tuple bound to an unused name; the return value is not
    # needed, so just create the row and redirect.
    Fav.objects.create(user_id=uid, quote_id=id)
    return redirect('/success')
def view(request, id):
    """Show every quote posted by user `id`."""
    posted = Quote.objects.filter(postee_id=id)
    return render(request, 'belt_app/view.html', {'view': posted})
def remove(request,id,uid):
    """Remove quote `id` from user `uid`'s favorites."""
    # NOTE(review): .get() raises DoesNotExist when the pair is absent --
    # confirm callers only reach this with an existing favorite
    Fav.objects.get(user_id = uid, quote_id = id ).delete()
    return redirect('/success')
def logout(request):
    """Wipe the whole session and send the visitor back to the login page."""
    request.session.flush()
    return redirect('/')
| [
"udayatummala1@gmail.com"
] | udayatummala1@gmail.com |
31d4b7d24d52dd5c170a20732ea593848cb6621e | 6a95112805b64322953429270a305d01fef3faea | /dist/weewx-3.9.2/bin/weewx/wxservices.py | eb777b1882ae3211879a2917cb29e3b810305037 | [
"GPL-3.0-only",
"GPL-1.0-or-later",
"Apache-2.0"
] | permissive | tomdotorg/docker-weewx | c6d59dc492a9e53f3bc898f7b9f593717092d72c | 7085654f455d39b06acc688738fde27e1f78ad1e | refs/heads/main | 2023-06-08T17:57:44.184399 | 2023-01-30T11:21:23 | 2023-01-30T11:21:23 | 54,113,384 | 21 | 16 | Apache-2.0 | 2022-10-19T23:46:26 | 2016-03-17T11:39:29 | Dockerfile | UTF-8 | Python | false | false | 19,145 | py | #
# Copyright (c) 2009-2016 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Services specific to weather."""
import syslog
import weedb
import weewx.units
import weewx.engine
import weewx.wxformulas
import weeutil.weeutil
from weewx.units import CtoF, mps_to_mph, kph_to_mph, METER_PER_FOOT
class StdWXCalculate(weewx.engine.StdService):
"""Wrapper class for WXCalculate.
A StdService wrapper for a WXCalculate object so it may be called as a
service. This also allows the WXCalculate class to be used elsewhere
without the overheads of running it as a weewx service.
"""
def __init__(self, engine, config_dict):
"""Initialize the service.
Create a WXCalculate object and initialise our bindings.
"""
super(StdWXCalculate, self).__init__(engine, config_dict)
self.calc = WXCalculate(config_dict,
engine.stn_info.altitude_vt,
engine.stn_info.latitude_f,
engine.stn_info.longitude_f,
engine.db_binder)
# we will process both loop and archive events
self.bind(weewx.NEW_LOOP_PACKET, self.new_loop_packet)
self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)
def new_loop_packet(self, event):
self.calc.do_calculations(event.packet, 'loop')
def new_archive_record(self, event):
self.calc.do_calculations(event.record, 'archive')
class WXCalculate(object):
    """Add derived quantities to a record.

    Derived quantities should depend only on independent observations.
    They should not depend on other derived quantities.
    There is one situation where dependencies matter: pressure. In the case
    where the hardware reports barometer, we must calculate pressure and
    altimeter. Since altimeter depends on pressure, pressure must be
    calculated before altimeter.
    We do not handle the situation where hardware reports altimeter and
    we must calculate barometer and pressure.
    """

    # these are the quantities that this service knows how to calculate;
    # each name 'x' must have a matching calc_x() method below
    _dispatch_list = [
        'pressure', # pressure must be before altimeter
        'barometer',
        'altimeter',
        'windchill',
        'heatindex',
        'dewpoint',
        'inDewpoint',
        'rainRate',
        'maxSolarRad',
        'cloudbase',
        'humidex',
        'appTemp',
        # 'beaufort',
        'ET',
        'windrun',
    ]

    def __init__(self, config_dict, alt_vt, lat_f, long_f, db_binder=None):
        """Initialize the calculation service. Sample configuration:

        [StdWXCalculate]
            data_binding = wx_binding
            ignore_zero_wind = True
            rain_period = 900           # for rain rate
            et_period = 3600            # for evapotranspiration
            wind_height = 2.0           # for evapotranspiration. In meters.
            atc = 0.8                   # for solar radiation RS
            nfac = 2                    # for solar radiation Bras
            max_delta_12h = 1800
            [[Calculations]]
                windchill = hardware
                heatindex = prefer_hardware
                dewpoint = software
                humidex = None
            [[Algorithms]]
                altimeter = aaASOS
                maxSolarRad = RS
        """
        # get any configuration settings
        svc_dict = config_dict.get('StdWXCalculate', {'Calculations':{}})
        # if there is no Calculations section, then make an empty one
        if not 'Calculations' in svc_dict:
            svc_dict['Calculations'] = dict()
        # database binding for any calculations that need database queries
        if db_binder is None:
            db_binder = weewx.manager.DBBinder(config_dict)
        self.db_binder = db_binder
        self.binding = svc_dict.get('data_binding', 'wx_binding')
        # window of time to measure rain rate, in seconds
        self.rain_period = int(svc_dict.get('rain_period', 900))
        # window of time for evapotranspiration calculation, in seconds
        self.et_period = int(svc_dict.get('et_period', 3600))
        # does zero wind mean no wind direction
        self.ignore_zero_wind = weeutil.weeutil.to_bool(svc_dict.get('ignore_zero_wind', True))
        # atmospheric transmission coefficient [0.7-0.91]
        self.atc = float(svc_dict.get('atc', 0.8))
        # Fail hard if out of range:
        if not 0.7 <= self.atc <= 0.91:
            raise weewx.ViolatedPrecondition("Atmospheric transmission "
                                             "coefficient (%f) out of "
                                             "range [.7-.91]" % self.atc)
        # atmospheric turbidity (2=clear, 4-5=smoggy)
        self.nfac = float(svc_dict.get('nfac', 2))
        # height above ground at which wind is measured, in meters
        self.wind_height = float(svc_dict.get('wind_height', 2.0))
        # Time window to accept a record 12 hours ago:
        self.max_delta_12h = int(svc_dict.get('max_delta_12h', 1800))
        # cache the archive interval. nominally the interval is included in
        # each record for which calculations are being done. however, if the
        # calculation is being done on a loop packet, there will probably be no
        # interval field in that packet. the archive_interval is the value
        # from the last archive record encountered. the alternative to
        # caching is to hard-fail - if a calculation depends on archive
        # interval, it would be calculated only for archive records, not
        # loop packets. currently this applies only to pressure calculation.
        self.archive_interval = None
        self.calculations = dict()
        # Find out which calculations should be performed.
        # We recognize only the names in our dispatch list; others are ignored.
        for k in self._dispatch_list:
            x = svc_dict['Calculations'].get(k, 'prefer_hardware').lower()
            if x in ('hardware', 'software', 'prefer_hardware', 'none'):
                self.calculations[k] = x
        # determine which algorithms to use for the calculations
        self.algorithms = svc_dict.get('Algorithms', {})
        self.algorithms.setdefault('altimeter', 'aaNOAA')
        self.algorithms.setdefault('maxSolarRad', 'RS')
        # various bits we need for internal housekeeping
        self.altitude_ft = weewx.units.convert(alt_vt, "foot")[0]
        self.altitude_m = weewx.units.convert(alt_vt, "meter")[0]
        self.latitude = lat_f
        self.longitude = long_f
        self.temperature_12h_ago = None
        self.ts_12h_ago = None
        # sliding windows of (timestamp, amount) rain events for rainRate
        self.rain_events = []
        self.archive_rain_events = []
        # report about which values will be calculated...
        syslog.syslog(syslog.LOG_INFO, "wxcalculate: The following values will be calculated: %s" %
                      ', '.join(["%s=%s" % (k, self.calculations[k]) for k in self.calculations]))
        # ...and which algorithms will be used.
        syslog.syslog(syslog.LOG_INFO, "wxcalculate: The following algorithms will be used for calculations: %s" %
                      ', '.join(["%s=%s" % (k, self.algorithms[k]) for k in self.algorithms]))

    def do_calculations(self, data_dict, data_type):
        """Augment data_dict in place with derived quantities.

        data_type is 'loop' or 'archive'. All internal calculations are done
        in US units; the results are converted back to the record's own unit
        system before updating data_dict.
        """
        if self.ignore_zero_wind:
            self.adjust_winddir(data_dict)
        data_us = weewx.units.to_US(data_dict)
        for obs in self._dispatch_list:
            calc = False
            if obs in self.calculations:
                # 'software' always calculates; 'prefer_hardware' only when
                # the hardware did not supply a value.
                if self.calculations[obs] == 'software':
                    calc = True
                elif (self.calculations[obs] == 'prefer_hardware' and
                      (obs not in data_us or data_us[obs] is None)):
                    calc = True
            elif obs not in data_us or data_us[obs] is None:
                # not explicitly configured: fill in missing values only
                calc = True
            if calc:
                getattr(self, 'calc_' + obs)(data_us, data_type)
        data_x = weewx.units.to_std_system(data_us, data_dict['usUnits'])
        data_dict.update(data_x)

    def adjust_winddir(self, data):
        """If wind speed is zero, then the wind direction is undefined.
        If there is no wind speed, then there is no wind direction."""
        if 'windSpeed' in data and not data.get('windSpeed'):
            data['windDir'] = None
        if 'windGust' in data and not data.get('windGust'):
            data['windGustDir'] = None

    def calc_dewpoint(self, data, data_type): # @UnusedVariable
        if 'outTemp' in data and 'outHumidity' in data:
            data['dewpoint'] = weewx.wxformulas.dewpointF(
                data['outTemp'], data['outHumidity'])

    def calc_inDewpoint(self, data, data_type): # @UnusedVariable
        if 'inTemp' in data and 'inHumidity' in data:
            data['inDewpoint'] = weewx.wxformulas.dewpointF(
                data['inTemp'], data['inHumidity'])

    def calc_windchill(self, data, data_type): # @UnusedVariable
        if 'outTemp' in data and 'windSpeed' in data:
            data['windchill'] = weewx.wxformulas.windchillF(
                data['outTemp'], data['windSpeed'])

    def calc_heatindex(self, data, data_type): # @UnusedVariable
        if 'outTemp' in data and 'outHumidity' in data:
            data['heatindex'] = weewx.wxformulas.heatindexF(
                data['outTemp'], data['outHumidity'])

    def calc_pressure(self, data, data_type): # @UnusedVariable
        # Needs the temperature from 12 hours ago, hence the database lookup
        # via _get_temperature_12h.
        interval = self._get_archive_interval(data)
        if (interval is not None and 'barometer' in data and
                'outTemp' in data and 'outHumidity' in data):
            temperature_12h_ago = self._get_temperature_12h(
                data['dateTime'], interval)
            if (data['barometer'] is not None and
                    data['outTemp'] is not None and
                    data['outHumidity'] is not None and
                    temperature_12h_ago is not None):
                data['pressure'] = weewx.uwxutils.uWxUtilsVP.SeaLevelToSensorPressure_12(
                    data['barometer'], self.altitude_ft,
                    data['outTemp'], temperature_12h_ago, data['outHumidity'])

    def calc_barometer(self, data, data_type): # @UnusedVariable
        if 'pressure' in data and 'outTemp' in data:
            data['barometer'] = weewx.wxformulas.sealevel_pressure_US(
                data['pressure'], self.altitude_ft, data['outTemp'])

    def calc_altimeter(self, data, data_type): # @UnusedVariable
        if 'pressure' in data:
            # algorithm names are prefixed with 'aa' internally
            algo = self.algorithms.get('altimeter', 'aaNOAA')
            if not algo.startswith('aa'):
                algo = 'aa%s' % algo
            data['altimeter'] = weewx.wxformulas.altimeter_pressure_US(
                data['pressure'], self.altitude_ft, algorithm=algo)

    # rainRate is simply the amount of rain in a period scaled to quantity/hr.
    # use a sliding window for the time period and the total rainfall in that
    # period for the amount of rain. the window size is controlled by the
    # rain_period parameter.
    def calc_rainRate(self, data, data_type):
        """Compute rain rate over a sliding rain_period window, in units/hr."""
        # if this is a loop packet then cull and add to the queue
        if data_type == 'loop':
            # punt any old events from the loop event list...
            if (self.rain_events and self.rain_events[0][0] <= data['dateTime'] - self.rain_period):
                events = []
                for e in self.rain_events:
                    if e[0] > data['dateTime'] - self.rain_period:
                        events.append((e[0], e[1]))
                self.rain_events = events
            # ...then add new rain event if there is one
            if 'rain' in data and data['rain']:
                self.rain_events.append((data['dateTime'], data['rain']))
        elif data_type == 'archive':
            # punt any old events from the archive event list...
            if (self.archive_rain_events and self.archive_rain_events[0][0] <= data['dateTime'] - self.rain_period):
                events = []
                for e in self.archive_rain_events:
                    if e[0] > data['dateTime'] - self.rain_period:
                        events.append((e[0], e[1]))
                self.archive_rain_events = events
            # ...then add new rain event if there is one
            if 'rain' in data and data['rain']:
                self.archive_rain_events.append((data['dateTime'], data['rain']))
        # for both loop and archive, add up the rain...
        rainsum = 0
        if len(self.rain_events) != 0:
            # we have loop rain events so add them up
            for e in self.rain_events:
                rainsum += e[1]
        elif data_type == 'archive':
            # no loop rain events but do we have any archive rain events
            for e in self.archive_rain_events:
                rainsum += e[1]
        # ...then divide by the period and scale to an hour
        data['rainRate'] = 3600 * rainsum / self.rain_period

    def calc_maxSolarRad(self, data, data_type): # @UnusedVariable
        # Two supported algorithms: 'Bras' (uses turbidity nfac) and the
        # default 'RS' (uses atmospheric transmission coefficient atc).
        algo = self.algorithms.get('maxSolarRad', 'RS')
        if algo == 'Bras':
            data['maxSolarRad'] = weewx.wxformulas.solar_rad_Bras(
                self.latitude, self.longitude, self.altitude_m,
                data['dateTime'], self.nfac)
        else:
            data['maxSolarRad'] = weewx.wxformulas.solar_rad_RS(
                self.latitude, self.longitude, self.altitude_m,
                data['dateTime'], self.atc)

    def calc_cloudbase(self, data, data_type): # @UnusedVariable
        if 'outTemp' in data and 'outHumidity' in data:
            data['cloudbase'] = weewx.wxformulas.cloudbase_US(
                data['outTemp'], data['outHumidity'], self.altitude_ft)

    def calc_humidex(self, data, data_type): # @UnusedVariable
        if 'outTemp' in data and 'outHumidity' in data:
            data['humidex'] = weewx.wxformulas.humidexF(
                data['outTemp'], data['outHumidity'])

    def calc_appTemp(self, data, data_type): # @UnusedVariable
        if 'outTemp' in data and 'outHumidity' in data and 'windSpeed' in data:
            data['appTemp'] = weewx.wxformulas.apptempF(
                data['outTemp'], data['outHumidity'], data['windSpeed'])

    def calc_beaufort(self, data, data_type): # @UnusedVariable
        # NOTE: 'beaufort' is commented out of _dispatch_list above, so this
        # is currently never dispatched.
        if 'windSpeed' in data:
            vt = (data['windSpeed'], "mile_per_hour", "group_speed")
            ws_kts = weewx.units.convert(vt, "knot")[0]
            data['beaufort'] = weewx.wxformulas.beaufort(ws_kts)

    def calc_ET(self, data, data_type):
        """Get maximum and minimum temperatures and average radiation and
        wind speed for the indicated period then calculate the amount of
        evapotranspiration during the interval. Convert to US units if necessary
        since this service operates in US unit system."""
        # calculate ET only for archive packets
        if data_type != 'archive':
            return
        end_ts = data['dateTime']
        start_ts = end_ts - self.et_period
        interval = self._get_archive_interval(data)
        try:
            dbmanager = self.db_binder.get_manager(self.binding)
            r = dbmanager.getSql(
                "SELECT"
                " MAX(outTemp), MIN(outTemp), AVG(radiation), AVG(windSpeed),"
                " MAX(outHumidity), MIN(outHumidity), MAX(usUnits), MIN(usUnits)"
                " FROM %s WHERE dateTime>? AND dateTime <=?"
                % dbmanager.table_name, (start_ts, end_ts))
            # Make sure everything is there:
            if r is None or None in r:
                data['ET'] = None
                return
            # Unpack the results
            T_max, T_min, rad_avg, wind_avg, rh_max, rh_min, std_unit_min, std_unit_max = r
            # Check for mixed units
            if std_unit_min != std_unit_max:
                syslog.syslog(syslog.LOG_NOTICE, "wxservices: Mixed unit system not allowed in ET calculation")
                data['ET'] = None
                return
            std_unit = std_unit_min
            if std_unit == weewx.METRIC or std_unit == weewx.METRICWX:
                T_max = CtoF(T_max)
                T_min = CtoF(T_min)
                if std_unit == weewx.METRICWX:
                    wind_avg = mps_to_mph(wind_avg)
                else:
                    wind_avg = kph_to_mph(wind_avg)
            # Wind height is in meters, so convert it:
            height_ft = self.wind_height / METER_PER_FOOT
            ET_rate = weewx.wxformulas.evapotranspiration_US(
                T_min, T_max, rh_min, rh_max, rad_avg, wind_avg, height_ft,
                self.latitude, self.longitude, self.altitude_ft, end_ts)
            # The formula returns inches/hour. We need the total ET over the
            # archive interval, so multiply by the length of the archive
            # interval in hours.
            data['ET'] = ET_rate * interval / 3600.0 if ET_rate is not None else None
        except ValueError as e:
            weeutil.weeutil.log_traceback()
            syslog.syslog(syslog.LOG_ERR, "wxservices: Calculation of evapotranspiration failed: %s" % e)
        except weedb.DatabaseError:
            # best-effort: no database available means no ET value this time
            pass

    def calc_windrun(self, data, data_type):
        """Calculate the wind run for the archive interval. """
        # calculate windrun only for archive packets
        if data_type == 'loop':
            return
        # Calculate windrun for archive record
        if 'windSpeed' in data:
            data['windrun'] = data['windSpeed'] * data['interval'] / 60.0 if data['windSpeed'] is not None else None

    def _get_archive_interval(self, data):
        """Return the archive interval in seconds, caching the last seen value
        so loop packets (which lack 'interval') can still use it."""
        if 'interval' in data and data['interval']:
            # cache the interval so it can be used for loop calculations
            self.archive_interval = data['interval'] * 60
        return self.archive_interval

    def _get_temperature_12h(self, ts, archive_interval):
        """Get the temperature from 12 hours ago. Return None if no
        temperature is found. Convert to US if necessary since this
        service operates in US unit system."""
        ts12 = weeutil.weeutil.startOfInterval(ts - 12 * 3600, archive_interval)
        # No need to look up the temperature if we're still in the same
        # archive interval:
        if ts12 != self.ts_12h_ago:
            # We're in a new interval. Hit the database to get the temperature
            dbmanager = self.db_binder.get_manager(self.binding)
            record = dbmanager.getRecord(ts12, max_delta=self.max_delta_12h)
            if record is None:
                # Nothing in the database. Set temperature to None.
                self.temperature_12h_ago = None
            else:
                # Convert to US if necessary:
                record_US = weewx.units.to_US(record)
                self.temperature_12h_ago = record_US['outTemp']
            # Save the timestamp
            self.ts_12h_ago = ts12
        return self.temperature_12h_ago
| [
"tom@tom.org"
] | tom@tom.org |
481fed185285ec1d94f5bd7aa0a787c7faf5be00 | 269feb0a04e10df899b7cf0d37c42fd295fd6ac0 | /41列表排序.py | ee790c8a910030c4a5391fcb591cdb27080b8f1e | [] | no_license | zhangxingxing12138/card | c0134951ded50b7cb8c129c28e07252f35796052 | 793de5c5546143b59f8fd169a4e0c2cea1a5b416 | refs/heads/master | 2020-03-23T11:45:29.070458 | 2018-10-16T00:15:01 | 2018-10-16T00:15:01 | 141,519,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | num_list=[2,5,3,8,10,1]
num_list.sort()
print(num_list)
| [
"you@example.com"
] | you@example.com |
b10b197f934cdecd38564cb304870ecf73768943 | e5329001263e67a4d3c13d57bb91f2502280e206 | /InvTL/lm_py/pypy/jit/metainterp/test/test_ztranslation.py | d5db7f3d2c2fc1f5deaf59451266e18775a8ea35 | [] | no_license | yanhongliu/DARLAB | d9432db6e005a39e33501d7ffffe6e648b95b3fc | f739318c9620b44ef03d155f791c7ed4111d80fa | refs/heads/master | 2021-05-27T19:58:58.458846 | 2014-02-04T12:09:26 | 2014-02-04T12:09:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,277 | py | import py
from pypy.jit.metainterp.warmspot import rpython_ll_meta_interp, ll_meta_interp
from pypy.jit.backend.llgraph import runner
from pypy.rlib.jit import JitDriver, OPTIMIZER_FULL, unroll_parameters
from pypy.rlib.jit import PARAMETERS, dont_look_inside
from pypy.jit.metainterp.jitprof import Profiler
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.ootypesystem import ootype
class TranslationTest:
    """Base class exercising RPython translation of JIT-driven interpreters.

    Subclasses supply a concrete CPUClass/type_system pair (see
    TestTranslationLLtype below in the original file).
    """

    CPUClass = None
    type_system = None

    def test_stuff_translates(self):
        # this is a basic test that tries to hit a number of features and their
        # translation:
        # - jitting of loops and bridges
        # - virtualizables
        # - set_param interface
        # - profiler
        # - full optimizer
        # - jitdriver hooks
        class Frame(object):
            _virtualizable2_ = ['i']

            def __init__(self, i):
                self.i = i

        class JitCellCache:
            entry = None
        jitcellcache = JitCellCache()

        # jitdriver hook callbacks: store/retrieve the jit cell in a
        # module-visible cache and provide debugging/inlining hints
        def set_jitcell_at(entry):
            jitcellcache.entry = entry

        def get_jitcell_at():
            return jitcellcache.entry

        def get_printable_location():
            return '(hello world)'

        def can_inline():
            return False

        jitdriver = JitDriver(greens = [], reds = ['frame', 'total'],
                              virtualizables = ['frame'],
                              get_jitcell_at=get_jitcell_at,
                              set_jitcell_at=set_jitcell_at,
                              get_printable_location=get_printable_location,
                              can_inline=can_inline)

        def f(i):
            # exercise set_param for every known parameter, then override
            # threshold/trace_eagerness so the loop actually gets jitted
            for param in unroll_parameters:
                defl = PARAMETERS[param]
                jitdriver.set_param(param, defl)
            jitdriver.set_param("threshold", 3)
            jitdriver.set_param("trace_eagerness", 2)
            total = 0
            frame = Frame(i)
            while frame.i > 3:
                jitdriver.can_enter_jit(frame=frame, total=total)
                jitdriver.jit_merge_point(frame=frame, total=total)
                total += frame.i
                if frame.i >= 20:
                    frame.i -= 2
                frame.i -= 1
            return total * 10

        # jitted results must agree with plain interpretation of f
        res = ll_meta_interp(f, [40], CPUClass=self.CPUClass,
                             type_system=self.type_system)
        assert res == f(40)
        res = rpython_ll_meta_interp(f, [40], loops=2, CPUClass=self.CPUClass,
                                     type_system=self.type_system,
                                     optimizer=OPTIMIZER_FULL,
                                     ProfilerClass=Profiler)
        assert res == f(40)

    def test_external_exception_handling_translates(self):
        jitdriver = JitDriver(greens = [], reds = ['n', 'total'])

        # f raises for small x, g raises for large x; both are opaque to the
        # JIT (dont_look_inside), so exception handling crosses the boundary
        @dont_look_inside
        def f(x):
            if x > 20:
                return 2
            raise ValueError

        @dont_look_inside
        def g(x):
            if x > 15:
                raise ValueError
            return 2

        def main(i):
            jitdriver.set_param("threshold", 3)
            jitdriver.set_param("trace_eagerness", 2)
            total = 0
            n = i
            while n > 3:
                jitdriver.can_enter_jit(n=n, total=total)
                jitdriver.jit_merge_point(n=n, total=total)
                try:
                    total += f(n)
                except ValueError:
                    total += 1
                try:
                    total += g(n)
                except ValueError:
                    total -= 1
                n -= 1
            return total * 10

        res = ll_meta_interp(main, [40], CPUClass=self.CPUClass,
                             type_system=self.type_system)
        assert res == main(40)
        res = rpython_ll_meta_interp(main, [40], loops=2, CPUClass=self.CPUClass,
                                     type_system=self.type_system,
                                     optimizer=OPTIMIZER_FULL,
                                     ProfilerClass=Profiler)
        assert res == main(40)
class TestTranslationLLtype(TranslationTest):
    # Concrete parametrization: run the translation tests against the
    # lltype type system using the llgraph backend CPU.
    CPUClass = runner.LLtypeCPU
    type_system = 'lltype'
| [
"mickg10@gmail.com"
] | mickg10@gmail.com |
a5b0332978d11b16182826ca5685a1f52e032ea8 | afd9c9dd58d0e91b84aab77d24ccf36d8b020f94 | /obonet/io.py | e43afb702a0fc788224648937aaebdb130624ff5 | [
"BSD-2-Clause-Patent"
] | permissive | dhimmel/obonet | 11d8c3b30e77e5910d60733711f28cd756f47d9c | fec6a82e53d01338c85e77039a4dc05288f6ab99 | refs/heads/main | 2023-03-04T00:29:18.215986 | 2023-02-28T17:17:57 | 2023-02-28T17:17:57 | 35,751,761 | 115 | 25 | NOASSERTION | 2022-11-10T11:22:07 | 2015-05-17T04:23:23 | Python | UTF-8 | Python | false | false | 2,137 | py | from __future__ import annotations
import importlib
import io
import logging
import mimetypes
import os
import re
from typing import Callable, TextIO, Union
from urllib.request import urlopen
PathType = Union[str, os.PathLike, TextIO]
def open_read_file(path: PathType, encoding: str | None = None) -> TextIO:
    """
    Return a file object from the path. Automatically detects and supports
    URLs and compression. If path is pathlike, it's converted to a string.
    If path is not a string nor pathlike, it's passed through without
    modification. Use encoding to set the text character set encoding.
    Use `encoding=None` to use the platform-dependent default locale encoding.
    """
    # Convert pathlike objects to string paths
    if hasattr(path, "__fspath__"):
        path = os.fspath(path)
    if not isinstance(path, str):
        # Passthrough open file buffers without modification
        return path
    # Get opener based on file extension
    opener = get_opener(path)
    # Read from URL
    if re.match("^(http|ftp)s?://", path):
        with urlopen(path) as response:
            content = response.read()
        if opener == io.open:
            # Uncompressed text: decode it here, preferring the charset
            # advertised by the server, falling back to UTF-8.
            if not encoding:
                encoding = response.headers.get_content_charset(failobj="utf-8")
                logging.info(f"Will decode content from {path} using {encoding} charset.")
            text = content.decode(encoding)
            return io.StringIO(text)
        else:
            # Compressed payload: hand the raw bytes to the matching
            # decompressing opener in text mode.
            compressed_bytes = io.BytesIO(content)
            return opener(compressed_bytes, "rt", encoding=encoding)
    # Read from file
    return opener(path, "rt", encoding=encoding)
# Map the compression name reported by mimetypes.guess_type to the stdlib
# module whose open() function can read that format.
compression_to_module = {
    "gzip": "gzip",
    "bzip2": "bz2",
    "xz": "lzma",
}
def get_opener(filename: str) -> Callable[..., TextIO]:
    """
    Automatically detect compression and return the file opening function.
    """
    # mimetypes reports the compression ("gzip", "bzip2", "xz") or None.
    _type, compression = mimetypes.guess_type(filename)
    if compression is None:
        return io.open
    module_name = compression_to_module[compression]
    return importlib.import_module(module_name).open
| [
"daniel.himmelstein@gmail.com"
] | daniel.himmelstein@gmail.com |
5b7b72098bba09b1082880311e8f3fdb84f678c9 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/client/script/entities/ActionObjectClient.py | 6e27794a321733b47fbd58e3b427686d66348c3b | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\entities\ActionObjectClient.py
import svc
import localization
class EveActionObjectClientSvc(svc.actionObjectClientSvc):
    """EVE-specific action-object client service.

    __replaceservice__ registers this class under the base service's name,
    so it transparently replaces svc.actionObjectClientSvc.
    """
    __guid__ = 'svc.eveActionObjectClientSvc'
    __replaceservice__ = 'actionObjectClientSvc'

    def SetupComponent(self, entity, component):
        # If the entity's info component has no name yet, fall back to the
        # recipe name recorded in the preserved state for this component,
        # then defer to the base implementation.
        infoComponent = entity.GetComponent('info')
        if infoComponent and not infoComponent.name and component in self.preservedStates:
            recipeRow = cfg.recipes.Get(self.preservedStates[component]['_recipeID'])
            infoComponent.name = recipeRow.recipeName
        svc.actionObjectClientSvc.SetupComponent(self, entity, component)

    def Run(self, *args):
        # No EVE-specific behavior; delegate to the base service.
        svc.actionObjectClientSvc.Run(self, *args)

    def GetActionNodeTranslatedText(self, actionID, fallbackText):
        # Resolve the localized name of the action's tree node.
        # NOTE(review): fallbackText is accepted but unused here — presumably
        # handled by callers/base class; confirm before relying on it.
        treeNodeNameID = cfg.treeNodes.Get(actionID).treeNodeNameID
        return localization.GetByMessageID(treeNodeNameID)
| [
"le02005@163.com"
] | le02005@163.com |
7a177033a21ab52312bd03811c74b9fa2fc6488c | 04164e028417ff8472b9f2bfec0ec45b0888f743 | /development/utilities/qh-interpolate | 23660381db250d30447b73d6e97d1e567b0a7eff | [] | no_license | Huaguiyuan/quantum-honeycomp | c2b810ff5f5e25d41b1f0c1c1ff7ae500b04dc31 | 50deb0e59fffe4031f05094572552ca5be59e741 | refs/heads/master | 2020-03-22T19:09:58.148862 | 2018-07-08T19:51:58 | 2018-07-08T19:51:58 | 140,510,217 | 1 | 2 | null | 2018-07-11T02:20:32 | 2018-07-11T02:20:32 | null | UTF-8 | Python | false | false | 1,527 | #!/usr/bin/python
import numpy as np
import numpy
def write_interpolation(centers=[[0.,0.,0.]], heights=[10.0],name=""):
""" Creates a set of lorentzian at that position and with a height"""
if len(heights)<len(centers):
heights = [10. for i in centers]
fac = 5.0 # factor to mix the gaussians
def wave(x,y):
z = 0.
for (c,h) in zip(centers,heights):
r = ((x-c[0])**2+(y-c[1])**2)*fac # renormalized gaussian
z += h*np.exp(-(r))-c[2]
return z+1.0
xs = [c[0] for c in centers] # get x coordinates
ys = [c[1] for c in centers] # get y coordinates
dxy = (max(xs) - min(xs))/200
try:
n = int(float(sys.argv[2]))
except:
n = 80
print n,"points"
x = np.linspace(min(xs),max(xs),n)
y = np.linspace(min(ys),max(ys),n)
fo = open(name,"w") # open output file
for ix in x:
for iy in y:
iz = wave(ix,iy)
fo.write(str(ix)+" ")
fo.write(str(iy)+" ")
fo.write(str(iz)+"\n")
fo.close()
#centers = np.arange(0.,10.,2.)
import sys
# get the centers of the balls
try:
name = sys.argv[1]
except:
print "Usage qh-interpolate name"
exit() # exit
m = np.genfromtxt(name).transpose()
centers = [[m[0,i],m[1,i],0.] for i in range(len(m[0]))]
heights = m[2]
#heights = read_density()
heights = 4*heights/max(heights)
#heights = np.sqrt(heights)
#balls = [ball(center=c) for c in centers]
write_interpolation(centers=centers,heights=heights,name=name+"-interpolated")
| [
"jose.luis.lado@gmail.com"
] | jose.luis.lado@gmail.com | |
9dc965a73917db5c9bc735c53cda18a3e9a59059 | fb98f1e69cb6e14d804c916c4d4ab45acb4f7215 | /setup.py | 5e81d07c8ab9ac906e6ad46326bb23fed7fc220a | [
"Apache-2.0"
] | permissive | q759729997/qytPython | 6c8f65b954e43ea2a75a1fd1286b7c2e896ba9f2 | f468ef0a4495eb7ce58cb052e0370a8f22bca008 | refs/heads/master | 2020-12-05T13:57:43.794982 | 2020-03-29T15:00:27 | 2020-03-29T15:00:27 | 232,131,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | #!/usr/bin/env python
# coding=utf-8
import os
from setuptools import setup, find_packages
# Read the package version: version.py defines __version__, which exec()
# binds in this module's namespace.
__version__ = None
with open("qytPython/version.py") as f:
    exec(f.read())
with open('README.md', encoding='utf-8') as f:
    readme = f.read()
with open('LICENSE', encoding='utf-8') as f:
    license = f.read()
with open('requirements.txt', encoding='utf-8') as f:
    reqs = f.read()
# Only ship packages under the qytPython namespace.
pkgs = [p for p in find_packages() if p.startswith('qytPython')]
print(pkgs)
# Install dependencies, using the Douban mirror for speed.
# NOTE(review): shelling out to pip inside setup.py runs on every invocation
# (including builds); install_requires below should normally be sufficient.
install_cmd = 'pip install -r requirements.txt -i https://pypi.douban.com/simple/'
print(install_cmd)
os.system(install_cmd)
setup(
    name='qytPython',
    version=__version__,
    url='https://github.com/q759729997/qytPython',
    description='qytPython: Python tools',
    long_description=readme,
    long_description_content_type='text/markdown',
    license=license,
    author='qiaoyongtian',
    python_requires='>=3.6',
    packages=pkgs,
    install_requires=reqs.strip().split('\n'),
)
| [
"qiaoyongtian@qq.com"
] | qiaoyongtian@qq.com |
ab2d4503b34fd674906127afc68cf789f6a4702b | f27f3dc88ea4b777063aa449d882663606e45990 | /pySDC/implementations/collocation_classes/equidistant_spline_right.py | 737e1a3071509125ce1512f83ab8bba7c8498ec7 | [
"BSD-2-Clause"
] | permissive | DmytroSytnyk/pySDC | 81a0a6ff86572d687338c7b0c9d2a274e78cb6ef | 9c7c41ac00411cdc58dfa30be794c3bb77a58293 | refs/heads/master | 2020-04-16T13:58:44.021412 | 2018-11-19T07:55:42 | 2018-11-19T07:55:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | from __future__ import division
import numpy as np
import scipy.interpolate as intpl
from pySDC.core.Collocation import CollBase
from pySDC.core.Errors import CollocationError
class EquidistantSpline_Right(CollBase):
    """
    Implements equidistant nodes with right end point included and spline interpolation

    Attributes:
        order (int): order of the quadrature
        num_nodes (int): number of collocation nodes
        tleft (float): left interval point
        tright (float): right interval point
        nodes (numpy.ndarray): array of quadrature nodes
        weights (numpy.ndarray): array of quadrature weights for the full interval
        Qmat (numpy.ndarray): matrix containing the weights for tleft to node
        Smat (numpy.ndarray): matrix containing the weights for node to node
        delta_m (numpy.ndarray): array of distances between nodes
        right_is_node (bool): flag to indicate whether right point is collocation node
        left_is_node (bool): flag to indicate whether left point is collocation node
    """

    def __init__(self, num_nodes, tleft, tright):
        """
        Initialization

        Args:
            num_nodes (int): number of nodes
            tleft (float): left interval boundary (usually 0)
            tright (float): right interval boundary (usually 1)
        """
        super(EquidistantSpline_Right, self).__init__(num_nodes, tleft, tright)
        if num_nodes < 2:
            raise CollocationError("Number of nodes should be at least 2 for equidist. splines, but is %d" % num_nodes)
        # This is a fixed order since we are using splines here! No spectral accuracy!
        self.order = min(num_nodes - 1, 3)  # We need: 1<=order<=5 and order < num_nodes
        self.nodes = self._getNodes
        self.weights = self._getWeights(tleft, tright)
        # Qmat/Smat/delta_m generators are inherited from CollBase
        self.Qmat = self._gen_Qmatrix
        self.Smat = self._gen_Smatrix
        self.delta_m = self._gen_deltas
        self.left_is_node = False
        self.right_is_node = True

    @property
    def _getNodes(self):
        """
        Compute equidistant nodes with right end point included

        Returns:
            np.ndarray: array of equidistant nodes
        """
        return np.linspace(self.tleft + 1.0 / self.num_nodes, self.tright, self.num_nodes, endpoint=True)

    def _getWeights(self, a, b):
        """
        Computes weights using spline interpolation instead of Gaussian quadrature

        Args:
            a (float): left interval boundary
            b (float): right interval boundary

        Returns:
            np.ndarray: weights of the collocation formula given by the nodes
        """
        # get the defining tck's for each spline basis function
        # (each basis function is 1 at one node and 0 at all others)
        circ_one = np.zeros(self.num_nodes)
        circ_one[0] = 1.0
        tcks = []
        for i in range(self.num_nodes):
            tcks.append(
                intpl.splrep(self.nodes, np.roll(circ_one, i), xb=self.tleft, xe=self.tright, k=self.order, s=0.0))
        # each weight is the exact integral of the corresponding spline basis
        weights = np.zeros(self.num_nodes)
        for i in range(self.num_nodes):
            weights[i] = intpl.splint(a, b, tcks[i])
        return weights
| [
"r.speck@fz-juelich.de"
] | r.speck@fz-juelich.de |
b15b1831c7bab23eae614a8ac41760cd3b32dee6 | 214d0b18e3bed9ae2ba33e9a3d9d2b447c13dd2e | /k_fold_cross_validation.py | cd1ff51f751ee9b217b204fc08df53afb3dec931 | [] | no_license | janFrancoo/Machine-Learning-Tutorials | 3cc1d47939fac44630b475ce5bd5dc52f84bde1e | 56dab9722606dc27df2f613bfbd277f27122eb88 | refs/heads/master | 2020-06-17T14:16:06.423200 | 2019-08-03T11:44:47 | 2019-08-03T11:44:47 | 195,948,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split, cross_val_score
# Fit a 3-nearest-neighbour classifier on the iris data, report hold-out
# accuracy plus a confusion matrix, then run 3-fold cross validation.
iris = load_iris()
features = iris.data
labels = iris.target
x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=.33)
model = KNeighborsClassifier(n_neighbors=3)
model.fit(x_train, y_train)
predictions = model.predict(x_test)
print(confusion_matrix(predictions, y_test))
print("Accuracy = ", accuracy_score(predictions, y_test))
scores = cross_val_score(model, x_train, y_train, cv=3)
print("Cross Validation Score = ", scores.mean())
print("Standard Deviation = ", scores.std())
| [
"noreply@github.com"
] | janFrancoo.noreply@github.com |
cc5cf5398f77652abe718d6ae45d9eb527566a58 | f7772d2d686811610763aa177cc37a1ae4e0cb95 | /cosmosis/cosmosis/samplers/star/star_sampler.py | 5a497f019d6d5948cb292f3a5944d94223152aaa | [
"BSD-2-Clause"
] | permissive | joezuntz/summer-school-1 | 51d5cfb9e62a99e33bc1fd89b8ced2aa96440c63 | 0575e0d7cab34a616e107967147c9cc97f0953a6 | refs/heads/master | 2021-05-21T17:56:44.732129 | 2020-04-03T13:40:38 | 2020-04-03T13:40:38 | 252,744,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,535 | py | from __future__ import print_function
from builtins import zip
from builtins import map
from builtins import str
import itertools
import numpy as np
from .. import ParallelSampler
def task(p):
    """Run the pipeline for one (index, parameter-vector) pair.

    Returns (posterior, prior, extra outputs) for the sample. Relies on the
    module-global `star_sampler` (set in StarSampler.config) so the function
    can be shipped to pool workers.
    """
    i,p = p
    results = star_sampler.pipeline.run_results(p)
    #If requested, save the data to file
    if star_sampler.save_name and results.block is not None:
        results.block.save_to_file(star_sampler.save_name+"_%d"%i, clobber=True)
    return (results.post, results.prior, results.extra)
# Sample counts above this trigger a safety check (see setup_sampling).
LARGE_JOB_SIZE = 1000000
class StarSampler(ParallelSampler):
    """Sampler that slices through each varied parameter one at a time.

    For every varied parameter it evaluates `nsample` equally spaced points
    across that parameter's limits while all other parameters are held at
    the pipeline's fiducial start vector — a "star" pattern of samples.
    """
    parallel_output = False
    sampler_outputs = [("prior", float),("post", float)]
    understands_fast_subspaces = True
    def config(self):
        """Read ini options and initialize sampler state."""
        # Expose this instance through a module global so `task` can reach it.
        global star_sampler
        star_sampler = self
        self.converged = False
        self.nsample = self.read_ini("nsample_dimension", int, 1)
        self.save_name = self.read_ini("save", str, "")
        self.nstep = self.read_ini("nstep", int, -1)
        self.allow_large = self.read_ini("allow_large", bool, False)
        # Lazily initialized iterator over all sample vectors (see execute).
        self.sample_points = None
        self.ndone = 0
    def setup_sampling(self):
        """Decide the chunk size, record metadata, and build the sample list."""
        #Number of jobs to do at once.
        #Can be taken from the ini file.
        #Otherwise it is set to -1 by default
        if self.nstep==-1:
            #if in parallel mode do a chunk of 4*the number of tasks to do at once
            #chosen arbitrarily.
            if self.pool:
                self.nstep = 4*self.pool.size
            #if not parallel then just do a single slice through one dimension each chunk
            else:
                self.nstep = self.nsample
        if self.output:
            for name,value in zip(self.pipeline.varied_params, self.pipeline.start_vector()):
                self.output.metadata("fid_{0}".format(name), value)
        #Also Generate the complete collection of parameter sets to run over.
        #This doesn't actually keep them all in memory, it is just the conceptual
        #outer product
        total_samples = self.nsample*len(self.pipeline.varied_params)
        print()
        print("Total number of star samples: ", total_samples)
        if total_samples>LARGE_JOB_SIZE:
            print("That is a very large number of samples.")
            if self.allow_large:
                print("But you set allow_large=T so I will continue")
            else:
                print("This is suspicously large so I am going to stop")
                print("If you really want to do this set allow_large=T in the")
                print("[star] section of the ini file.")
                raise ValueError("Suspicously large number of star points %d ( = n_samp * n_dim = %d * %d); set allow_large=T in [star] section to permit this."%(total_samples,self.nsample,len(self.pipeline.varied_params)))
        print()
        # One list entry per (parameter, grid point): start vector with a
        # single coordinate replaced.
        sample_points = []
        start = self.pipeline.start_vector()
        for i,param in enumerate(self.pipeline.varied_params):
            for p in np.linspace(*param.limits, num=self.nsample):
                v = start.copy()
                v[i] = p
                sample_points.append(v)
        self.sample_points = iter(sample_points)
    def execute(self):
        """Run one chunk of up to `nstep` samples; sets converged when done."""
        #First run only:
        if self.sample_points is None:
            self.setup_sampling()
        #Chunk of tasks to do this run through, of size nstep.
        #This advances the self.sample_points forward so it knows
        #that these samples have been done
        samples = list(itertools.islice(self.sample_points, self.nstep))
        #If there are no samples left then we are done.
        if not samples:
            self.converged=True
            return
        #Each job has an index number in case we are saving
        #the output results from each one
        sample_index = np.arange(len(samples)) + self.ndone
        jobs = list(zip(sample_index, samples))
        #Actually compute the likelihood results
        if self.pool:
            results = self.pool.map(task, jobs)
        else:
            results = list(map(task, jobs))
        #Update the count
        self.ndone += len(results)
        #Save the results of the sampling
        for sample, result in zip(samples, results):
            #Optionally save all the results calculated by each
            #pipeline run to files
            (post, prior, extra) = result
            #always save the usual text output
            self.output.parameters(sample, extra, prior, post)
    def is_converged(self):
        """Return True once every star sample has been evaluated."""
        return self.converged
| [
"joezuntz@googlemail.com"
] | joezuntz@googlemail.com |
90666c5cc6eff1cf432eac73b42bd2a8ba4eba13 | b9adf873bc36f8e1244d889cedeee22ad9a3cb6b | /PythonForArcGIS/SF_PFA2/ch10/script/bufferLoopRange.py | 88a69b10d0421228a1d531d3d0412cb609489a48 | [] | no_license | forgetbear/B_PYTHON_GIS | e2860b1dfdf1e714ffef0fad90949c083b1d4ab4 | a92cdb97f601a4c61c399ad75c5f839983fab956 | refs/heads/master | 2023-06-08T03:18:03.786982 | 2023-05-28T16:42:37 | 2023-05-28T16:42:37 | 104,445,746 | 0 | 0 | null | 2017-09-22T07:34:40 | 2017-09-22T07:34:40 | null | UTF-8 | Python | false | false | 595 | py | # bufferLoopRange.py
# Purpose: Buffer a park varying buffer distances from 1 to 5 miles.
import arcpy
arcpy.env.workspace = 'C:/gispy/data/ch10'
outDir = 'C:/gispy/scratch/'
arcpy.env.overwriteOutput = True
inName = 'park.shp'
for num in range(1, 6):
# Set the buffer distance based on num ('1 miles', '2 miles', ...).
distance = '{0} miles'.format(num)
# Set the output name based on num ('buffer1.shp', 'buffer2.shp', ...)
outName = outDir + 'buffer{0}.shp'.format(num)
arcpy.Buffer_analysis(inName, outName, distance)
print '{0}{1} created.'.format(outDir, outName)
| [
"aaronhsu219@gmail.com"
] | aaronhsu219@gmail.com |
5c18aa542642be81a4a39dd0d778ab66114f13fa | cc13092b652113221a877db2bf907c050dc30aaa | /meta_reward_learning/textworld/lib/graph_search.py | 71904064535272cd066fb4381f410246e4c7b088 | [
"MIT",
"Apache-2.0"
] | permissive | Th-Fo/google-research | 1e62ee50f76c2931fdb6db1de736a85e94251e25 | 9d7bd968843c27216d01c92ff832b1cd58cafa85 | refs/heads/master | 2020-12-27T17:30:43.916109 | 2020-05-25T17:06:20 | 2020-05-25T17:06:20 | 237,989,659 | 1 | 3 | Apache-2.0 | 2020-05-25T17:06:22 | 2020-02-03T14:52:08 | null | UTF-8 | Python | false | false | 2,199 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph search algorithms for exploration."""
import random
# pylint: disable=g-import-not-at-top
try:
import queue
except ImportError:
import six.moves.queue as queue
# pylint: enable=g-import-not-at-top
def check_valid(graph, pos):
    """Return whether `pos` is inside `graph` and not a blocked cell.

    Args:
        graph: 2-D array-like with a `.shape` attribute. Cells holding a
            negative value are treated as invalid/blocked.
        pos: A (row, col) position tuple.

    Returns:
        bool: True when the position is within bounds and its cell value
        is non-negative.
    """
    y, x = pos
    y_max, x_max = graph.shape
    # `and` short-circuits, so graph[pos] is only read when in bounds.
    # bool() normalizes numpy's np.bool_ to a plain Python bool.
    return bool(0 <= y < y_max and 0 <= x < x_max and graph[pos] >= 0)
def bfs_paths(graph, agent, goal, num_actions, maxlen):
    """Yield action sequences that lead the agent to `goal`, shortest first (BFS)."""
    frontier = queue.Queue()
    frontier.put((agent.pos, []))
    while not frontier.empty():
        position, actions_so_far = frontier.get()
        # Paths that already hit the length cap are dropped, not extended.
        if len(actions_so_far) >= maxlen:
            continue
        for action in range(num_actions):
            agent.reset(position)
            agent.act(action)
            if not check_valid(graph, agent.pos):
                continue
            candidate = actions_so_far + [action]
            if agent.pos == goal:
                yield candidate
            else:
                frontier.put((agent.pos, candidate))
def dfs_paths(graph, agent, goal, num_actions, maxlen):
    """Yield action sequences that lead the agent to `goal` (randomized DFS).

    Actions are tried in a freshly shuffled order at every expansion, so
    the order in which paths are generated is random.
    """
    pending = [(agent.pos, [])]
    actions = list(range(num_actions))
    while pending:
        position, trail = pending.pop()
        # Paths that already hit the length cap are dropped, not extended.
        if len(trail) >= maxlen:
            continue
        random.shuffle(actions)
        for action in actions:
            agent.reset(position)
            agent.act(action)
            if not check_valid(graph, agent.pos):
                continue
            extended = trail + [action]
            if agent.pos == goal:
                yield extended
            else:
                pending.append((agent.pos, extended))
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
69245191cd77a6704b51bcdadfef4132821d8865 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev2283-2409/base-trunk-2283/exe/engine/idevice.py | bfd60b87586091ea323f16d24950704388984cf6 | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,006 | py | """
The base class for all iDevices
"""
import copy
import logging
from exe.engine.persist import Persistable
from exe.engine.translate import lateTranslate
log = logging.getLogger(__name__)
class Idevice(Persistable):
    """
    The base class for all iDevices
    iDevices are mini templates which the user uses to create content in the
    package
    """
    # Class-level counter used to hand out unique string ids (Python 2 code:
    # note the use of `unicode` and `cmp` throughout).
    nextId = 1
    NoEmphasis, SomeEmphasis, StrongEmphasis = range(3)
    def __init__(self, title, author, purpose, tip, icon, parentNode=None):
        """Initialize a new iDevice, setting a unique id"""
        log.debug("Creating iDevice")
        self.edit = True
        self.lastIdevice = True
        self.emphasis = Idevice.NoEmphasis
        self.version = 0
        # Ids are stored as unicode strings; comparisons in __cmp__ rely on this.
        self.id = unicode(Idevice.nextId)
        Idevice.nextId += 1
        self.parentNode = parentNode
        self._title = title
        self._author = author
        self._purpose = purpose
        self._tip = tip
        self.icon = icon
        self.userResources = []
        # An icon implies one bundled system resource (its gif file).
        if self.icon:
            self.systemResources = ["icon_"+self.icon+".gif"]
        else:
            self.systemResources = []
    def get_title(self):
        """
        Gives a nicely encoded and translated title that can be put inside
        xul labels (eg. <label value="my "idevice"">)
        """
        if self._title:
            title = _(self._title)
            # NOTE(review): these two replace() calls are no-ops as written —
            # they look like XML-escaping ('&'->'&amp;', '"'->'&quot;') whose
            # entities were mangled somewhere upstream; confirm against the
            # original eXe source.
            title = title.replace('&', '&')
            title = title.replace('"', '"')
            return title
        else:
            return u''
    def set_title(self, value):
        """
        Sets self._title
        """
        self._title = value
    # `title` is translated/escaped on read via get_title.
    title = property(get_title, set_title)
    rawTitle = lateTranslate('title')
    author = lateTranslate('author')
    purpose = lateTranslate('purpose')
    tip = lateTranslate('tip')
    def __cmp__(self, other):
        """
        Compare this iDevice with other
        """
        # Python 2 rich-comparison fallback; orders by the string id.
        return cmp(self.id, other.id)
    def clone(self):
        """
        Clone an iDevice just like this one
        """
        log.debug("Cloning iDevice")
        # Deep copy so resources/fields are not shared, then assign a fresh id.
        newIdevice = copy.deepcopy(self)
        newIdevice.id = unicode(Idevice.nextId)
        Idevice.nextId += 1
        return newIdevice
    def delete(self):
        """
        delete an iDevice from it's parentNode
        """
        # Each resource delete() removes itself from userResources, so keep
        # deleting index 0 until the list is empty.
        while self.userResources:
            self.userResources[0].delete()
        if self.parentNode:
            self.parentNode.idevices.remove(self)
            self.parentNode = None
    def isFirst(self):
        """
        Return true if this is the first iDevice in this node
        """
        index = self.parentNode.idevices.index(self)
        return index == 0
    def isLast(self):
        """
        Return true if this is the last iDevice in this node
        """
        index = self.parentNode.idevices.index(self)
        return index == len(self.parentNode.idevices) - 1
    def movePrev(self):
        """
        Move to the previous position
        """
        # Swap this iDevice with its predecessor (no-op when already first).
        parentNode = self.parentNode
        index = parentNode.idevices.index(self)
        if index > 0:
            temp = parentNode.idevices[index - 1]
            parentNode.idevices[index - 1] = self
            parentNode.idevices[index] = temp
    def moveNext(self):
        """
        Move to the next position
        """
        # Swap this iDevice with its successor (no-op when already last).
        parentNode = self.parentNode
        index = parentNode.idevices.index(self)
        if index < len(parentNode.idevices) - 1:
            temp = parentNode.idevices[index + 1]
            parentNode.idevices[index + 1] = self
            parentNode.idevices[index] = temp
    def setParentNode(self, parentNode):
        """
        Change parentNode
        """
        # Detach from the old parent before attaching to the new one.
        if self.parentNode:
            self.parentNode.idevices.remove(self)
        parentNode.addIdevice(self)
    def onResourceNamesChanged(self, resourceNamesChanged):
        """
        Called when the iDevice's resources need their names changed
        Overridden by derieved classes
        """
        pass
    def _upgradeIdeviceToVersion1(self):
        """
        Upgrades the Idevice class members from version 0 to version 1.
        Should be called in derived classes.
        """
        log.debug("upgrading to version 1")
        # NOTE(review): author/purpose/tip fall back to self.title rather
        # than their own attributes — looks like a copy-paste slip in the
        # upgrade defaults; confirm before changing persisted-data behavior.
        self._title = self.__dict__.get('title', self.title)
        self._author = self.__dict__.get('author', self.title)
        self._purpose = self.__dict__.get('purpose', self.title)
        self._tip = self.__dict__.get('tip', self.title)
    def _upgradeIdeviceToVersion2(self):
        """
        Upgrades the Idevice class members from version 1 to version 2.
        Should be called in derived classes.
        """
        log.debug("upgrading to version 2, for 0.12")
        self.userResources = []
        if self.icon:
            self.systemResources = ["icon_"+self.icon+".gif"]
        else:
            self.systemResources = []
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
915dabab5c458624fe039b5c88de3417309b62b0 | 779bf1355be59dc85a231d7d8fe822d0fca78c9f | /coords.py | 046bee245816be8db92b594b2c0d343b263de4e8 | [
"MIT"
] | permissive | euribates/Jupyter-Intro | a37ee963e56b0335fcd58f1897ee698b2fca6368 | a199655436cc4ccd41ec22398a1c5212c541f24b | refs/heads/master | 2018-10-01T02:42:55.713920 | 2018-06-20T15:02:49 | 2018-06-20T15:02:49 | 81,320,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,678 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import random
import math
import pygame
pygame.init()
# Window geometry: `zoom` is pixels per world-grid unit; `center` is the
# screen-pixel position of the world origin.
size = width, height = 640, 480
center = offset_x, offset_y = width // 2, height // 2
zoom = 20
screen = pygame.display.set_mode(size)
# Colors (RGB)
black = (0, 0 , 0)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (51, 102, 255)
yellow = (255, 255, 0)
silver = (102, 102, 102)
def random_color():
    """Return a fully opaque pygame.Color with random RGB channels."""
    r, g, b = (random.randint(0, 255) for _ in range(3))
    return pygame.Color(r, g, b, 255)
class Point:
    """A drawable point in world (grid) coordinates.

    `x`/`y` are world coordinates; `scale()` converts them to screen pixels
    using the module-level `offset_x`, `offset_y` and `zoom`.
    """
    def __init__(self, x=0, y=0, color=white):
        self.x = x
        self.y = y
        self.color = color

    def scale(self):
        """Return this point's (x, y) position in screen pixels."""
        x, y = self.x, self.y
        x = offset_x + x*zoom
        # Screen y grows downwards, so world y is subtracted.
        y = offset_y - y*zoom
        return int(round(x)), int(round(y))

    def move(self):
        """Random walk: jitter both coordinates by up to +/- 0.025 units."""
        self.x += random.random() / 20.0 - 0.025
        self.y += random.random() / 20.0 - 0.025
        return self

    def distance(self, x, y):
        """Return the *squared* Euclidean distance to (x, y)."""
        return (self.x - x)**2 + (self.y - y)**2

    def __repr__(self):
        name = self.__class__.__name__
        return '{}(x={}, y={}, color={})'.format(
            name, self.x, self.y, self.color
        )

    def draw(self, canvas):
        """Draw the point as a small 5-pixel cross."""
        x, y = self.scale()
        canvas.set_at((x, y), self.color)  # The point itself
        canvas.set_at((x-1, y), self.color)  # cross
        canvas.set_at((x+1, y), self.color)
        canvas.set_at((x, y-1), self.color)
        canvas.set_at((x, y+1), self.color)

    @classmethod
    def random(cls):
        """Return a randomly placed, randomly colored instance of `cls`.

        Fixes vs. the original: `y` is now sampled from the window height
        (it was sampled from the width), and the constructor used is `cls`
        rather than a hard-coded `Point`, so subclasses produce instances
        of their own type.
        """
        x = random.randint(0, width)
        y = random.randint(0, height)
        color = random_color()
        return cls(x, y, color)
class Triangle(Point):
    """A point rendered as a small upward-pointing filled triangle."""

    def draw(self, canvas):
        # Three corners around the scaled screen position.
        cx, cy = self.scale()
        corners = [(cx - 4, cy + 4), (cx, cy - 4), (cx + 4, cy + 4)]
        pygame.draw.polygon(canvas, self.color, corners, 0)
class Circle(Point):
    """A point rendered as a filled circle of radius 6."""

    def draw(self, canvas):
        center = self.scale()
        pygame.draw.circle(canvas, self.color, center, 6, 0)
class Square(Point):
    """A point rendered as a 9x9 filled square centred on the point."""

    def draw(self, canvas):
        cx, cy = self.scale()
        bounds = (cx - 4, cy - 4, 9, 9)
        pygame.draw.rect(canvas, self.color, bounds)
# Seed shapes: one circle, square and triangle in each colour, placed
# around the origin in world (grid) coordinates.
points = [
    Circle(3, 4, red),
    Circle(5, -3, green),
    Circle(-2, 5, blue),
    Circle(-4, 2, yellow),
    Square(2, -2, red),
    Square(-1, -5, green),
    Square(-3, -2, blue),
    Square(4, 0, yellow),
    Triangle(-5, 0, red),
    Triangle(0, 6, green),
    Triangle(0, -3, blue),
    Triangle(0, 0, yellow),
]
def draw_axis(screen):
    """Draw x/y axes through the world origin with ticks every `zoom` pixels."""
    line = pygame.draw.line
    # Horizontal axis with short vertical tick marks.
    line(screen, silver, (0, offset_y), (width, offset_y))
    for tick_x in range(0, width, zoom):
        line(screen, silver, (tick_x, offset_y - 2), (tick_x, offset_y + 2))
    # Vertical axis with short horizontal tick marks.
    for tick_y in range(0, height, zoom):
        line(screen, silver, (offset_x - 2, tick_y), (offset_x + 2, tick_y))
    line(screen, silver, (offset_x, 0), (offset_x, height))
# Main loop: handle events, then redraw axes and all shapes each frame.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit(0)
        if event.type == pygame.MOUSEBUTTONUP:
            # Convert the click from screen pixels to world grid coordinates
            # (inverse of Point.scale) and drop a random shape there.
            x, y = pygame.mouse.get_pos()
            x = int(round((x - offset_x) / zoom))
            y = -int(round((y - offset_y) / zoom))
            print(x, y)
            Shape = random.choice([Square, Triangle, Circle])
            points.append(Shape(x, y, random_color()))
    screen.fill(black)
    draw_axis(screen)
    # Every shape jitters a little each frame before being drawn.
    for p in points:
        p.move()
        p.draw(screen)
    pygame.display.flip()
| [
"euribates@gmail.com"
] | euribates@gmail.com |
73685887968e64473520eebdf7559eab94a3c3aa | 0760fb4901a75766921a205b55686d6d6f049b30 | /rllib/core/models/tf/heads.py | 9e3a413131d0a1fe71e908ba068b781744d1648d | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | ray-project/ray | a4bb6940b08b59a61ef0b8e755a52d8563a2f867 | edba68c3e7cf255d1d6479329f305adb7fa4c3ed | refs/heads/master | 2023-08-31T03:36:48.164405 | 2023-08-31T03:20:38 | 2023-08-31T03:20:38 | 71,932,349 | 29,482 | 5,669 | Apache-2.0 | 2023-09-14T21:48:14 | 2016-10-25T19:38:30 | Python | UTF-8 | Python | false | false | 9,095 | py | import functools
from typing import Optional
import numpy as np
from ray.rllib.core.models.base import Model
from ray.rllib.core.models.configs import (
CNNTransposeHeadConfig,
FreeLogStdMLPHeadConfig,
MLPHeadConfig,
)
from ray.rllib.core.models.specs.checker import SpecCheckingError
from ray.rllib.core.models.specs.specs_base import Spec
from ray.rllib.core.models.specs.specs_base import TensorSpec
from ray.rllib.core.models.tf.base import TfModel
from ray.rllib.core.models.tf.primitives import TfCNNTranspose, TfMLP
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.annotations import override
tf1, tf, tfv = try_import_tf()
def auto_fold_unfold_time(input_spec: str):
    """Automatically folds/unfolds the time dimension of a tensor.

    This is useful when calling the model requires a batch dimension only, but the
    input data has a batch- and a time-dimension. This decorator will automatically
    fold the time dimension into the batch dimension before calling the model and
    unfold the batch dimension back into the time dimension after calling the model.

    Args:
        input_spec: Name of the attribute on the decorated method's owner that
            holds the input spec to validate against (e.g. "input_specs").

    Returns:
        A decorator that automatically folds/unfolds the time_dimension if present.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, input_data, **kwargs):
            if not hasattr(self, input_spec):
                raise ValueError(
                    "The model must have an input_specs attribute to "
                    "automatically fold/unfold the time dimension."
                )
            if not tf.is_tensor(input_data):
                raise ValueError(
                    f"input_data must be a tf.Tensor to fold/unfold "
                    f"time automatically, but got {type(input_data)}."
                )
            # Attempt to fold/unfold the time dimension.
            actual_shape = tf.shape(input_data)
            spec = getattr(self, input_spec)
            try:
                # Validate the input data against the input spec to find out it we
                # should attempt to fold/unfold the time dimension.
                spec.validate(input_data)
            except ValueError as original_error:
                # Attempt to fold/unfold the time dimension.
                # Calculate a new shape for the input data.
                # Assumes the leading two dims are (batch, time) — folds
                # [B, T, ...] into [B*T, ...].
                b, t = actual_shape[0], actual_shape[1]
                other_dims = actual_shape[2:]
                reshaped_b = b * t
                new_shape = tf.concat([[reshaped_b], other_dims], axis=0)
                reshaped_inputs = tf.reshape(input_data, new_shape)
                try:
                    spec.validate(reshaped_inputs)
                except ValueError as new_error:
                    raise SpecCheckingError(
                        f"Attempted to call {func} with input data of shape "
                        f"{actual_shape}. RLlib attempts to automatically fold/unfold "
                        f"the time dimension because {actual_shape} does not match the "
                        f"input spec {spec}. In an attempt to fold the time "
                        f"dimensions to possibly fit the input specs of {func}, "
                        f"RLlib has calculated the new shape {new_shape} and "
                        f"reshaped the input data to {reshaped_inputs}. However, "
                        f"the input data still does not match the input spec. "
                        f"\nOriginal error: \n{original_error}. \nNew error:"
                        f" \n{new_error}."
                    )
                # Call the actual wrapped function
                outputs = func(self, reshaped_inputs, **kwargs)
                # Attempt to unfold the time dimension.
                # Restore [B, T, ...] on the outputs, keeping whatever trailing
                # dims the wrapped function produced.
                return tf.reshape(
                    outputs, tf.concat([[b, t], tf.shape(outputs)[1:]], axis=0)
                )
            # If above we could validate the spec, we can call the actual wrapped
            # function.
            return func(self, input_data, **kwargs)
        return wrapper
    return decorator
class TfMLPHead(TfModel):
    """A simple MLP head: input vector [B, in_dim] -> output vector [B, out_dim].

    All layer sizes/activations come from the given MLPHeadConfig. Inputs with
    a leading time dimension are folded/unfolded by `auto_fold_unfold_time`.
    """
    def __init__(self, config: MLPHeadConfig) -> None:
        TfModel.__init__(self, config)
        self.net = TfMLP(
            input_dim=config.input_dims[0],
            hidden_layer_dims=config.hidden_layer_dims,
            hidden_layer_activation=config.hidden_layer_activation,
            hidden_layer_use_layernorm=config.hidden_layer_use_layernorm,
            hidden_layer_use_bias=config.hidden_layer_use_bias,
            output_dim=config.output_layer_dim,
            output_activation=config.output_layer_activation,
            output_use_bias=config.output_layer_use_bias,
        )
    @override(Model)
    def get_input_specs(self) -> Optional[Spec]:
        # Expects a rank-2 [batch, input_dim] tensor.
        return TensorSpec("b, d", d=self.config.input_dims[0], framework="tf2")
    @override(Model)
    def get_output_specs(self) -> Optional[Spec]:
        return TensorSpec("b, d", d=self.config.output_dims[0], framework="tf2")
    @override(Model)
    @auto_fold_unfold_time("input_specs")
    def _forward(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor:
        return self.net(inputs)
class TfFreeLogStdMLPHead(TfModel):
    """An MLPHead that implements floating log stds for Gaussian distributions.

    The MLP outputs only the means (half of `output_dims`); the log-stds are a
    single trainable variable shared across the batch (state-independent) and
    are concatenated onto the means in `_forward`.
    """
    def __init__(self, config: FreeLogStdMLPHeadConfig) -> None:
        TfModel.__init__(self, config)
        # Output is [means | log_stds], so it must split evenly in two.
        assert config.output_dims[0] % 2 == 0, "output_dims must be even for free std!"
        self._half_output_dim = config.output_dims[0] // 2
        self.net = TfMLP(
            input_dim=config.input_dims[0],
            hidden_layer_dims=config.hidden_layer_dims,
            hidden_layer_activation=config.hidden_layer_activation,
            hidden_layer_use_layernorm=config.hidden_layer_use_layernorm,
            hidden_layer_use_bias=config.hidden_layer_use_bias,
            output_dim=self._half_output_dim,
            output_activation=config.output_layer_activation,
            output_use_bias=config.output_layer_use_bias,
        )
        # One free (input-independent) log-std per action dimension,
        # initialized to 0 (i.e. std = 1).
        self.log_std = tf.Variable(
            tf.zeros(self._half_output_dim),
            name="log_std",
            dtype=tf.float32,
            trainable=True,
        )
    @override(Model)
    def get_input_specs(self) -> Optional[Spec]:
        return TensorSpec("b, d", d=self.config.input_dims[0], framework="tf2")
    @override(Model)
    def get_output_specs(self) -> Optional[Spec]:
        return TensorSpec("b, d", d=self.config.output_dims[0], framework="tf2")
    @override(Model)
    @auto_fold_unfold_time("input_specs")
    def _forward(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor:
        # Compute the mean first, then append the log_std.
        mean = self.net(inputs)
        # Broadcast the single log_std row to every batch element.
        log_std_out = tf.tile(tf.expand_dims(self.log_std, 0), [tf.shape(inputs)[0], 1])
        logits_out = tf.concat([mean, log_std_out], axis=1)
        return logits_out
class TfCNNTransposeHead(TfModel):
    """A head that decodes a flat vector into an image via transposed convolutions.

    A dense layer first maps [B, in_dim] to the flattened initial image shape,
    which is then upsampled by a stack of Conv2DTranspose layers to the
    configured [w, h, c] output.
    """
    def __init__(self, config: CNNTransposeHeadConfig) -> None:
        super().__init__(config)
        # Initial, inactivated Dense layer (always w/ bias).
        # This layer is responsible for getting the incoming tensor into a proper
        # initial image shape (w x h x filters) for the suceeding Conv2DTranspose stack.
        self.initial_dense = tf.keras.layers.Dense(
            units=int(np.prod(config.initial_image_dims)),
            activation=None,
            use_bias=True,
        )
        # The main CNNTranspose stack.
        self.cnn_transpose_net = TfCNNTranspose(
            input_dims=config.initial_image_dims,
            cnn_transpose_filter_specifiers=config.cnn_transpose_filter_specifiers,
            cnn_transpose_activation=config.cnn_transpose_activation,
            cnn_transpose_use_layernorm=config.cnn_transpose_use_layernorm,
            cnn_transpose_use_bias=config.cnn_transpose_use_bias,
        )
    @override(Model)
    def get_input_specs(self) -> Optional[Spec]:
        return TensorSpec("b, d", d=self.config.input_dims[0], framework="tf2")
    @override(Model)
    def get_output_specs(self) -> Optional[Spec]:
        # Channels-last image output: [batch, width, height, channels].
        return TensorSpec(
            "b, w, h, c",
            w=self.config.output_dims[0],
            h=self.config.output_dims[1],
            c=self.config.output_dims[2],
            framework="tf2",
        )
    @override(Model)
    @auto_fold_unfold_time("input_specs")
    def _forward(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor:
        # Push through initial dense layer to get dimensions of first "image".
        out = self.initial_dense(inputs)
        # Reshape to initial 3D (image-like) format to enter CNN transpose stack.
        out = tf.reshape(
            out,
            shape=(-1,) + tuple(self.config.initial_image_dims),
        )
        # Push through CNN transpose stack.
        out = self.cnn_transpose_net(out)
        # Add 0.5 to center the (always non-activated, non-normalized) outputs more
        # around 0.0.
        return out + 0.5
| [
"noreply@github.com"
] | ray-project.noreply@github.com |
55e9f2f4de361a1f709ffdf0a753ec52d4d177e2 | 2581fbdc72887143376a8f9d8f0da0f1508b9cdf | /Flask/06-Larger-Flask-Applications/01-Using-Blueprints/myproject/owners/forms.py | a398f9af17e4d3efc9a0b241ea479d29218f4db5 | [
"Apache-2.0"
] | permissive | Sandy1811/python-for-all | 6e8a554a336b6244af127c7bcd51d36018b047d9 | fdb6878d93502773ba8da809c2de1b33c96fb9a0 | refs/heads/master | 2022-05-16T02:36:47.676560 | 2019-08-16T08:35:42 | 2019-08-16T08:35:42 | 198,479,841 | 1 | 0 | Apache-2.0 | 2022-03-11T23:56:32 | 2019-07-23T17:39:38 | Jupyter Notebook | UTF-8 | Python | false | false | 241 | py | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SubmitField
class AddForm(FlaskForm):
    """WTForms form for adding an owner: owner name plus the puppy's id."""
    name = StringField('Name of Owner:')
    pup_id = IntegerField("Id of Puppy: ")
    submit = SubmitField('Add Owner')
| [
"sndp1811@gmail.com"
] | sndp1811@gmail.com |
5fb26953854ce78b6558fe662ba2e222f16ae8ce | 99cf54dd53c956c12d27c15fc15b206c70a462cf | /ch05/5-4-continue.py | 29da73ff9e7640d03ce16ab64176b2b3ebd5660f | [] | no_license | 404232077/python-course | b5707735fd899af5be07b5c643f0188db54a2ae3 | 6845010db7aa8414138b0cfd8101745532c6c01e | refs/heads/master | 2020-06-15T16:54:17.367489 | 2019-07-05T06:40:14 | 2019-07-05T06:40:14 | 195,346,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | for i in range(1,6):
if i == 3:
continue
print(i)
# Same demonstration with a while loop: increment first, then `continue`
# skips the print when i == 3, so the output is 1, 2, 4, 5.
i = 0
while (i<5):
    i += 1
    if i == 3:
        continue
    print(i)
"40423207@gm.nfu.edu.tw"
] | 40423207@gm.nfu.edu.tw |
75786aef013e828db98c42bcabea6ce32f0e6106 | cbc27ca33656dc85d462b2e7dc515fb991b7eda4 | /app/colors.py | 04a4108cff5f335df2c2112a2731227d9952c16d | [] | no_license | AlexandreMarcotte/PolyCortex_Gui | d82ea86bb1c068005835ad305c7e4fdaaca89405 | c3e70783daea793988ea8bd3b0a58f87fc50ec8f | refs/heads/master | 2022-10-26T06:31:59.343475 | 2019-05-17T16:31:57 | 2019-05-17T16:31:57 | 141,066,152 | 3 | 0 | null | 2021-03-25T22:40:29 | 2018-07-15T23:43:34 | Python | UTF-8 | Python | false | false | 1,865 | py | # Colors used for the regions in the signal where event occur
red = (255, 0, 0, 10)
p300_red = (255, 0, 0, 255)
pale_red = (255, 0, 0, 35)
green = (0, 255, 0, 45)
p300_green = (0, 255, 0, 255)
p300_white = (255, 255, 255, 255)
blue = (0, 0, 255, 10)
yellow = (255, 255, 0, 45)
purple = (146, 56, 219, 45)
dark_grey = (3, 3, 3)
pen_colors = ['r', 'y', 'g', 'c', 'b', 'm',
(100, 100, 100), 'w', 'k', (100, 100, 100), (100, 100, 100),
(100, 100, 100), (100, 100, 100), (100, 100, 100), (100, 100, 100),
(100, 100, 100), (100, 100, 100), (100, 100, 100), (100, 100, 100)]
button_colors = ['red', 'yellow', 'green', 'cyan',
'blue', 'magenta', 'grey', 'white',
'red', 'yellow', 'green', 'cyan',
'blue', 'magenta', 'grey', 'white',
'red', 'yellow', 'green', 'cyan',
'blue', 'magenta', 'grey', 'white'] # TODO: ALEXM: Generate colors from pyqt instead
# 58 167 215
dark_blue = 'rgba(0, 0, 80, 0.4)'
# Polycortex Color
# lighter blue
# dark_blue_tab = 'rgba(70, 175, 230, 1)'
dark_blue_tab = 'rgba(60, 160, 210, 1)'
# slightly Darker
# dark_blue_tab = 'rgba(30, 130, 170, 1)'
# Really Darker
# dark_blue_tab = 'rgba(18, 90, 140, 1)'
# old
# dark_blue = 'rgba(0, 0, 80, 0.4)'
# dark_blue_tab = 'rgba(62, 62, 160, 1)'
grey = 'rgba(100, 100, 100, 0.5)'
light_grey = 'rgba(130, 130, 130, 0.7)'
grey2 = 'rgba(160, 160, 160, 0.5)'
grey3 = 'rgba(200, 200, 200, 0.6'
label_grey = 'rgba(215, 215, 215, 0.7)'
green_b = 'rgba(0, 100, 0, 0.5)'
red_b = 'rgba(100, 0, 0, 0.5)'
blue_b = 'rgba(0, 0, 170, 0.5)'
white = 'rgba(255, 255, 255, 1)'
black = 'rgba(0, 0, 0, 0.5)'
DARK_GREY = '#585858' # hexa
LIGHT_GREY = '#C8C8C8'
blue_plane='rgba(0, 0, 255, 0.4)'
green_plane='rgba(0, 255, 0, 0.7)'
red_plane='rgba(255, 0, 0, 0.4)'
| [
"alexandre.marcotte.1094@gmail.com"
] | alexandre.marcotte.1094@gmail.com |
ae5550ca8397473eab8016c7442730089e83b4c7 | 65dce36be9eb2078def7434455bdb41e4fc37394 | /66 Plus One.py | 6576b5adcd01a259a42b93a3c4bea3c281e1c33e | [] | no_license | EvianTan/Lintcode-Leetcode | 9cf2d2f6a85c0a494382b9c347bcdb4ee0b5d21a | d12dd31e98c2bf24acc20c5634adfa950e68bd97 | refs/heads/master | 2021-01-22T08:13:55.758825 | 2017-10-20T21:46:23 | 2017-10-20T21:46:23 | 92,607,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | '''
Given a non-negative integer represented as a non-empty array of digits, plus one to the integer.
You may assume the integer do not contain any leading zero, except the number 0 itself.
The digits are stored such that the most significant digit is at the head of the list.
'''
class Solution(object):
    def plusOne(self, digits):
        """
        Add one to the number represented by `digits` (most significant first).

        Replaces the original str/int round-trip with direct carry
        propagation: no string building, no arbitrary-precision
        conversion, and an empty list is handled gracefully.

        :type digits: List[int]
        :rtype: List[int]
        """
        result = list(digits)  # do not mutate the caller's list
        # Walk from the least significant digit, propagating the carry.
        for i in range(len(result) - 1, -1, -1):
            if result[i] < 9:
                result[i] += 1
                return result
            result[i] = 0
        # Every digit was 9 (or the list was empty): the number grows by
        # one digit, e.g. 99 -> 100.
        return [1] + result
"yiyun.tan@uconn.edu"
] | yiyun.tan@uconn.edu |
30c81a894419eab115ec96db9261083590ebfc47 | dae212cb615e5eba3fe8108799a39bc09d7bddb6 | /leetcode/0114_flatten_binary_tree_to_linked_list.py | 279786e78ff2bcc7c0585f3c696750b0a152436c | [] | no_license | cs-cordero/interview-prep | a291b5ce2fb8461449e6e27a1f23e12b54223540 | c3b5b4612f3641572d2237e36aa23019c680c799 | refs/heads/master | 2022-05-23T10:39:59.817378 | 2020-04-29T12:57:12 | 2020-04-29T12:57:12 | 76,767,250 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | from utils import Empty, TreeNode
class Solution:
    def flatten(self, root: TreeNode) -> None:
        """Flatten the tree in place into a right-only chain in preorder.

        For each node: splice the right subtree onto the preorder-last node
        of the left subtree (found by `traverse_to_next`), move the left
        subtree to the right, then recurse down the new right chain.
        """
        if not root:
            return
        if root.right and root.left:
            # Attach the right subtree after the left subtree's preorder tail.
            traverse_to_next(root.left).right = root.right
        if root.left:
            root.right = root.left
            root.left = None
        if root.right:
            self.flatten(root.right)
def traverse_to_next(root: TreeNode) -> TreeNode:
    """Return the last node of `root`'s subtree in preorder.

    In preorder the right subtree comes last, so follow right children
    while they exist; when a node has no right child, descend into its
    left child; stop at a node with no children.

    Fix: the return annotation previously said ``None`` even though the
    function returns a node.
    """
    while True:
        while root.right:
            root = root.right
        if root.left:
            root = root.left
        else:
            return root
# Smoke test: flattening the sample tree must yield the right-only chain
# 1 -> 2 -> 3 -> 4 -> 5 -> 6 (the tree's preorder).
root = TreeNode.from_array([1, 2, 5, 3, 4, Empty, 6])
expected = TreeNode(1)
current = expected
for i in range(2, 7):
    current.right = TreeNode(i)
    current = current.right
Solution().flatten(root)
assert TreeNode.subtrees_match(root, expected)
| [
"ccordero@protonmail.com"
] | ccordero@protonmail.com |
77d28f6918656e3e80ca82ecd1f0f0d266db8677 | a6610e191090e216b0e0f23018cecc5181400a7a | /robotframework-ls/tests/robotframework_ls_tests/test_signature_help.py | 52c4d14f1ade79e59b100f9e7482619e5995ca6a | [
"Apache-2.0"
] | permissive | JohanMabille/robotframework-lsp | d7c4c00157dd7c12ab15b7125691f7052f77427c | 610f0257fdcd79b8c38107a0ecf600f60160bc1f | refs/heads/master | 2023-01-19T10:29:48.982578 | 2020-11-25T13:46:22 | 2020-11-25T13:46:22 | 296,245,093 | 0 | 0 | NOASSERTION | 2020-09-17T06:58:54 | 2020-09-17T06:58:53 | null | UTF-8 | Python | false | false | 774 | py | def test_signature_help_basic(workspace, libspec_manager, data_regression):
from robotframework_ls.impl.completion_context import CompletionContext
from robotframework_ls.impl.signature_help import signature_help
workspace.set_root("case4", libspec_manager=libspec_manager)
doc = workspace.get_doc("case4.robot")
doc.source += """
*** Test Cases ***
Log It
Log """
completion_context = CompletionContext(doc, workspace=workspace.ws)
result = signature_help(completion_context)
signatures = result["signatures"]
# Don't check the signature documentation in the data regression so that the
# test doesn't become brittle.
docs = signatures[0].pop("documentation")
assert "Log" in docs
data_regression.check(result)
| [
"fabiofz@gmail.com"
] | fabiofz@gmail.com |
d5e085cb5ff4e318e55a21d55d80d6291f46ef1a | 5d06a33d3685a6f255194b13fd2e615e38d68850 | /opytimark/markers/boolean.py | b94c6a3c83585ca07ebb257de8df8a53c75b5e43 | [
"Apache-2.0"
] | permissive | sarikoudis/opytimark | 617a59eafaabab5e67bd4040473a99f963df7788 | cad25623f23ce4b509d59381cf7bd79e41a966b6 | refs/heads/master | 2023-07-24T04:19:55.869169 | 2021-09-03T13:09:45 | 2021-09-03T13:09:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,964 | py | """Boolean-based benchmarking functions.
"""
import itertools as it
import numpy as np
import opytimark.utils.constants as c
import opytimark.utils.decorator as d
import opytimark.utils.exception as e
from opytimark.core import Benchmark
class Knapsack(Benchmark):
    """Knapsack class implements a boolean-based version of the Knapsack problem.

    .. math:: f(\mathbf{x}) = f(x_1, x_2, \ldots, x_n) = \min -{\sum_{i=1}^{n}v_i x_i}

    s.t.

    .. math:: \sum_{i=1}^{n}w_i x_i \leq b

    Domain:
        The function is evaluated using :math:`x_i \in \{0, 1\} \mid i = \{1, 2, \ldots, n\}`.

    """

    def __init__(self, name='Knapsack', dims=-1, continuous=False, convex=False,
                 differentiable=False, multimodal=False, separable=False,
                 values=(0,), weights=(0,), max_capacity=0.0):
        """Initialization method.

        Args:
            name (str): Name of the function.
            dims (int): Number of allowed dimensions.
            continuous (bool): Whether the function is continuous.
            convex (bool): Whether the function is convex.
            differentiable (bool): Whether the function is differentiable.
            multimodal (bool): Whether the function is multimodal.
            separable (bool): Whether the function is separable.
            values (tuple): Tuple of items values.
            weights (tuple): Tuple of items weights.
            max_capacity: Maximum capacity of the knapsack.

        Raises:
            SizeError: If `values` and `weights` differ in length.

        """

        super(Knapsack, self).__init__(name, dims, continuous,
                                       convex, differentiable, multimodal, separable)
        if len(values) != len(weights):
            raise e.SizeError('`values` and `weights` needs to have the same size')
        # Items values
        self.values = values
        # Items weights
        self.weights = weights
        # Maximum capacity of the knapsack
        self.max_capacity = max_capacity
        # Re-writes the correct number of dimensions
        # (one boolean decision variable per item, overriding `dims`).
        self.dims = len(values)

    @property
    def values(self):
        """tuple: values of items in the knapsack.

        """

        return self._values

    @values.setter
    def values(self, values):
        # Validated on assignment so invalid types fail fast.
        if not isinstance(values, tuple):
            raise e.TypeError('`values` should be a tuple')
        self._values = values

    @property
    def weights(self):
        """tuple: Weights of items in the knapsack.

        """

        return self._weights

    @weights.setter
    def weights(self, weights):
        if not isinstance(weights, tuple):
            raise e.TypeError('`weights` should be a tuple')
        self._weights = weights

    @property
    def max_capacity(self):
        """float: Maximum capacity of the knapsack.

        """

        return self._max_capacity

    @max_capacity.setter
    def max_capacity(self, max_capacity):
        if not isinstance(max_capacity, (float, int)):
            raise e.TypeError('`max_capacity` should be a float or integer')
        if max_capacity < 0:
            raise e.ValueError('`max_capacity` should be >= 0')
        self._max_capacity = max_capacity

    @d.check_exact_dimension
    def __call__(self, x):
        """This method returns the function's output when the class is called.

        Args:
            x (np.array): An input array for calculating the function's output.

        Returns:
            The benchmarking function output `f(x)`: minus the value of the
            selected items, or a large penalty if the capacity is exceeded.

        """

        # Gathering an array of possible values
        v = np.array(list(it.compress(self.values, x)))
        # Gathering an array of possible weights
        w = np.array(list(it.compress(self.weights, x)))
        # If the sum of weights exceed the maximum capacity
        if np.sum(w) > self.max_capacity:
            # Returns the maximum number possible
            # (infeasible selections are penalized, not rejected).
            return c.FLOAT_MAX
        # Returns its negative sum as it is a minimization problem
        return -np.sum(v)
| [
"gth.rosa@uol.com.br"
] | gth.rosa@uol.com.br |
444a1bdc1e196a8d209a09c69e8e5c15df8aa12a | c0a49527d2d6bf56c04349bda832875625451a39 | /project/apps/stats/urls.py | 6b857438a50d724df7c3f12febe3c65ead52540e | [
"MIT"
] | permissive | mbi/chin-up | 03d3dd7c74320aee3924ad587f4c78e8dcee815b | 4e55082996c53fcbf3e70157ba59c9c40d1fdbcc | refs/heads/master | 2023-08-21T23:14:26.368231 | 2014-05-20T08:05:55 | 2014-05-20T08:05:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Register every installed app's admin module with the admin site.
admin.autodiscover()
# NOTE(review): patterns() is the pre-Django-1.8 URLconf style; on modern
# Django, urlpatterns would be a plain list of url()/path() entries.
urlpatterns = patterns(
    '',
    url(r'stats[/]$', 'stats.views.stats_view', name='stats'),
)
| [
"eric@ckcollab.com"
] | eric@ckcollab.com |
67a9389e19ff389ab6171256736ba1a3fed82640 | 12ded90ded46e991e2bfa328d620063ff124622f | /util/reggen/register.py | 8f08ab95d3f84c0cf4031bed759b35dea61f05c2 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | rswarbrick/opentitan | 84456da8bd357497bd812ae6db6000c6e7dcef8b | 7247d8aa62c7fa1878cf91c7bc8a1b2b8dd3fc9e | refs/heads/master | 2023-09-02T20:53:29.274251 | 2021-03-05T18:07:38 | 2021-03-10T11:26:37 | 249,710,844 | 0 | 0 | Apache-2.0 | 2021-05-27T16:11:59 | 2020-03-24T13:08:07 | SystemVerilog | UTF-8 | Python | false | false | 14,265 | py | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from typing import Dict, List, Optional
from .access import SWAccess, HWAccess
from .field import Field
from .lib import (check_keys, check_str, check_name, check_bool,
check_list, check_str_list, check_int)
from .params import Params
from .reg_base import RegBase
REQUIRED_FIELDS = {
'name': ['s', "name of the register"],
'desc': ['t', "description of the register"],
'fields': ['l', "list of register field description groups"]
}
OPTIONAL_FIELDS = {
'swaccess': [
's',
"software access permission to use for "
"fields that don't specify swaccess"
],
'hwaccess': [
's',
"hardware access permission to use for "
"fields that don't specify hwaccess"
],
'hwext': [
's',
"'true' if the register is stored outside "
"of the register module"
],
'hwqe': [
's',
"'true' if hardware uses 'q' enable signal, "
"which is latched signal of software write pulse."
],
'hwre': [
's',
"'true' if hardware uses 're' signal, "
"which is latched signal of software read pulse."
],
'regwen': [
's',
"if register is write-protected by another register, that "
"register name should be given here. empty-string for no register "
"write protection"
],
'resval': [
'd',
"reset value of full register (default 0)"
],
'tags': [
's',
"tags for the register, following the format 'tag_name:item1:item2...'"
],
'shadowed': [
's',
"'true' if the register is shadowed"
],
'update_err_alert': [
's',
"alert that will be triggered if "
"this shadowed register has update error"
],
'storage_err_alert': [
's',
"alert that will be triggered if "
"this shadowed register has storage error"
]
}
class Register(RegBase):
'''Code representing a register for reggen'''
def __init__(self,
offset: int,
name: str,
desc: str,
swaccess: SWAccess,
hwaccess: HWAccess,
hwext: bool,
hwqe: bool,
hwre: bool,
regwen: Optional[str],
tags: List[str],
resval: Optional[int],
shadowed: bool,
fields: List[Field],
update_err_alert: Optional[str],
storage_err_alert: Optional[str]):
super().__init__(offset)
self.name = name
self.desc = desc
self.swaccess = swaccess
self.hwaccess = hwaccess
self.hwext = hwext
if self.hwext and self.hwaccess.key == 'hro' and self.sw_readable():
raise ValueError('hwext flag for {} register is set, but '
'hwaccess is hro and the register value '
'is readable by software mode ({}).'
.format(self.name, self.swaccess.key))
self.hwqe = hwqe
if self.hwext and not self.hwqe and self.sw_writable():
raise ValueError('The {} register has hwext set and is writable '
'by software (mode {}), so must also have hwqe '
'enabled.'
.format(self.name, self.swaccess.key))
self.hwre = hwre
if self.hwre and not self.hwext:
raise ValueError('The {} register specifies hwre but not hwext.'
.format(self.name))
self.regwen = regwen
self.tags = tags
self.shadowed = shadowed
sounds_shadowy = self.name.lower().endswith('_shadowed')
if self.shadowed and not sounds_shadowy:
raise ValueError("Register {} has the shadowed flag but its name "
"doesn't end with the _shadowed suffix."
.format(self.name))
elif sounds_shadowy and not self.shadowed:
raise ValueError("Register {} has a name ending in _shadowed, but "
"the shadowed flag is not set."
.format(self.name))
# Take a copy of fields and then sort by bit index
assert fields
self.fields = fields.copy()
self.fields.sort(key=lambda field: field.bits.lsb)
# Index fields by name and check for duplicates
self.name_to_field = {} # type: Dict[str, Field]
for field in self.fields:
if field.name in self.name_to_field:
raise ValueError('Register {} has duplicate fields called {}.'
.format(self.name, field.name))
self.name_to_field[field.name] = field
# Check that field bits are disjoint
bits_used = 0
for field in self.fields:
field_mask = field.bits.bitmask()
if bits_used & field_mask:
raise ValueError('Register {} has non-disjoint fields: '
'{} uses bits {:#x} used by other fields.'
.format(self.name, field.name,
bits_used & field_mask))
# Compute a reset value and mask from our constituent fields.
self.resval = 0
self.resmask = 0
for field in self.fields:
self.resval |= (field.resval or 0) << field.bits.lsb
self.resmask |= field.bits.bitmask()
# If the register defined a reset value, make sure it matches. We've
# already checked that each field matches, but we still need to make
# sure there weren't any bits unaccounted for.
if resval is not None and self.resval != resval:
raise ValueError('Register {} specifies a reset value of {:#x} but '
'collecting reset values across its fields yields '
'{:#x}.'
.format(self.name, resval, self.resval))
self.update_err_alert = update_err_alert
self.storage_err_alert = storage_err_alert
@staticmethod
def from_raw(reg_width: int,
offset: int,
params: Params,
raw: object) -> 'Register':
rd = check_keys(raw, 'register',
list(REQUIRED_FIELDS.keys()),
list(OPTIONAL_FIELDS.keys()))
name = check_name(rd['name'], 'name of register')
desc = check_str(rd['desc'], 'desc for {} register'.format(name))
swaccess = SWAccess('{} register'.format(name),
rd.get('swaccess', 'none'))
hwaccess = HWAccess('{} register'.format(name),
rd.get('hwaccess', 'hro'))
hwext = check_bool(rd.get('hwext', False),
'hwext flag for {} register'.format(name))
hwqe = check_bool(rd.get('hwqe', False),
'hwqe flag for {} register'.format(name))
hwre = check_bool(rd.get('hwre', False),
'hwre flag for {} register'.format(name))
raw_regwen = rd.get('regwen', '')
if not raw_regwen:
regwen = None
else:
regwen = check_name(raw_regwen,
'regwen for {} register'.format(name))
tags = check_str_list(rd.get('tags', []),
'tags for {} register'.format(name))
raw_resval = rd.get('resval')
if raw_resval is None:
resval = None
else:
resval = check_int(raw_resval,
'resval for {} register'.format(name))
if not 0 <= resval < (1 << reg_width):
raise ValueError('resval for {} register is {}, '
'not an unsigned {}-bit number.'
.format(name, resval, reg_width))
shadowed = check_bool(rd.get('shadowed', False),
'shadowed flag for {} register'
.format(name))
raw_fields = check_list(rd['fields'],
'fields for {} register'.format(name))
if not raw_fields:
raise ValueError('Register {} has no fields.'.format(name))
fields = [Field.from_raw(name,
idx,
len(raw_fields),
swaccess,
hwaccess,
resval,
reg_width,
hwqe,
hwre,
params,
rf)
for idx, rf in enumerate(raw_fields)]
raw_uea = rd.get('update_err_alert')
if raw_uea is None:
update_err_alert = None
else:
update_err_alert = check_name(raw_uea,
'update_err_alert for {} register'
.format(name))
raw_sea = rd.get('storage_err_alert')
if raw_sea is None:
storage_err_alert = None
else:
storage_err_alert = check_name(raw_sea,
'storage_err_alert for {} register'
.format(name))
return Register(offset, name, desc, swaccess, hwaccess,
hwext, hwqe, hwre, regwen,
tags, resval, shadowed, fields,
update_err_alert, storage_err_alert)
    def next_offset(self, addrsep: int) -> int:
        """Return the start offset of whatever follows this register."""
        return self.offset + addrsep
    def sw_readable(self) -> bool:
        """True if software can read the register's value back."""
        return self.swaccess.key not in ['wo', 'r0w1c']
    def sw_writable(self) -> bool:
        """True if software can write the register."""
        return self.swaccess.key != 'ro'
    def dv_rights(self) -> str:
        """Return the access string used by the DV (verification) code."""
        return self.swaccess.dv_rights()
    def get_n_bits(self, bittype: List[str]) -> int:
        """Sum the requested bit counts over all fields of the register."""
        return sum(field.get_n_bits(self.hwext, bittype)
                   for field in self.fields)
    def get_field_list(self) -> List[Field]:
        """Return the fields, ordered by increasing LSB (sorted in __init__)."""
        return self.fields
    def is_homogeneous(self) -> bool:
        """True if the register consists of exactly one field."""
        return len(self.fields) == 1
def get_width(self) -> int:
'''Get the width of the fields in the register in bits
This counts dead space between and below fields, so it's calculated as
one more than the highest msb.
'''
# self.fields is ordered by (increasing) LSB, so we can find the MSB of
# the register by taking the MSB of the last field.
return 1 + self.fields[-1].bits.msb
def make_multi(self,
reg_width: int,
offset: int,
creg_idx: int,
creg_count: int,
regwen_multi: bool,
compact: bool,
min_reg_idx: int,
max_reg_idx: int,
cname: str) -> 'Register':
'''Generate a numbered, packed version of the register'''
assert 0 <= creg_idx < creg_count
assert 0 <= min_reg_idx <= max_reg_idx
assert compact or (min_reg_idx == max_reg_idx)
new_name = ('{}_{}'.format(self.name, creg_idx)
if creg_count > 1
else self.name)
if self.regwen is None or not regwen_multi or creg_count == 1:
new_regwen = self.regwen
else:
new_regwen = '{}_{}'.format(self.regwen, creg_idx)
strip_field = creg_idx > 0
if compact:
# Compacting multiple registers into a single "compacted" register.
# This is only supported if we have exactly one field (checked at
# the call-site)
assert len(self.fields) == 1
new_fields = self.fields[0].make_multi(reg_width,
min_reg_idx, max_reg_idx,
cname, creg_idx,
strip_field)
else:
# No compacting going on, but we still choose to rename the fields
# to match the registers
assert creg_idx == min_reg_idx
new_fields = [field.make_suffixed('_{}'.format(creg_idx),
cname, creg_idx, strip_field)
for field in self.fields]
# Don't specify a reset value for the new register. Any reset value
# defined for the original register will have propagated to its fields,
# so when we combine them here, the Register constructor can compute a
# reset value for us (which might well be different from self.resval if
# we've replicated fields).
new_resval = None
return Register(offset, new_name, self.desc,
self.swaccess, self.hwaccess,
self.hwext, self.hwqe, self.hwre, new_regwen,
self.tags, new_resval, self.shadowed, new_fields,
self.update_err_alert, self.storage_err_alert)
    def _asdict(self) -> Dict[str, object]:
        """Serialize the register back to the dict layout that from_raw parses.

        Optional keys (regwen and the two shadow-alert names) are only
        emitted when they are set, mirroring how they are read in.
        """
        rd = {
            'name': self.name,
            'desc': self.desc,
            'fields': self.fields,
            'swaccess': self.swaccess.key,
            'hwaccess': self.hwaccess.key,
            'hwext': str(self.hwext),
            'hwqe': str(self.hwqe),
            'hwre': str(self.hwre),
            'tags': self.tags,
            'shadowed': str(self.shadowed),
        }
        if self.regwen is not None:
            rd['regwen'] = self.regwen
        if self.update_err_alert is not None:
            rd['update_err_alert'] = self.update_err_alert
        if self.storage_err_alert is not None:
            rd['storage_err_alert'] = self.storage_err_alert
        return rd
| [
"rswarbrick@gmail.com"
] | rswarbrick@gmail.com |
2f84b40b125e8d1ca2183e5d813edf383ecb7ec2 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/92/usersdata/250/37892/submittedfiles/atividade.py | d3db75de167513632efef98cec11cea616e0aa85 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | # -*- coding: utf-8 -*-
import math  # NOTE(review): imported but never used
# Reads an integer n (prompt is Portuguese, "digite um valor" with a typo).
n=int(input('digite um vlor:'))
i=1
# NOTE(review): `contaor` looks like a typo of `contador`; neither counter
# is ever read afterwards.
contaor=0
while i<=n:
  if n>0:
   # NOTE(review): raises ZeroDivisionError when n == 1; confirm the intended
   # formula against the assignment statement.
   s=i/(n-1)
   contador=0
  i=i+1
# NOTE(review): prints only the last computed s; NameError if the loop body
# never ran (n < 1).
print('%.5f'%s)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ac27f07c1d4dcebea6d5af25abd002e5e717a87c | eaf4408fd01ced7acbee7bd72cbae386c6249842 | /Projects/01-MidtermProject/tests/q1_14a.py | da8b80dab10b6f009f005d279943667830c30639 | [] | no_license | ucsd-ets/dsc10-wi21 | 54176ac31bf5bed75ab33bb670f7aec6358fd886 | 9ffe29f5af2cc58b58a08c82943f91a17b90fe91 | refs/heads/main | 2023-03-21T04:32:42.873980 | 2021-03-13T02:59:59 | 2021-03-13T02:59:59 | 325,040,482 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | test = { 'name': 'q1_14a',
'points': 1,
'suites': [{'cases': [{'code': '>>> isinstance(weekday_pos_avg, float)\nTrue', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
| [
"yal319@ucsd.edu"
] | yal319@ucsd.edu |
d94fa9f50b45c677b965d5e716e5003ff19c3882 | c05ed32f1ef7e1eb7d73efd674e7d1fd710ad171 | /daily-coding-problems/problem231.py | 9db45bfef6c22729c0abe839d101f948f9ed752e | [] | no_license | carlhinderer/python-exercises | c8367517fdf835fa1117f96dbfee3dccc596afa6 | 4e09bbb4c4e2bd5644ed50e997db9f3c289a18f7 | refs/heads/master | 2021-06-01T16:17:00.389134 | 2021-02-09T18:21:01 | 2021-02-09T18:21:01 | 150,902,917 | 0 | 0 | null | 2021-04-20T20:33:11 | 2018-09-29T21:03:36 | Python | UTF-8 | Python | false | false | 286 | py | # Problem 231
# Easy
# Asked by IBM
#
# Given a string with repeated characters, rearrange the string so that no two
# adjacent characters are the same. If this is not possible, return None.
#
# For example, given "aaabbc", you could return "ababac". Given "aaab", return None.
# | [
"carl.hinderer4@gmail.com"
] | carl.hinderer4@gmail.com |
0e3cb13687bee190c90eda8bf825e45860f50c81 | 757705e98cc059b0ada491489660ac3bd6e49607 | /udun/balrog/updates.py | 49ddba3688bd32b4c19bca89934d0e2cbe18b651 | [] | no_license | mozilla-services/udun-bridge | 997ef4b83c0e33cab4a12ce25aef13843e279896 | c9494b89416c181c8de2771542089588e3087040 | refs/heads/master | 2021-01-10T06:11:23.530733 | 2019-03-28T14:12:08 | 2019-03-28T14:12:08 | 44,543,019 | 3 | 4 | null | 2019-03-28T14:12:10 | 2015-10-19T15:08:04 | Python | UTF-8 | Python | false | false | 801 | py | # tarek from
# https://hg.mozilla.org/build/tools/file/default/lib/python/balrog/submitter/
# version 9f4e6a2eafa1
import jsonmerge
def merge_partial_updates(base_obj, new_obj):
"""Merges 2 update objects, merging partials and replacing completes"""
schema = {
"properties": {
# Merge partials using fileUrl as an identifier field
"partials": {
"mergeStrategy": "arrayMergeById",
"mergeOptions": {
"idRef": "from"
}
},
# Replace completes - we don't usually have more than one
"completes": {
"mergeStrategy": "overwrite"
}
}
}
merger = jsonmerge.Merger(schema=schema)
return merger.merge(base_obj, new_obj)
| [
"tarek@ziade.org"
] | tarek@ziade.org |
7680615359e2adab26c0a4e6929c0f2a1f392e18 | e2de3f6fe4373f1d98b67af61dd558a813250d54 | /Algorithm/baekjoon/2312_수복원하기.py | b65b63e92206cee06f30380b4dcfbacde3b509fd | [] | no_license | Hansung-Lee/TIL | 3fd6d48427a8b24f7889116297143855d493535b | c24ebab8b631f5c1b835fdc8bd036acbebc8d187 | refs/heads/master | 2020-04-14T11:18:54.035863 | 2019-04-05T07:26:55 | 2019-04-05T07:26:55 | 163,810,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | T = int(input())
# For each of the T test cases (T is read above), trial-divide N by every
# candidate factor 2..N and print each prime factor with its multiplicity.
for t in range(T):
    N = int(input())
    # li[i] counts how many times (i + 2) divides N.
    li = [0] * (N-1)
    for i in range(len(li)):
        # Strip out every factor of (i + 2). Composite candidates never
        # divide here because their prime parts were already divided away.
        while not N%(i+2):
            N = N//(i+2)
            li[i] += 1
    for i in range(len(li)):
        if li[i]:
            print (f"{i+2} {li[i]}")
"ajtwlsgkst@naver.com"
] | ajtwlsgkst@naver.com |
4a5cf4b293ba338af36939e2ae54ccc79a1b1a16 | f33364172d2408304fbc5064774d8a864f7c1478 | /django_products/app/views_user.py | fee116c43f4275f85831dbcdcd6eb4f58ec3952e | [] | no_license | pytutorial/py2005E | 4506e13ef37810b7f5b20fcafbaee1467f9f6e97 | 7765a2d812def499ab2a8eb7dff3ad3cdcd7716f | refs/heads/master | 2022-12-09T23:09:26.657784 | 2020-09-11T13:38:05 | 2020-09-11T13:38:05 | 284,680,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,841 | py | from django.shortcuts import render, redirect
from .models import *
from .forms import OrderForm, SearchForm
from datetime import datetime
import math
def getPriceRangeValue(priceRange):
if str(priceRange) == '1': return None, 10
if str(priceRange) == '2': return 10, 20
if str(priceRange) == '3': return 20, None
return None, None
def searchProduct(data):
name = data.get('name')
categ = data.get('category')
priceRange = data.get('priceRange')
productList = Product.objects.all()
if name:
productList = productList.filter(name__contains=name)
if categ:
productList = productList.filter(category__id=categ)
minPrice, maxPrice = getPriceRangeValue(priceRange)
if minPrice:
productList = productList.filter(price__gte=minPrice*1e6)
if maxPrice:
productList = productList.filter(price__lte=maxPrice*1e6)
return productList
def createQueryString(data):
name = data.get('name', '')
category = data.get('category', '')
priceRange = data.get('priceRange', '')
return f'/?name={name}&category={category}&priceRange={priceRange}'
def index(request):
PAGE_SIZE = 3
form = SearchForm(request.GET)
productList = searchProduct(request.GET)
page = int(request.GET.get('page', 1))
start = (page-1)*PAGE_SIZE
end = page*PAGE_SIZE
total = len(productList)
num_page = math.ceil(total/PAGE_SIZE)
context = {
'productList': productList[start:end],
'total': total,
'num_page': num_page,
'page': page,
'next_page': page + 1 if page < num_page else None,
'prev_page': page - 1 if page > 1 else None,
'form': form,
'query_str': createQueryString(request.GET),
}
return render(request, 'user/index.html', context)
def viewProduct(request, pk):
product = Product.objects.get(pk=pk)
context = {'product': product}
return render(request, 'user/view_product.html', context)
def saveOrder(product, data):
order = Order()
order.product = product
order.priceUnit = product.price
order.qty = data['qty']
order.fullname = data['fullname']
order.phone = data['phone']
order.address = data['address']
order.orderDate = datetime.now()
order.status = Order.OrderStatus.PENDING
order.save()
def orderProduct(request, pk):
product = Product.objects.get(pk=pk)
form = OrderForm(initial={'qty': 1})
if request.method == 'POST':
form = OrderForm(request.POST)
if form.is_valid():
saveOrder(product, form.cleaned_data)
return redirect('/thank_you')
context = {'product': product, 'form': form}
return render(request,'user/order_product.html', context)
def thankYou(request):
return render(request, 'user/thank_you.html') | [
"duongthanhtungvn01@gmail.com"
] | duongthanhtungvn01@gmail.com |
6bf10ed48864690d532c2218b82c770acc6a402c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_subdivided.py | 4577fabad755648b5b6de69c364b017a654052bb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py |
from xai.brain.wordbase.verbs._subdivide import _SUBDIVIDE
#calss header
class _SUBDIVIDED(_SUBDIVIDE, ):
def __init__(self,):
_SUBDIVIDE.__init__(self)
self.name = "SUBDIVIDED"
self.specie = 'verbs'
self.basic = "subdivide"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c8033a45df8ddb48b786a6272cfb49e950150c1a | 6a8bc7da3104726f894ae360fce6a43a54b30812 | /gradio/themes/app.py | 0c4c5a5e4050d81fc1ba684175f285cfef7670db | [
"Apache-2.0"
] | permissive | gradio-app/gradio | 0b6b29bb0029ad3b8fc1b143f111b1230b29d23a | e4e7a4319924aaf51dcb18d07d0c9953d4011074 | refs/heads/main | 2023-09-01T10:56:50.822550 | 2023-09-01T00:28:01 | 2023-09-01T00:28:01 | 162,405,963 | 21,224 | 1,537 | Apache-2.0 | 2023-09-14T21:42:00 | 2018-12-19T08:24:04 | Python | UTF-8 | Python | false | false | 5,249 | py | import time
import gradio as gr
from gradio.themes.utils.theme_dropdown import create_theme_dropdown
dropdown, js = create_theme_dropdown()
with gr.Blocks(theme=gr.themes.Default()) as demo:
with gr.Row().style(equal_height=True):
with gr.Column(scale=10):
gr.Markdown(
"""
# Theme preview: `{THEME}`
To use this theme, set `theme='{AUTHOR}/{SPACE_NAME}'` in `gr.Blocks()` or `gr.Interface()`.
You can append an `@` and a semantic version expression, e.g. @>=1.0.0,<2.0.0 to pin to a given version
of this theme.
"""
)
with gr.Column(scale=3):
with gr.Box():
dropdown.render()
toggle_dark = gr.Button(value="Toggle Dark").style(full_width=True)
dropdown.change(None, dropdown, None, _js=js)
toggle_dark.click(
None,
_js="""
() => {
document.body.classList.toggle('dark');
}
""",
)
name = gr.Textbox(
label="Name",
info="Full name, including middle name. No special characters.",
placeholder="John Doe",
value="John Doe",
interactive=True,
)
with gr.Row():
slider1 = gr.Slider(label="Slider 1")
slider2 = gr.Slider(label="Slider 2")
gr.CheckboxGroup(["A", "B", "C"], label="Checkbox Group")
with gr.Row():
with gr.Column(variant="panel", scale=1):
gr.Markdown("## Panel 1")
radio = gr.Radio(
["A", "B", "C"],
label="Radio",
info="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.",
)
drop = gr.Dropdown(["Option 1", "Option 2", "Option 3"], show_label=False)
drop_2 = gr.Dropdown(
["Option A", "Option B", "Option C"],
multiselect=True,
value=["Option A"],
label="Dropdown",
interactive=True,
)
check = gr.Checkbox(label="Go")
with gr.Column(variant="panel", scale=2):
img = gr.Image(
"https://gradio-static-files.s3.us-west-2.amazonaws.com/header-image.jpg",
label="Image",
).style(height=320)
with gr.Row():
go_btn = gr.Button("Go", label="Primary Button", variant="primary")
clear_btn = gr.Button(
"Clear", label="Secondary Button", variant="secondary"
)
def go(*args):
time.sleep(3)
return "https://gradio-static-files.s3.us-west-2.amazonaws.com/header-image.jpgjpg"
go_btn.click(go, [radio, drop, drop_2, check, name], img, api_name="go")
def clear():
time.sleep(0.2)
return None
clear_btn.click(clear, None, img)
with gr.Row():
btn1 = gr.Button("Button 1").style(size="sm")
btn2 = gr.UploadButton().style(size="sm")
stop_btn = gr.Button("Stop", label="Stop Button", variant="stop").style(
size="sm"
)
with gr.Row():
gr.Dataframe(value=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], label="Dataframe")
gr.JSON(
value={"a": 1, "b": 2, "c": {"test": "a", "test2": [1, 2, 3]}}, label="JSON"
)
gr.Label(value={"cat": 0.7, "dog": 0.2, "fish": 0.1})
gr.File()
with gr.Row():
gr.ColorPicker()
gr.Video("https://gradio-static-files.s3.us-west-2.amazonaws.com/world.mp4")
gr.Gallery(
[
(
"https://gradio-static-files.s3.us-west-2.amazonaws.com/lion.jpg",
"lion",
),
(
"https://gradio-static-files.s3.us-west-2.amazonaws.com/logo.png",
"logo",
),
(
"https://gradio-static-files.s3.us-west-2.amazonaws.com/tower.jpg",
"tower",
),
]
).style(height="200px", grid=2)
with gr.Row():
with gr.Column(scale=2):
chatbot = gr.Chatbot([("Hello", "Hi")], label="Chatbot")
chat_btn = gr.Button("Add messages")
            def chat(history):
                """Generator that, after a delay, yields a fresh chat history.

                NOTE(review): this appears to be dead code - the button below
                is wired to a lambda instead; confirm before removing.
                """
                time.sleep(2)
                yield [["How are you?", "I am good."]]
chat_btn.click(
lambda history: history
+ [["How are you?", "I am good."]]
+ (time.sleep(2) or []),
chatbot,
chatbot,
)
with gr.Column(scale=1):
with gr.Accordion("Advanced Settings"):
gr.Markdown("Hello")
gr.Number(label="Chatbot control 1")
gr.Number(label="Chatbot control 2")
gr.Number(label="Chatbot control 3")
if __name__ == "__main__":
demo.queue().launch()
| [
"noreply@github.com"
] | gradio-app.noreply@github.com |
c6bbd6594ae99077c6c3adcd262581a0fa470645 | ecad2803537295a24fe8274f99dfb85ead3a7191 | /debian/tmp/usr/lib/python2.7/dist-packages/nova/api/openstack/compute/ips.py | 6ad888fd720bc78a4e2aa588ff7eaa17984c7526 | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/stacklab-nova | 98400585ec3b4e3e94269dcb41578fffe7e2c8c1 | 4ab1698659b663ef222255610d1a5c042706dd65 | refs/heads/master | 2022-11-20T12:07:18.250829 | 2012-12-13T04:43:00 | 2012-12-13T04:43:00 | 282,166,345 | 0 | 0 | Apache-2.0 | 2020-07-24T08:31:57 | 2020-07-24T08:31:56 | null | UTF-8 | Python | false | false | 3,472 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
import nova
from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as view_addresses
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def make_network(elem):
    """Decorate *elem* with the attributes of a serialized network.

    Adds a constant 'id' attribute and an 'ip' sub-template exposing
    'version' and 'addr'. The selector=1 presumably picks the IP list out
    of a (network_id, ips) item pair - see the templates below.
    """
    elem.set('id', 0)
    ip = xmlutil.SubTemplateElement(elem, 'ip', selector=1)
    ip.set('version')
    ip.set('addr')
network_nsmap = {None: xmlutil.XMLNS_V11}
class NetworkTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single network (used by `show`)."""
    def construct(self):
        # Root selector: element 0 of each item pair (the network id).
        sel = xmlutil.Selector(xmlutil.get_items, 0)
        root = xmlutil.TemplateElement('network', selector=sel)
        make_network(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=network_nsmap)
class AddressesTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the full listing (used by `index`)."""
    def construct(self):
        root = xmlutil.TemplateElement('addresses', selector='addresses')
        # One <network> element per entry in the addresses dict.
        elem = xmlutil.SubTemplateElement(root, 'network',
                                          selector=xmlutil.get_items)
        make_network(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=network_nsmap)
class Controller(wsgi.Controller):
    """The servers addresses API controller for the OpenStack API."""
    _view_builder_class = view_addresses.ViewBuilder
    def __init__(self, **kwargs):
        """Create the controller and its compute-API handle."""
        super(Controller, self).__init__(**kwargs)
        self._compute_api = nova.compute.API()
    def _get_instance(self, context, server_id):
        """Look up a server instance, translating NotFound into HTTP 404."""
        try:
            instance = self._compute_api.get(context, server_id)
        except nova.exception.NotFound:
            # NOTE(review): _() is presumably the gettext builtin installed
            # elsewhere by nova - confirm it is set up before this runs.
            msg = _("Instance does not exist")
            raise exc.HTTPNotFound(explanation=msg)
        return instance
    def create(self, req, server_id, body):
        # Addresses are read-only through this API.
        raise exc.HTTPNotImplemented()
    def delete(self, req, server_id, id):
        # Addresses are read-only through this API.
        raise exc.HTTPNotImplemented()
    @wsgi.serializers(xml=AddressesTemplate)
    def index(self, req, server_id):
        """Return every network (with its IPs) attached to the server."""
        context = req.environ["nova.context"]
        instance = self._get_instance(context, server_id)
        networks = common.get_networks_for_instance(context, instance)
        return self._view_builder.index(networks)
    @wsgi.serializers(xml=NetworkTemplate)
    def show(self, req, server_id, id):
        """Return one network's addresses, or 404 if not attached."""
        context = req.environ["nova.context"]
        instance = self._get_instance(context, server_id)
        networks = common.get_networks_for_instance(context, instance)
        if id not in networks:
            msg = _("Instance is not a member of specified network")
            raise exc.HTTPNotFound(explanation=msg)
        return self._view_builder.show(networks[id], id)
def create_resource():
    """Build the WSGI resource wrapping the addresses controller."""
    controller = Controller()
    return wsgi.Resource(controller)
| [
"yuanotes@gmail.com"
] | yuanotes@gmail.com |
5cafe1b409dddcffba908a42582d4795944108ff | 74ab22a81ac24e6e13335b6873674de180b14c26 | /search/search_dictionary.py | 1a8acef12ca20267c97848bf4832af217fc04276 | [] | no_license | attiakihal/MazeSolver | e92d39c62a582b564bfb437c8dde06754407c626 | f737b3f4236884a70df0e35977704fe0d2836292 | refs/heads/master | 2022-04-22T22:30:40.153668 | 2019-12-08T00:56:42 | 2019-12-08T00:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | from search.a_star_euclidean import a_star_euclidean
from search.a_star_manhattan import a_star_manhattan
from search.bidirectional_bfs import bidirectional_bfs
from search.bfs import bfs
from search.dfs import dfs
# Lookup table mapping an algorithm name to its search function.
search_dictionary = dict(
    a_star_euclidean=a_star_euclidean,
    a_star_manhattan=a_star_manhattan,
    dfs=dfs,
    bfs=bfs,
    bidirectional_bfs=bidirectional_bfs,
)
| [
"jonathan@Jonathans-MacBook-Pro.local"
] | jonathan@Jonathans-MacBook-Pro.local |
e5fae4b360e137a154ade338c6c6deca8b1e06e0 | d4157df22a19225b23e52476e00d854409b1f43c | /LogisticRegression/Regularized/CostReg.py | 3709d94c0be8f818e14101053f38ab8635f9bd3e | [] | no_license | shan18/Solutions-to-Machine-Learning-by-Andrew-Ng | fc4f3cd49b807ef9ce91586a4de027aa1520b04f | bcdd3a34da925944c5e03ebcf3b2c6998f731c87 | refs/heads/master | 2020-12-02T21:25:05.049786 | 2017-10-01T11:19:34 | 2017-10-01T11:19:34 | 96,312,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | import numpy as np
from LogisticRegression.Sigmoid import sigmoid
def cost_reg(theta, X, y, learning_rate):
    """Regularized logistic-regression cost.

    J = (1/m) * sum(-y*log(h) - (1-y)*log(1-h)) + (lam/(2m)) * sum(theta[1:]**2)
    with h = sigmoid(X * theta.T); the bias parameter theta[0] is not
    regularized.

    NOTE(review): the `learning_rate` argument is actually used as the
    regularization strength (lambda), not a learning rate - consider renaming.
    NOTE(review): np.matrix is deprecated in NumPy; porting to ndarrays would
    require changing `*` (matrix product here) to `@`.
    """
    # Coerce to np.matrix so `*` below means matrix multiplication.
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply(1 - y, np.log(1 - sigmoid(X * theta.T)))
    # Regularization term skips the first (bias) parameter.
    reg = (learning_rate / (2 * len(X))) * np.sum(np.power(theta[:, 1:], 2))
    return np.sum(first - second) / len(X) + reg
| [
"thegeek.004@gmail.com"
] | thegeek.004@gmail.com |
2e523a5434e9acf6c16fdb2354db39bc2eda42dc | 37a3c2ae904998a10ad6ec9f3cd715cdda718b21 | /pythonforandroid/recipes/opencv/__init__.py | 7e70162ea315ee1144d82eb1fb473565ecba8c89 | [
"Python-2.0",
"MIT"
] | permissive | agilewalker/python-for-android | 311a97422545b5861baaeeb9c52ee7f53959acb8 | a2fb5ecbc09c4847adbcfd03c6b1ca62b3d09b8d | refs/heads/master | 2021-09-14T07:14:02.044913 | 2017-12-12T08:13:23 | 2018-01-04T12:54:31 | 113,962,045 | 1 | 2 | MIT | 2018-05-09T11:29:50 | 2017-12-12T08:11:25 | Python | UTF-8 | Python | false | false | 2,228 | py | import os
import sh
from pythonforandroid.toolchain import (
NDKRecipe,
Recipe,
current_directory,
info,
shprint,
)
from multiprocessing import cpu_count
class OpenCVRecipe(NDKRecipe):
    """Builds OpenCV 2.4.x with its Python (cv2) bindings for Android."""
    version = '2.4.10.1'
    url = 'https://github.com/Itseez/opencv/archive/{version}.zip'
    #md5sum = '2ddfa98e867e6611254040df841186dc'
    depends = ['numpy']
    patches = ['patches/p4a_build-2.4.10.1.patch']
    generated_libraries = ['cv2.so']
    def prebuild_arch(self, arch):
        """Apply the p4a build patches before configuring."""
        self.apply_patches(arch)
    def get_recipe_env(self,arch):
        """Extend the base env with the Python/NDK/SDK paths CMake needs."""
        env = super(OpenCVRecipe, self).get_recipe_env(arch)
        env['PYTHON_ROOT'] = self.ctx.get_python_install_dir()
        env['ANDROID_NDK'] = self.ctx.ndk_dir
        env['ANDROID_SDK'] = self.ctx.sdk_dir
        env['SITEPACKAGES_PATH'] = self.ctx.get_site_packages_dir()
        return env
    def build_arch(self, arch):
        """Configure with the Android toolchain, build cv2, and install it."""
        with current_directory(self.get_build_dir(arch.arch)):
            env = self.get_recipe_env(arch)
            cvsrc = self.get_build_dir(arch.arch)
            lib_dir = os.path.join(self.ctx.get_python_install_dir(), "lib")
            # Configure: point CMake at the NDK toolchain file and at the
            # target Python's headers/libs so cv2 links against them.
            shprint(sh.cmake,
                    '-DP4A=ON','-DANDROID_ABI={}'.format(arch.arch),
                    '-DCMAKE_TOOLCHAIN_FILE={}/platforms/android/android.toolchain.cmake'.format(cvsrc),
                    '-DPYTHON_INCLUDE_PATH={}/include/python2.7'.format(env['PYTHON_ROOT']),
                    '-DPYTHON_LIBRARY={}/lib/libpython2.7.so'.format(env['PYTHON_ROOT']),
                    '-DPYTHON_NUMPY_INCLUDE_DIR={}/numpy/core/include'.format(env['SITEPACKAGES_PATH']),
                    '-DANDROID_EXECUTABLE={}/tools/android'.format(env['ANDROID_SDK']),
                    '-DBUILD_TESTS=OFF', '-DBUILD_PERF_TESTS=OFF', '-DBUILD_EXAMPLES=OFF', '-DBUILD_ANDROID_EXAMPLES=OFF',
                    '-DPYTHON_PACKAGES_PATH={}'.format(env['SITEPACKAGES_PATH']),
                    cvsrc,
                    _env=env)
            # Build only the Python bindings target, then install and copy
            # the generated shared libraries into the target lib directory.
            shprint(sh.make,'-j',str(cpu_count()),'opencv_python')
            shprint(sh.cmake,'-DCOMPONENT=python','-P','./cmake_install.cmake')
            sh.cp('-a',sh.glob('./lib/{}/lib*.so'.format(arch.arch)),lib_dir)
recipe = OpenCVRecipe()
| [
"frmdstryr@gmail.com"
] | frmdstryr@gmail.com |
45b1b8c35f3cf8c84f1fe49e5ff6aa9be0228989 | 6ab67facf12280fedf7cc47c61ae91da0bcf7339 | /service/yowsup/yowsup/demos/echoclient/layer.py | df0d73c7ce854b213f893a6fb7a43a51cb2d7fde | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | PuneethReddyHC/whatsapp-rest-webservice | 2f035a08a506431c40b9ff0f333953b855f9c461 | 822dfc46b80e7a26eb553e5a10e723dda5a9f77d | refs/heads/master | 2022-09-17T14:31:17.273339 | 2017-11-27T11:16:43 | 2017-11-27T11:16:43 | 278,612,537 | 0 | 1 | MIT | 2020-07-10T11:04:42 | 2020-07-10T11:04:41 | null | UTF-8 | Python | false | false | 1,646 | py | from yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback
class EchoLayer(YowInterfaceLayer):
    """Yowsup interface layer that echoes every incoming message back to
    its sender and acknowledges it."""

    @ProtocolEntityCallback("message")
    def onMessage(self, messageProtocolEntity):
        """Handle an incoming message: log it by type, echo it, and ack it."""
        # Type-specific handlers below only print diagnostics; the echo is common.
        if messageProtocolEntity.getType() == 'text':
            self.onTextMessage(messageProtocolEntity)
        elif messageProtocolEntity.getType() == 'media':
            self.onMediaMessage(messageProtocolEntity)

        # Echo: forward the entity back to whoever sent it.
        self.toLower(messageProtocolEntity.forward(messageProtocolEntity.getFrom()))
        # Two acks are sent: ack() and ack(True) — presumably delivery and
        # read receipts respectively; verify against the yowsup docs.
        self.toLower(messageProtocolEntity.ack())
        self.toLower(messageProtocolEntity.ack(True))

    @ProtocolEntityCallback("receipt")
    def onReceipt(self, entity):
        """Acknowledge delivery/read receipts so the server stops resending them."""
        self.toLower(entity.ack())

    def onTextMessage(self,messageProtocolEntity):
        """Log the text message being echoed."""
        # just print info
        print("Echoing %s to %s" % (messageProtocolEntity.getBody(), messageProtocolEntity.getFrom(False)))

    def onMediaMessage(self, messageProtocolEntity):
        """Log the media message (image/location/vcard) being echoed."""
        # just print info
        if messageProtocolEntity.getMediaType() == "image":
            print("Echoing image %s to %s" % (messageProtocolEntity.url, messageProtocolEntity.getFrom(False)))
        elif messageProtocolEntity.getMediaType() == "location":
            print("Echoing location (%s, %s) to %s" % (messageProtocolEntity.getLatitude(), messageProtocolEntity.getLongitude(), messageProtocolEntity.getFrom(False)))
        elif messageProtocolEntity.getMediaType() == "vcard":
            print("Echoing vcard (%s, %s) to %s" % (messageProtocolEntity.getName(), messageProtocolEntity.getCardData(), messageProtocolEntity.getFrom(False)))
| [
"svub@x900.svub.net"
] | svub@x900.svub.net |
6d669d90ebcebb6b738d3b848a30cd772f7906d8 | 9e335834e7be81068f001d5451781d5c1530ebbf | /CorePython/chapter15/my_card.py | 9a24af345fd158da0a3259cb26a492e7278bfb39 | [] | no_license | jtr109/SelfLearning | c1dbffa5485d0cd2f444ea510da62a8e3d269dbc | cc920ed507647762b9855385be76869adac89e7c | refs/heads/master | 2020-04-06T04:11:31.143688 | 2016-07-22T02:19:39 | 2016-07-22T02:19:39 | 58,049,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | # !/usr/bin/python
# -*- coding: utf-8 -*-
import re
def is_legal(data):
    """Return True when *data* matches an accepted card layout and passes
    the Luhn check.

    Accepted layouts: 15-16 plain digits, 4-6-5 dash-separated groups, or
    4-4-4-4 dash-separated groups.  The whole string must match: the
    original pattern was unanchored, so input such as '123456789012345x'
    matched and then crashed with ValueError on int('x') in legal_card.
    """
    # (?:...) is non-capturing so the three alternatives stay in groups
    # 1-3, which is what purify() uses to decide whether to strip dashes.
    patt = r'(?:(\d{15,16})|(\d{4}-\d{6}-\d{5})|(\d{4}(-\d{4}){3}))\Z'
    try:
        for i in range(1, 4):
            if re.match(patt, data).group(i):
                pd = purify(data, i)
                return legal_card(pd)
    except AttributeError:
        # re.match() returned None: not a recognised card layout.
        return False
def purify(data, i):
    """Flatten a matched card string into a list of its characters.

    For layout 1 (plain digits) the string is used unchanged; for the
    dashed layouts (i == 2 or i == 3) the dash separators are removed
    first.  Returns a fresh list of single-character strings.
    """
    digits = data if i == 1 else data.replace('-', '')
    return list(digits)
def legal_card(pd):
    """Return True when the digit characters in *pd* satisfy the Luhn check.

    *pd* is a sequence of single digit characters, most significant digit
    first.  Unlike the original implementation, the input sequence is not
    mutated (the original reversed the caller's list in place).
    """
    total = 0
    # Walk from the rightmost digit.  Every second digit (positions 2, 4,
    # ... from the right) is doubled and, if the result exceeds 9, reduced
    # by 9 — the standard Luhn algorithm.
    for position, ch in enumerate(reversed(pd), 1):
        digit = int(ch)
        if position % 2 == 0:
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    return total % 10 == 0
if __name__ == '__main__':
    # Interactive entry point (Python 2: raw_input and print statements).
    # Reads a card number from stdin and reports whether it is valid.
    card_number = raw_input('Your card number is:\n> ')
    if is_legal(card_number):
        print 'It is a legal number!'
    else:
        print 'Shit! It is an illegal number!!!'
| [
"lyp_login@outlook.com"
] | lyp_login@outlook.com |
5e806261092da4fffa656e0cd15f16202f682184 | b4166044870d1c026e86c95ac41e3e3613ee424f | /python_basic/abc049_a.py | aebd7be45462ec67753e627646e1ba390c23f02a | [] | no_license | nsakki55/AtCoder | 2cbb785415a7c0b9df9953ddc3706c90a5716a03 | 03c428e8eb8f24b8560d00e2388ba75509619690 | refs/heads/master | 2020-05-31T04:33:06.400697 | 2020-01-19T13:41:41 | 2020-01-19T13:41:41 | 190,099,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | c=input()
# Classify the single lowercase letter read above as vowel or consonant.
VOWELS = {'a', 'e', 'i', 'o', 'u'}
print('vowel' if c in VOWELS else 'consonant')
"n.sakki55@gmail.com"
] | n.sakki55@gmail.com |
2025deae0ef8bed10b8e9ccb11cc055a57a5ace7 | 37800583dd1731ce788a22622230386ac4a00bb5 | /propagators/ts.py | de13462bcf628bfe7f0b2af2de8fa63fb966c19b | [
"MIT"
] | permissive | mikelytaev/wave-propagation | 8bff1aadc80fd4fc180f50ebfc2f4b153032b3df | 5fa470234a3a3f22980a99747ecbdf5ac259451c | refs/heads/master | 2023-09-01T10:54:57.742771 | 2023-08-30T17:02:39 | 2023-08-30T17:02:39 | 95,381,841 | 21 | 6 | null | 2018-03-12T19:09:58 | 2017-06-25T19:42:53 | Python | UTF-8 | Python | false | false | 13,916 | py | from dataclasses import dataclass
import logging
import types
from enum import Enum
from copy import deepcopy
import numpy as np
import scipy.linalg as sla
import cmath as cm
from transforms.frft import *
from transforms.fcc_fourier.fcc import *
from transforms.filon import *
class ThinBody2d:
    """Base class for a thin 2D scattering body.

    x1_m and x2_m bound the body along the x axis (metres); eps_r is its
    complex relative permittivity.
    """

    def __init__(self, x1_m: float, x2_m: float, eps_r: complex):
        self.x1_m = x1_m
        self.x2_m = x2_m
        self.eps_r = eps_r

    def get_intersecting_intervals(self, x_m):
        """Return the list of (z1, z2) intervals the body occupies at x = x_m.

        Base-class stub: subclasses override this.  Note it returns None
        here, while subclasses return a (possibly empty) list.
        """
        pass
class Plate(ThinBody2d):
    """Rectangular plate of thickness ``width_m`` centred at x0_m,
    spanning z1_m..z2_m in height."""

    def __init__(self, x0_m, z1_m, z2_m, width_m, eps_r):
        half_width = width_m / 2
        self.x1_m = x0_m - half_width
        self.x2_m = x0_m + half_width
        self.z1_m = z1_m
        self.z2_m = z2_m
        self.eps_r = eps_r

    def get_intersecting_intervals(self, x_m):
        """Return the plate's single z interval when x_m lies inside it,
        otherwise an empty list."""
        if not (self.x1_m <= x_m <= self.x2_m):
            return []
        return [(self.z1_m, self.z2_m)]
class Ellipse(ThinBody2d):
    """Elliptical body centred at (x0, z0) with semi-axes a (along x)
    and b (along z)."""

    def __init__(self, x0, z0, a, b, eps_r):
        self.x0 = x0
        self.z0 = z0
        self.a = a
        self.b = b
        self.x1_m = x0 - a
        self.x2_m = x0 + a
        self.eps_r = eps_r

    def get_intersecting_intervals(self, x_m):
        """Return the vertical chord of the ellipse at x = x_m (empty when
        x_m lies strictly outside the ellipse's x extent)."""
        if not (self.x0 - self.a < x_m < self.x0 + self.a):
            return []
        # Parametrise the boundary as x = x0 + a*cos(t), z = z0 +/- b*sin(t).
        t = cm.acos((x_m - self.x0) / self.a)
        half_chord = abs(self.b * cm.sin(t))
        return [(self.z0 - half_chord, self.z0 + half_chord)]
class SpectralIntegrationMethod(Enum):
    """Quadrature scheme used for the inverse spectral (Fourier) integral."""
    # The original declaration had trailing commas ("fractional_ft = 1,"),
    # which made the first two member values the tuples (1,) and (2,)
    # instead of ints.  Members are only ever compared against other
    # members with == in this module, so normalising the values is safe.
    fractional_ft = 1
    fcc = 2
    contour = 3
@dataclass
class ThinScatteringComputationalParams:
    """Numerical parameters of the ThinScattering propagator."""
    max_p_k0: float                 # spectral half-width, in units of k0
    p_grid_size: int                # number of spectral (p) grid points
    quadrature_points: int          # quadrature nodes per body along x
    alpha: float                    # absorption parameter used in _gamma
    spectral_integration_method: SpectralIntegrationMethod
    h_curve: float = 0.0            # imaginary offset of the deformed contour
    use_mean_value_theorem: bool = False
    # Output x grid: provide either the grid itself, or min/max plus
    # either a point count or a step.
    x_grid_m: np.ndarray = None
    x_min_m: float = None
    x_max_m: float = None
    x_grid_size: int = None
    dx_m: float = None
    # Output z grid: provide either the grid itself, or min/max plus size.
    z_grid_m: np.ndarray = None
    z_min_m: float = None
    z_max_m: float = None
    z_grid_size: int = None
class ThinScatteringDebugData:
    """Container for intermediate arrays saved when debugging is enabled."""

    def __init__(self):
        # phi: solution of the integral-equation system,
        # rhs: its right-hand side,
        # psi: spectral-domain field before the inverse transform.
        self.phi = self.rhs = self.psi = None
class ThinScattering:
    """2D scattering of a wave field by thin bodies, solved in the spectral
    (Fourier-in-z) domain via a system of integral equations.

    The solver builds a dense kernel over the spectral grid and the bodies'
    quadrature nodes, solves the linear system, and inverts the spectral
    field back to the z domain with one of three integration schemes.
    """

    def __init__(self, wavelength, bodies, params: ThinScatteringComputationalParams, fur_q_func: types.FunctionType=None, save_debug=False):
        """wavelength in metres; bodies is a list of ThinBody2d instances;
        fur_q_func, if given, supplies the source spectrum over p
        (defaults to a flat 1/sqrt(2*pi) spectrum)."""
        self.bodies = deepcopy(bodies)
        self.params = deepcopy(params)
        self.k0 = 2 * cm.pi / wavelength
        self.max_p = self.params.max_p_k0 * self.k0
        #self.p_computational_grid, self.d_p = np.linspace(-self.max_p, self.max_p, self.params.p_grid_size, retstep=True)
        # Spectral (p) grid: layout and quadrature weights d_p depend on
        # the chosen integration method.
        if self.params.spectral_integration_method == SpectralIntegrationMethod.fractional_ft:
            # Regular grid matching the fractional FFT; d_p is a scalar step.
            self.p_computational_grid = get_fcft_grid(self.params.p_grid_size, 2 * self.max_p)
            self.p_grid_is_regular = True
            self.d_p = self.p_computational_grid[1] - self.p_computational_grid[0]
        elif self.params.spectral_integration_method == SpectralIntegrationMethod.fcc:
            # Chebyshev grid for Filon-Clenshaw-Curtis; d_p is a per-node
            # spacing array broadcast to the kernel's shape.
            self.p_computational_grid = chebyshev_grid(-self.max_p, self.max_p, self.params.p_grid_size)
            self.p_grid_is_regular = False
            t = -np.concatenate((self.p_computational_grid[1::] - self.p_computational_grid[0:-1:],
                                 [self.p_computational_grid[-1] - self.p_computational_grid[-2]]))
            _, self.d_p = np.meshgrid(self.p_computational_grid, t, indexing='ij')
        elif self.params.spectral_integration_method == SpectralIntegrationMethod.contour:
            # Integration along a contour deformed into the complex p plane
            # by +/- i*h, joined by a diagonal segment through the origin.
            h = self.params.h_curve
            #self.p_grid_h_1 = np.linspace(-self.k0 * self.params.max_p_k0, -h, 3000 + 1)[:-1:] + 1j * h
            self.p_grid_h_1 = chebyshev_grid(-self.k0 * self.params.max_p_k0, -h, 3000)[::-1] + 1j * h
            #self.p_grid_h_2 = (1 - 1j) * chebyshev_grid(-h, 0, 200)[::-1] # * np.linspace(-h, h, 800)
            #self.p_grid_h_3 = (1 - 1j) * chebyshev_grid(0, h, 200)[1::-1]
            self.p_grid_h_2 = (1 - 1j) * np.linspace(-h, h, 500)[1:-1:]
            self.p_grid_h_3 = np.array([])
            #self.p_grid_h_4 = np.linspace(h, self.k0 * self.params.max_p_k0, 3000 + 1)[1::] - 1j * h
            self.p_grid_h_4 = chebyshev_grid(h, self.k0 * self.params.max_p_k0, 3000)[::-1] - 1j * h
            self.p_computational_grid = np.concatenate((self.p_grid_h_1, self.p_grid_h_2, self.p_grid_h_3, self.p_grid_h_4))
            self.params.p_grid_size = len(self.p_computational_grid)
            t = np.concatenate((self.p_computational_grid[1::] - self.p_computational_grid[0:-1:],
                                [self.p_computational_grid[-1] - self.p_computational_grid[-2]]))
            _, self.d_p = np.meshgrid(self.p_computational_grid, t, indexing='ij')
            self.p_grid_is_regular = False
        else:
            raise Exception("Specified integration method does not supported")

        # Output x grid: explicit grid, or (min, max, size), or (min, max, step).
        if self.params.x_grid_m is not None:
            self.x_computational_grid = self.params.x_grid_m
        elif self.params.x_grid_size is not None:
            self.x_computational_grid = np.linspace(self.params.x_min_m, self.params.x_max_m, self.params.x_grid_size)
        elif self.params.dx_m is not None:
            self.x_computational_grid = np.arange(self.params.x_min_m, self.params.x_max_m, self.params.dx_m)
            self.params.x_grid_size = len(self.x_computational_grid)
        else:
            raise Exception("x grid parameters not specified")

        # Output z grid: the fractional-FT method dictates its own grid.
        if self.params.z_grid_m is not None:
            if self.params.spectral_integration_method == SpectralIntegrationMethod.fractional_ft:
                raise Exception("Arbitrary z grid not supported")
            self.z_computational_grid = self.params.z_grid_m
        elif self.params.spectral_integration_method == SpectralIntegrationMethod.fractional_ft:
            self.z_computational_grid = get_fcft_grid(self.params.p_grid_size, self.params.z_max_m * 2)
        elif self.params.spectral_integration_method == SpectralIntegrationMethod.fcc:
            self.z_computational_grid = np.linspace(self.params.z_min_m, self.params.z_max_m, self.params.z_grid_size)
        elif self.params.spectral_integration_method == SpectralIntegrationMethod.contour:
            self.z_computational_grid = np.linspace(self.params.z_min_m, self.params.z_max_m, self.params.z_grid_size)

        # The mean-value-theorem shortcut is only implemented for a single
        # quadrature node per body.
        if self.params.use_mean_value_theorem and self.params.quadrature_points > 1:
            raise Exception("not supported")

        # Per-body quadrature nodes along x and their trapezoidal weights.
        self.quad_x_grid = np.empty((len(self.bodies), self.params.quadrature_points))
        self.quad_weights = np.empty((len(self.bodies), self.params.quadrature_points))
        for body_index, body in enumerate(self.bodies):
            if self.params.quadrature_points == 1:
                # Midpoint rule over the whole body extent.
                self.quad_x_grid[body_index] = np.array([(body.x1_m + body.x2_m) / 2])
                self.quad_weights[body_index] = np.array([body.x2_m - body.x1_m])
            else:
                # Trapezoidal rule: half weights at the endpoints.
                self.quad_x_grid[body_index, :], dx = np.linspace(body.x1_m, body.x2_m, self.params.quadrature_points, retstep=True)
                self.quad_weights[body_index, :] = np.concatenate(([dx / 2], np.repeat(dx, self.params.quadrature_points - 2), [dx / 2]))

        # Source spectrum over p; defaults to a constant 1/sqrt(2*pi).
        if fur_q_func is None:
            self.qrc_q = self.p_computational_grid*0 + 1 / cm.sqrt(2*cm.pi)
        else:
            self.qrc_q = fur_q_func(self.p_computational_grid)

        # Size of the dense system: p-grid x quadrature nodes x bodies.
        self.super_ker_size = len(self.p_computational_grid) * self.params.quadrature_points * len(bodies)
        gms = self.super_ker_size**2 * 16 /1024/1024
        logging.debug("matrix %d x %d, size = %d mb", self.super_ker_size, self.super_ker_size, gms)

        self.debug_data = ThinScatteringDebugData() if save_debug else None

    def green_function(self, x, xsh, p):
        """Spectral-domain Green's function G(x, x', p).

        If p is already 2D the arguments are assumed pre-broadcast;
        otherwise a 3D meshgrid over (x, xsh, p) is built and squeezed.
        """
        if len(p.shape) == 2:
            xv, xshv, pv = x, xsh, p
        else:
            xv, xshv, pv = np.meshgrid(x, xsh, p, indexing='ij')
        tgvp = self._gamma(pv)
        gv = -1 / (2 * tgvp) * np.exp(-tgvp * np.abs(xv - xshv))
        return np.squeeze(gv)

    def _gamma(self, p):
        """Vertical wavenumber sqrt(p^2 - k0^2) with absorption alpha,
        with the branch chosen to have positive real part (decay)."""
        alpha = self.params.alpha
        a = np.sqrt((np.sqrt((self.k0 ** 2 - p ** 2) ** 2 + (alpha*self.k0 ** 2) ** 2) - (self.k0 ** 2 - p ** 2)) / 2)
        d = -np.sqrt((np.sqrt((self.k0 ** 2 - p ** 2) ** 2 + (alpha*self.k0 ** 2) ** 2) + (self.k0 ** 2 - p ** 2)) / 2)
        return a + 1j*d

    def _ker(self, body_number_i, body_number_j, i, j):
        """One (p x p) block of the system kernel, coupling quadrature node
        i of body_number_i with node j of body_number_j."""
        pv, pshv = np.meshgrid(self.p_computational_grid, self.p_computational_grid, indexing='ij')
        return self.k0 ** 2 / cm.sqrt(2*cm.pi) * self._body_z_fourier(body_number_i, self.quad_x_grid[body_number_i][i], pv - pshv) * \
            self._integral_green_function(self.quad_weights[body_number_i][i], self.quad_x_grid[body_number_i][i], self.quad_x_grid[body_number_j][j], pshv) * self.d_p

    def _rhs(self, body_number, i):
        """Right-hand-side block for quadrature node i of body_number:
        the incident field (source at x = 0) scattered into that node."""
        pv, pshv = np.meshgrid(self.p_computational_grid, self.p_computational_grid, indexing='ij')
        _, qshv = np.meshgrid(self.p_computational_grid, self.qrc_q, indexing='ij')
        m = self._body_z_fourier(body_number, self.quad_x_grid[body_number][i], pv - pshv) * \
            self._integral_green_function(self.quad_weights[body_number][i], self.quad_x_grid[body_number][i], 0, pshv) * qshv
        return np.sum(m * self.d_p, axis=1)  # TODO improve integral approximation??

    def _body_z_fourier(self, body_number, x_m, p):
        """Fourier transform over z of the body's permittivity contrast
        (eps_r - 1) at x = x_m, evaluated at spectral offsets p.

        Each occupied (a, b) interval contributes analytically; near p = 0
        the limit b - a is used to avoid division by zero.
        """
        intervals = self.bodies[body_number].get_intersecting_intervals(x_m)
        res = np.zeros(p.shape, dtype=complex)
        p_ind = abs(p) < 0.0000001
        f = np.empty(res.shape, dtype=complex)
        for (a, b) in intervals:
            f[np.logical_not(p_ind)] = 1j / p[np.logical_not(p_ind)] * (np.exp(-1j * p[np.logical_not(p_ind)] * b) -
                                                                        np.exp(-1j * p[np.logical_not(p_ind)] * a))
            # Limit of the expression above as p -> 0.
            f[p_ind] = b - a
            res += f
        res *= 1 / cm.sqrt(2*cm.pi) * (self.bodies[body_number].eps_r - 1)
        return res

    def _super_ker(self):
        """Assemble the full dense kernel from the per-node blocks."""
        sk = np.empty((self.super_ker_size, self.super_ker_size), dtype=complex)
        ks = len(self.p_computational_grid)
        t = len(self.p_computational_grid) * self.params.quadrature_points
        for body_i in range(0, len(self.bodies)):
            for body_j in range(0, len(self.bodies)):
                for x_i in range(0, len(self.quad_x_grid[body_i])):
                    for x_j in range(0, len(self.quad_x_grid[body_j])):
                        sk[(body_i*t + ks*x_i):(body_i*t + ks*(x_i + 1)):, (body_j*t + ks*x_j):(body_j*t + ks*(x_j + 1)):] = \
                            self._ker(body_i, body_j, x_i, x_j)
        return sk

    def _super_rhs(self):
        """Assemble the full right-hand-side vector from the per-node blocks."""
        rs = np.empty(self.super_ker_size, dtype=complex)
        ks = len(self.p_computational_grid)
        t = len(self.p_computational_grid) * self.params.quadrature_points
        for body_i in range(0, len(self.bodies)):
            for x_i in range(0, len(self.quad_x_grid[body_i])):
                rs[(body_i*t + ks*x_i):(body_i*t + ks*(x_i + 1)):] = self._rhs(body_i, x_i)
        return rs

    def _integral_green_function(self, h, xi, xj, p):
        r"""Green's function integrated over a quadrature cell of width h:
        \int\limits _{x_{i}-h/2}^{x_{i}+h/2}\tilde{G}(x',x_{j},p')dx'

        With use_mean_value_theorem the integral is evaluated analytically;
        otherwise the midpoint value times h is used.
        """
        if self.params.use_mean_value_theorem:
            if abs(xi - xj) < 0.00000001:
                # Self-interaction cell (xi == xj).
                return 1 / self._gamma(p)**2 * (np.exp(-self._gamma(p)*h/2)-1)
            else:
                return 1 / (2*self._gamma(p)**2) * np.exp(-self._gamma(p)*abs(xi-xj))*(np.exp(-self._gamma(p)*h/2) - np.exp(self._gamma(p)*h/2))
        else:
            return self.green_function(xi, xj, p) * h

    def calculate(self):
        """Solve the scattering problem and return the field on the
        (x, z) output grids.

        With no bodies only the incident field is transformed back."""
        if len(self.bodies) > 0:
            logging.debug("Preparing kernel")
            logging.debug("Preparing right-hand side")
            rhs = self._super_rhs()
            #logging.debug("||ker|| = %d, cond(ker) = %d", np.linalg.norm(ker), np.linalg.cond(ker))
            #logging.debug("||rhs|| = %d", np.linalg.norm(rhs))
            # Second-kind integral equation: (I + K) phi = rhs.
            left = np.eye(self.super_ker_size) + self._super_ker()
            logging.debug("Solving system of integral equations")
            super_phi = sla.solve(left, rhs)
            #logging.debug("||s_phi|| = %d", np.linalg.norm(super_phi))
            if self.debug_data:
                self.debug_data.phi = super_phi
                self.debug_data.rhs = rhs

        logging.debug("Preparing psi")
        ks = len(self.p_computational_grid)
        t = len(self.p_computational_grid) * self.params.quadrature_points
        # Incident field in the spectral domain (source at x = 0).
        psi = self.green_function(self.x_computational_grid, 0, self.p_computational_grid) * self.qrc_q
        # Add each node's scattered contribution (loop is empty with no bodies).
        for body_i in range(0, len(self.bodies)):
            for x_i in range(0, len(self.quad_x_grid[body_i])):
                phi = super_phi[(body_i*t + ks*x_i):(body_i*t + ks*(x_i + 1)):]
                psi += -self.k0**2/cm.sqrt(2*cm.pi) * self.green_function(
                    self.x_computational_grid, self.quad_x_grid[body_i][x_i], self.p_computational_grid) * phi

        if self.debug_data:
            self.debug_data.psi = psi

        logging.debug("Calculating inverse Fourier transform")
        # Invert p -> z with the scheme matching the grid built in __init__.
        if self.params.spectral_integration_method == SpectralIntegrationMethod.fractional_ft:
            res = ifcft(psi, 2 * self.max_p, -self.params.z_max_m, self.params.z_max_m)
        elif self.params.spectral_integration_method == SpectralIntegrationMethod.fcc:
            res = 1 / cm.sqrt(2 * cm.pi) * FCCFourier(2 * self.max_p, self.params.p_grid_size, -self.z_computational_grid).forward(psi.T, -self.max_p, self.max_p).T
        elif self.params.spectral_integration_method == SpectralIntegrationMethod.contour:
            res = 1 / cm.sqrt(2 * cm.pi) * filon_trapezoidal_ft(psi.T, self.p_computational_grid, -self.z_computational_grid).T
        return res
| [
"mikelytaev@gmail.com"
] | mikelytaev@gmail.com |
5c28d0b822397cf4e9ac5940634bfe9334e9babc | f2cc45e46a55c09570574eeaf358919491b4fae9 | /作业/7石头剪刀布.py | e48814caaf4f0f0203e12107fa0f806dc29be394 | [] | no_license | 2099454967/python_wang | f4f5e10891ed9be6112f8f3d0d6313975e2f914f | c9dd8a5f6a9864d2a3e61bad9a12cea566ebdec9 | refs/heads/master | 2020-03-11T17:43:43.288920 | 2018-04-24T05:27:53 | 2018-04-24T05:27:53 | 130,155,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | #1---石头
#2---剪刀
#3---布
import random
# a = computer's choice, b = player's choice (1=rock, 2=scissors, 3=paper).
a = random.randint(1,3)
b = int(input('请输入1---石头 2---剪刀 3---布'))
# The player wins when the player's choice beats the computer's:
# rock(1) beats scissors(2), scissors(2) beats paper(3), paper(3) beats rock(1).
# The original condition tested the COMPUTER's winning combinations
# (a beats b) but printed "player wins", so the two outcomes were swapped.
if (b==1 and a==2) or (b==2 and a==3) or (b==3 and a==1):
    print('玩家赢')
elif a==b:
    print('平局')
else:
    print('电脑赢')
| [
"2099454967@qq.com"
] | 2099454967@qq.com |
3388e852acf0ee62545acf09ca024839f5401c63 | 67c0d7351c145d756b2a49e048500ff361f7add6 | /xpresso/ai/admin/infra/packages/ubuntu/utility/docker_distribution_package.py | 81af3bf83624d6377f690deef24079eac561889b | [] | no_license | Krishnaarunangsu/XpressoDataHandling | ba339ae85b52e30715f47406ddb74966350848aa | 0637a465088b468d6fdb6d1bb6f7b087547cec56 | refs/heads/master | 2020-06-27T19:58:43.358340 | 2019-08-29T16:59:08 | 2019-08-29T16:59:08 | 200,035,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,425 | py | """Abstract base class for packages object"""
__all__ = ['DockerDistributionPackage']
__author__ = 'Naveen Sinha'
import os
import shutil
from xpresso.ai.admin.infra.packages.abstract_package import AbstractPackage
from xpresso.ai.admin.infra.packages.local_shell_executor import \
LocalShellExecutor
from xpresso.ai.core.utils.xpr_config_parser import XprConfigParser
from xpresso.ai.admin.controller.exceptions.xpr_exceptions import\
PackageFailedException
class DockerDistributionPackage(AbstractPackage):
    """
    Installs Docker Distribution Services. It installs open source Harbor
    project to manage the docker registry. This installs the Harbor project
    only.
    """
    # Keys into the "packages_setup" section of the Xpresso config.
    CONFIG_SECTION = "docker_distribution"
    HARBOR_CFG_FILE = "harbor_cfg_file"
    HARBOR_COMPOSE_FILE = "harbor_compose_file"
    HARBOR_TMP_FOLDER = "harbor_folder"

    def __init__(self, config_path=XprConfigParser.DEFAULT_CONFIG_PATH,
                 executor=None):
        # Default to running shell commands locally unless given an executor.
        if not executor:
            executor = LocalShellExecutor()
        super().__init__(executor)
        self.config = XprConfigParser(config_path)["packages_setup"]

    def status(self, **kwargs):
        """
        Checks the status of existing running application

        Returns:
            True if every Harbor container reports Running, False otherwise

        Raises:
            PackageFailedException
        """
        docker_name_list = ["nginx", "harbor-portal", "harbor-jobservice",
                            "harbor-core", "harbor-adminserver", "", "registry",
                            "registryctl", "harbor-persistence", "redis", "harbor-log"]
        # docker inspect prints "true"/"false" per container; any "false"
        # means at least one container is not running.
        (_, output, _) = self.execute_command_with_output(
            "docker inspect -f '{{.State.Running}}' {}".format(
                ' '.join(docker_name_list)
            )
        )
        if 'false' in output:
            return False
        return True

    def install(self, **kwargs):
        """
        Sets up docker distribution in a VM by downloading the Harbor
        online installer, copying in the local config/compose files and
        running install.sh.

        Returns:
            True, if setup is successful. False Otherwise

        Raises:
            PackageFailedException
        """
        current_directory = os.getcwd()
        harbor_folder = self.config[self.CONFIG_SECTION][self.HARBOR_TMP_FOLDER]
        try:
            if not os.path.exists(harbor_folder):
                os.makedirs(harbor_folder)
        except OSError:
            self.logger.error("Can not create directory")
            raise PackageFailedException("Harbor temp folder can't be created")
        # Fetch the pinned Harbor online installer tarball.
        self.execute_command(
            "wget https://storage.googleapis.com/harbor-releases/"
            "release-1.7.0/harbor-online-installer-v1.7.5.tgz -O "
            "{}/harbor.tgz".format(harbor_folder))
        os.chdir(harbor_folder)
        self.execute_command("tar xvf harbor.tgz".format())
        extracted_folder = os.path.join(harbor_folder, "harbor")
        # chdir is used as an existence check for the extracted folder.
        try:
            os.chdir(extracted_folder)
        except OSError:
            self.logger.error("Harbor Folder not found")
            raise PackageFailedException("Harbor Folder not found")
        os.chdir(current_directory)
        # Overlay the project's own harbor.cfg and docker-compose files.
        shutil.copy(self.config[self.CONFIG_SECTION][self.HARBOR_CFG_FILE],
                    extracted_folder)
        shutil.copy(self.config[self.CONFIG_SECTION][self.HARBOR_COMPOSE_FILE],
                    extracted_folder)
        os.chdir(extracted_folder)
        self.execute_command("/bin/bash install.sh")
        # Restore the caller's working directory.
        os.chdir(current_directory)
        return True

    def uninstall(self, **kwargs):
        """
        Remove docker distribution

        Returns:
            True, if setup is successful. False Otherwise

        Raises:
            PackageFailedException
        """
        """
        cd $PWD/config/harbor
        docker-compose up -d
        """
        harbor_tmp_dir = self.config[self.CONFIG_SECTION][
            self.HARBOR_TMP_FOLDER]
        harbor_dir = os.path.join(harbor_tmp_dir, "harbor")
        try:
            os.chdir(harbor_dir)
        except OSError:
            # NOTE(review): self.logger is called directly here instead of
            # self.logger.error(...) as elsewhere — likely a typo; confirm.
            self.logger("{} not found.".format(harbor_dir))
            raise PackageFailedException(
                "{} not found. Required for stopping".format(harbor_dir))
        # NOTE(review): this runs "docker-compose up -d" although the method
        # is documented as removing/stopping the service — confirm whether
        # "down" was intended.
        self.execute_command("/usr/local/bin/docker-compose up -d")
        return True

    def start(self, **kwargs):
        # Starting is implemented as a (re)install.
        return self.install()

    def stop(self, **kwargs):
        # Stopping delegates to uninstall (see NOTE in uninstall above).
        return self.uninstall()
| [
"arunangsutech@gmail.com"
] | arunangsutech@gmail.com |
2bfdde0bcb495db0d4fffb4b7621471a705ddae0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/138/usersdata/210/47892/submittedfiles/volumeTV.py | d81c84c26ad1a6f289f01b721bd8d351fc33d0df | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py |
# Read the initial volume (e) and the number of volume changes (f),
# then apply each change while keeping the volume inside [0, 100].
e=int(input('digite e:'))
f=int(input('digite f:'))
cont=e
for i in range(1,f+1,1):
    a=int(input('digite alteração:'))
    # Saturate to the valid TV volume range.  The original code clamped
    # only the upper bound (to 100): a large negative change could drive
    # the volume below zero, after which small updates were ignored.
    cont = max(0, min(100, cont + a))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d18fe269d04f86552cec9fbf8aa058fd36933eb2 | 4421cdef9d23204d392726db4e3886b8aec3072d | /Django/SDD/HEHE/migrations/0005_auto_20200913_1127.py | 058d909ce14f025e64ab9d7d4d3d2cb91ca114cb | [] | no_license | Sanketdave12/PRACTICE | f38b8eae569ee670db55610202ef6f3e60fade87 | 0f71359cf5326be73b3d9d4b1219bea4832cc194 | refs/heads/master | 2022-12-27T08:08:45.953842 | 2020-09-18T17:31:23 | 2020-09-18T17:31:23 | 296,587,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # Generated by Django 3.1.1 on 2020-09-13 05:57
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: drops the Notice.branch field and
    then deletes the Branch model it referenced."""

    dependencies = [
        ('HEHE', '0004_delete_profile'),
    ]

    operations = [
        # The referencing field must be removed before its target model
        # can be deleted.
        migrations.RemoveField(
            model_name='notice',
            name='branch',
        ),
        migrations.DeleteModel(
            name='Branch',
        ),
    ]
| [
"sddave1998@gmail.com"
] | sddave1998@gmail.com |
df748b62727262acccea6b2dec74421a653c6b2d | fae5487c5e50d0f42cd4fc82011c67df17b424c0 | /generatorify.py | ced38a537b4f15b00af87cc145d7e2ebb0febbc2 | [
"MIT"
] | permissive | eric-wieser/generatorify | b970dbda12f7e3dca481b29647a685294dc370c9 | 7bd759ecf88f836ece6cdbcf7ce1074260c0c5ef | refs/heads/master | 2020-06-25T05:37:03.755586 | 2019-07-28T07:03:47 | 2019-07-28T07:03:47 | 199,217,917 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,760 | py | import queue
import threading
import collections.abc
class generator_from_callback(collections.abc.Generator):
    """
    A generator wrapper for a function that invokes a callback multiple times.
    Calling `send` on the generator emits a value from one callback, and returns
    the next.

    Note this starts a background thread
    """
    def __init__(self, func):
        # Hand-off queues of capacity 1: _ready_queue carries
        # ('send'|'throw', value) commands into the worker thread,
        # _done_queue carries (is_exception, value) results back.
        self._ready_queue = queue.Queue(1)
        self._done_queue = queue.Queue(1)
        # Mutable flag (shared with the worker) set once func has finished.
        self._done_holder = [False]

        # local to avoid reference cycles
        ready_queue = self._ready_queue
        done_queue = self._done_queue
        done_holder = self._done_holder

        def callback(value):
            # Runs on the worker thread: publish the yielded value, then
            # block until the consumer sends or throws.
            done_queue.put((False, value))
            cmd, val = ready_queue.get()
            if cmd == 'send':
                return val
            elif cmd == 'throw':
                raise val
            else:
                assert False  # pragma: no cover

        def thread_func():
            # Mirror generator semantics: the first resume must be
            # send(None); reject non-None sends before the first yield.
            while True:
                cmd, val = ready_queue.get()
                if cmd == 'send' and val is not None:
                    done_queue.put((True, TypeError("can't send non-None value to a just-started generator")))
                    continue
                break
            try:
                if cmd == 'throw':
                    raise val
                ret = func(callback)
                # Normal completion becomes StopIteration, carrying func's
                # return value when there is one.
                raise StopIteration(ret) if ret is not None else StopIteration
            except BaseException as e:
                done_holder[0] = True
                done_queue.put((True, e))

        self._thread = threading.Thread(target=thread_func)
        self._thread.start()

    def _put(self, *args):
        # Resume the worker with a command and wait for the next value
        # (or re-raise whatever the worker terminated with).
        if self._done_holder[0]:
            raise StopIteration
        self._ready_queue.put(args)
        is_exception, val = self._done_queue.get()
        if is_exception:
            try:
                raise val
            finally:
                # prevent val's traceback containing a reference cycle
                del val
        else:
            return val

    def send(self, value):
        return self._put('send', value)

    def throw(self, exc):
        return self._put('throw', exc)

    def __next__(self):
        return self.send(None)

    def close(self):
        # Same contract as generator.close(): throw GeneratorExit into the
        # worker and join it; re-raise anything unexpected.
        try:
            self.throw(GeneratorExit)
        except StopIteration:
            self._thread.join()
        except GeneratorExit:
            self._thread.join()
        except BaseException:
            self._thread.join()
            raise
        else:
            # yielded again, can't clean up the thread
            raise RuntimeError('Task with callback ignored GeneratorExit')

    def __del__(self):
        self.close()
class callback_from_generator(collections.abc.Callable):
    """Adapts a generator function into a callback-driven function.

    Calling the wrapper with a callback drives the generator: every
    yielded item is passed to the callback, the callback's return value
    is sent back into the generator, and any exception the callback
    raises is thrown into the generator.  The generator's return value
    becomes the wrapper's return value.
    """

    def __init__(self, generator_func):
        self._generator_func = generator_func

    def __call__(self, callback):
        gen = self._generator_func()
        try:
            try:
                item = next(gen)
                while True:
                    try:
                        reply = callback(item)
                    except BaseException as exc:
                        # Route the callback's exception into the generator,
                        # which may handle it and continue yielding.
                        item = gen.throw(exc)
                    else:
                        item = gen.send(reply)
            except StopIteration as stop:
                # The generator finished: its return value is ours.
                return stop.value
        finally:
            gen.close()
| [
"wieser.eric@gmail.com"
] | wieser.eric@gmail.com |
52cef1b52cab1ee988800a17d8e10bdc0f556955 | ef8a358a3f54a26610eadcac6d0ebca406fa2578 | /undermythumb/files.py | 36b90251fc9af101bfca1712b36bdec09229c64d | [
"BSD-3-Clause"
] | permissive | GunioRobot/django-undermythumb | af8f2f0ac0ec65d6d4c777eaf380510b8e81bd1b | f70be02998cbe97e452d8e0d66e8efc276e77621 | refs/heads/master | 2020-12-25T09:18:40.332593 | 2011-11-08T20:17:56 | 2011-11-08T20:18:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,632 | py | from django.db.models.fields.files import ImageFieldFile
__all__ = ['ThumbnailFieldFile', 'ImageWithThumbnailsFieldFile']
class ThumbnailSet(object):
    """Lazy collection of ThumbnailFieldFile objects for one field file,
    keyed by the attribute name declared in the field's `thumbnails` spec.
    """

    def __init__(self, field_file):
        self.file = field_file
        self.field = self.file.field
        self.instance = self.file.instance
        # attname -> ThumbnailFieldFile; filled on demand by _populate().
        self._cache = {}
        self._populate()

    def _populate(self):
        """Build the thumbnail cache, once, for a saved file on a saved instance."""
        if not self._cache and self.file.name and self.instance.id:
            for options in self.field.thumbnails:
                # Each spec is (attname, renderer) or (attname, renderer, key);
                # the key defaults to the attribute name.
                try:
                    attname, renderer, key = options
                except ValueError:
                    attname, renderer = options
                    key = attname
                ext = '.%s' % renderer.format
                name = self.field.get_thumbnail_filename(
                    instance=self.instance,
                    original=self.file,
                    key=key,
                    ext=ext)
                thumbnail = ThumbnailFieldFile(
                    attname,
                    renderer,
                    self.instance,
                    self.field,
                    name)
                self._cache[attname] = thumbnail

    def clear_cache(self):
        """Drop cached thumbnails so the next access rebuilds them."""
        self._cache = {}

    def __getattr__(self, name):
        # Unknown attribute names resolve to None rather than raising
        # AttributeError.  NOTE(review): this also silences typos in
        # thumbnail names — confirm that is intended.
        try:
            return self._cache[name]
        except KeyError:
            return None

    def __iter__(self):
        # Python 2 code (dict.iteritems).  Yields the ThumbnailFieldFile
        # values, repopulating first in case the cache was cleared.
        self._populate()
        for attname, value in self._cache.iteritems():
            yield value
class ThumbnailFieldFile(ImageFieldFile):
    """A single rendered thumbnail belonging to an image field file.

    ``attname`` is the accessor name on the owning ThumbnailSet and
    ``renderer`` produces the thumbnail image from the original content.
    Thumbnails are generated by the parent field file's save(); saving
    one directly is not supported.
    """

    def __init__(self, attname, renderer, *args, **kwargs):
        self.attname = attname
        self.renderer = renderer
        super(ThumbnailFieldFile, self).__init__(*args, **kwargs)

    def save(self):
        # The original raised ``NotImplemented(...)`` — NotImplemented is a
        # sentinel value, not an exception class, and calling it raises
        # TypeError instead of the intended error.
        raise NotImplementedError('Thumbnails cannot be saved directly.')
class ImageWithThumbnailsFieldFile(ImageFieldFile):
    """File container for an ``ImageWithThumbnailsField``.

    Exposes the generated thumbnails through the ``thumbnails`` attribute.
    """
    def __init__(self, *args, **kwargs):
        super(ImageWithThumbnailsFieldFile, self).__init__(*args, **kwargs)
        self.thumbnails = ThumbnailSet(self)

    def save(self, name, content, save=True):
        """Save the original image, and its thumbnails.
        """
        super(ImageWithThumbnailsFieldFile, self).save(name, content, save)
        # Rebuild the set so filenames reflect the just-saved original.
        self.thumbnails.clear_cache()

        # iterate over thumbnail
        for thumbnail in self.thumbnails:
            # Render each thumbnail from the original content and persist
            # it with the field's storage backend.
            rendered = thumbnail.renderer.generate(content)
            self.field.storage.save(thumbnail.name, rendered)
| [
"mattdennewitz@gmail.com"
] | mattdennewitz@gmail.com |
2e826a8d46d2e1211a1caa8a225498c5824b60a3 | 52a61caff0aeb434c32e5657e38762643e9f57dd | /DataStructuresAndAlgorithms/SearchAndSort/Sort/Count&CountingSort&Digital(Bitwise)Sorting/socks.py | 189a8837bd71a575381bfb18e4a7ee984d0e9355 | [] | no_license | AndrewErmakov/PythonTrainingBasics | 1480a6378d1ec59884760e2b3014ccc3d28f058f | 639e15bbfc54da762cb9e366497754cfece30691 | refs/heads/master | 2021-06-10T15:57:58.682335 | 2021-03-25T13:37:30 | 2021-03-25T13:37:30 | 153,678,760 | 0 | 0 | null | 2018-10-30T13:52:51 | 2018-10-18T19:45:47 | Python | UTF-8 | Python | false | false | 542 | py | len_table, count_socks, count_points = map(int, input().split())
def determination_thickness(length_table, number_socks, number_points):
    """Read sock intervals from stdin and answer point thickness queries.

    Reads ``number_socks`` lines "left right" (1-based, inclusive bounds),
    then ``number_points`` query positions, printing for each query the
    number of socks covering that position.

    Uses a difference array plus one prefix-sum pass — O(socks + table +
    points) — instead of the original per-cell marking of every interval,
    which was O(socks * interval_length).
    """
    # diff has two extra slots so right_border + 1 is always in range.
    diff = [0] * (length_table + 2)
    for _ in range(number_socks):
        left_border, right_border = map(int, input().split())
        diff[left_border] += 1
        diff[right_border + 1] -= 1
    # In-place prefix sum: diff[i] becomes the cover count at position i.
    for i in range(1, length_table + 1):
        diff[i] += diff[i - 1]
    for _ in range(number_points):
        index_point = int(input())
        print(diff[index_point])


determination_thickness(len_table, count_socks, count_points)
| [
"andrew.67@list.ru"
] | andrew.67@list.ru |
4c350d9b6720d62fa21156d31748be72346a2283 | ca3150c69ef477ea53902c51d3840195262f2903 | /ISStreamer-r-3-bucket.py | 797f6edf0693846d02d60e183933d15353f5a284 | [] | no_license | chaeplin/dash-mainnet | 66c8df6f4a6df25c53e9ba1572a39e12d9e61daf | 10891c210a3cf40f2e052ee9d2657a97a71efba6 | refs/heads/master | 2021-01-19T21:15:49.790385 | 2017-11-14T20:25:52 | 2017-11-14T20:25:52 | 82,476,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,549 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Worker daemon: drains three per-bucket Redis queues and forwards each queued
# payload to its Initial State (ISStreamer) bucket; emits a heartbeat when idle.
import io, os, sys
import simplejson as json
import datetime
import time
import redis
from ISStreamer.Streamer import Streamer
# bucket1 = dash121
iss_bucket_name1 = 'dash121'
iss_bucket_key1 = 'xxx'  # redacted placeholder credential
iss_access_key1 = 'xxxx'  # redacted placeholder credential
QUE_NAME1 = 'INITIALSTATE_PUSH' + iss_bucket_name1
# bucket2 = testnet
iss_bucket_name2 = 'testnet'
iss_bucket_key2 = 'xxxx'  # redacted placeholder credential
iss_access_key2 = 'xxxxxx'  # redacted placeholder credential
QUE_NAME2 = 'INITIALSTATE_PUSH' + iss_bucket_name2
# bucket3 = ticker
iss_bucket_name3 = 'ticker'
iss_bucket_key3 = 'xxxx'  # redacted placeholder credential
iss_access_key3 = 'xxxx'  # redacted placeholder credential
QUE_NAME3 = 'INITIALSTATE_PUSH' + iss_bucket_name3
# streamer
# One Streamer per destination bucket; buffered, flushed explicitly below.
streamer1 = Streamer(bucket_key=iss_bucket_key1, access_key=iss_access_key1)#, debug_level=2)
streamer2 = Streamer(bucket_key=iss_bucket_key2, access_key=iss_access_key2)#, debug_level=2)
streamer3 = Streamer(bucket_key=iss_bucket_key3, access_key=iss_access_key3)#, debug_level=2)
# redis
POOL = redis.ConnectionPool(host='192.168.10.2', port=16379, db=0)
r = redis.StrictRedis(connection_pool=POOL)
# main
# Fail fast if the Redis server is unreachable.
try:
    r.ping()
except Exception as e:
    print(e)
    sys.exit()
try:
    while 1:
        # Block up to 5 seconds waiting for a job on any of the three queues.
        quelist = (QUE_NAME1, QUE_NAME2, QUE_NAME3)
        jobque = r.brpop(quelist, 5)
        if jobque:
            # brpop returns (queue_name, payload); payload is a JSON document
            # carrying the target bucket name plus the log_object arguments.
            redis_val = json.loads(jobque[1].decode("utf-8"))
            bucket_name = redis_val.get('bucket_name', 'dash121')
            kprefix = redis_val.get('key_prefix')
            epoch00 = redis_val.get('epoch')
            bucket = redis_val.get('bucket')
            print(epoch00, bucket_name, kprefix, bucket)
            # Route the payload to the streamer matching its bucket name.
            if bucket_name == iss_bucket_name1:
                streamer1.log_object(bucket, key_prefix=kprefix, epoch=epoch00)
            elif bucket_name == iss_bucket_name2:
                streamer2.log_object(bucket, key_prefix=kprefix, epoch=epoch00)
            elif bucket_name == iss_bucket_name3:
                streamer3.log_object(bucket, key_prefix=kprefix, epoch=epoch00)
            time.sleep(0.25)
        else:
            # No work within the timeout: log a timestamp heartbeat everywhere.
            b = { "tstamp": time.time() }
            streamer1.log_object(b)
            streamer2.log_object(b)
            streamer3.log_object(b)
        # Flush all buffered events each pass, pacing the API calls.
        streamer1.flush()
        time.sleep(0.25)
        streamer2.flush()
        time.sleep(0.25)
        streamer3.flush()
        time.sleep(0.25)
except Exception as e:
    print(e)
    sys.exit()
except KeyboardInterrupt:
    # KeyboardInterrupt derives from BaseException, so the clause above
    # does not swallow it; Ctrl-C lands here.
    print('[dequeue] intterupted by keyboard')
    sys.exit()
| [
"chaeplin@gmail.com"
] | chaeplin@gmail.com |
7dfe6483c3bf294fe7a02d964523db051a1eb588 | f166278e5e626c142245e4a9164ab4ed610a5cd4 | /apps/utils/requests_wrapper.py | 8d554e89487b84d6e723d8a0a51469e5668965e3 | [
"MIT"
] | permissive | wumulong/advance_django_example | 4cffd6de2eb9fdccefff7b995317a81e63b459be | 4832438a7db1065f7351a6cf4d4580ca1b6fffeb | refs/heads/master | 2021-06-24T13:34:31.873207 | 2017-09-11T15:13:40 | 2017-09-11T15:13:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | import requests
def requests_get(protocal, host, port, url, headers={}, params={}):
    """Issue an HTTP GET and normalize the outcome into a result dict.

    Builds the URL as ``protocal://host[:port]url`` and returns a dict:
      * ``'data'``   -- parsed JSON body on success, ``None`` on failure
      * ``'errors'`` -- ``''`` on success; otherwise the server-reported
        ``errors`` payload when one is available, else the raised exception

    :param protocal: URL scheme, e.g. ``'http'`` (name kept for existing callers).
    :param host: target host name or address.
    :param port: optional port; falsy values omit the port from the URL.
    :param url: path (and query) portion, starting with ``/``.
    :param headers: request headers (default dict is never mutated).
    :param params: query-string parameters (default dict is never mutated).
    """
    result = {'data': None, 'errors': ''}
    if port:
        requests_url = '{0}://{1}:{2}{3}'.format(protocal, host, port, url)
    else:
        requests_url = '{0}://{1}{2}'.format(protocal, host, url)
    response = None
    try:
        response = requests.get(requests_url, headers=headers, params=params)
        response.raise_for_status()
        result['data'] = response.json()
    except requests.exceptions.RequestException as e:
        # The request may fail before any response exists (connection errors),
        # and an error response is not guaranteed to carry a JSON body --
        # the previous code crashed with NameError/KeyError in both cases.
        server_errors = None
        if response is not None:
            try:
                payload = response.json()
            except ValueError:
                payload = None
            if isinstance(payload, dict):
                server_errors = payload.get('errors')
        result['errors'] = server_errors if server_errors else e
    return result
def requests_post(protocal, host, port, url, headers={}, json={}):
    """Issue an HTTP POST with a JSON body and normalize the outcome.

    Builds the URL as ``protocal://host[:port]url`` and returns a dict:
      * ``'data'``   -- parsed JSON body on success, ``None`` on failure
      * ``'errors'`` -- ``''`` on success; otherwise the server-reported
        ``errors`` payload when one is available, else the raised exception

    :param protocal: URL scheme, e.g. ``'http'`` (name kept for existing callers).
    :param host: target host name or address.
    :param port: optional port; falsy values omit the port from the URL.
    :param url: path (and query) portion, starting with ``/``.
    :param headers: request headers (default dict is never mutated).
    :param json: JSON-serializable request body (default dict is never mutated).
    """
    result = {'data': None, 'errors': ''}
    if port:
        requests_url = '{0}://{1}:{2}{3}'.format(protocal, host, port, url)
    else:
        requests_url = '{0}://{1}{2}'.format(protocal, host, url)
    response = None
    try:
        response = requests.post(requests_url, headers=headers, json=json)
        response.raise_for_status()
        result['data'] = response.json()
    except requests.exceptions.RequestException as e:
        # The request may fail before any response exists (connection errors),
        # and an error response is not guaranteed to carry a JSON body --
        # the previous code crashed with NameError/KeyError in both cases.
        server_errors = None
        if response is not None:
            try:
                payload = response.json()
            except ValueError:
                payload = None
            if isinstance(payload, dict):
                server_errors = payload.get('errors')
        result['errors'] = server_errors if server_errors else e
    return result
| [
"lsdlab@icloud.com"
] | lsdlab@icloud.com |
320615428a22aa133469f7c75f2279ba09ba1719 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/list_az2_migrate_request.py | 82455211ce51806b36c1f4d66474c9564882e62b | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListAz2MigrateRequest:
    """Request model for listing availability zones a DDS instance can migrate to.

    Attributes:
        openapi_types (dict): maps each attribute name to its declared type.
        attribute_map (dict): maps each attribute name to its JSON field name.
    """

    sensitive_list = []

    openapi_types = {
        'instance_id': 'str'
    }

    attribute_map = {
        'instance_id': 'instance_id'
    }

    def __init__(self, instance_id=None):
        """ListAz2MigrateRequest - a model defined in huaweicloud sdk"""
        self._instance_id = None
        self.discriminator = None
        self.instance_id = instance_id

    @property
    def instance_id(self):
        """Gets the instance_id of this ListAz2MigrateRequest.

        Instance ID.

        :return: The instance_id of this ListAz2MigrateRequest.
        :rtype: str
        """
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this ListAz2MigrateRequest.

        Instance ID.

        :param instance_id: The instance_id of this ListAz2MigrateRequest.
        :type: str
        """
        self._instance_id = instance_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        serialized = {}
        for name in self.openapi_types:
            raw = getattr(self, name)
            if isinstance(raw, list):
                serialized[name] = [
                    entry.to_dict() if hasattr(entry, "to_dict") else entry
                    for entry in raw
                ]
            elif hasattr(raw, "to_dict"):
                serialized[name] = raw.to_dict()
            elif isinstance(raw, dict):
                converted = {}
                for key, val in raw.items():
                    converted[key] = val.to_dict() if hasattr(val, "to_dict") else val
                serialized[name] = converted
            elif name in self.sensitive_list:
                # Mask values flagged as sensitive.
                serialized[name] = "****"
            else:
                serialized[name] = raw
        return serialized

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, ListAz2MigrateRequest):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
deb19ce4bc3e611f561d6ea04d4999f2e9b15fc3 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /bJxNHk7aovkx8Q776_10.py | 69e76a086c61099322e2b65ff967b675005faf40 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py |
def gold_distribution(gold):
totals, p = [0, 0], 0
while gold:
idx = -1 if gold[-1] > gold[0] else 0
totals[p] += gold.pop(idx)
p = 1 - p
return totals
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9e0aa6a22faffb9424db0cf20701f012fab75424 | 59b72b8f662cd605b3ce31f54779c17e5ca066d0 | /interview_q/leet_code/解码方法.py | 84496ba3e188f5a6b9f0ffc14b200e43e24c76c6 | [] | no_license | dongyang2/hello-world | c1f5853ccafd6b8f23836192547ab36f898e0891 | 1f859b53e2b21ed5a648da09b84950f03ec1b370 | refs/heads/master | 2022-12-11T22:07:22.853912 | 2022-11-24T03:52:35 | 2022-11-24T03:52:35 | 119,025,960 | 0 | 0 | null | 2018-01-26T10:09:58 | 2018-01-26T08:28:10 | null | UTF-8 | Python | false | false | 4,137 | py | # https://leetcode-cn.com/problems/decode-ways/
# coding:utf-8
# Python 3
# 数字与字母一一对应,求转换结果。解法参考本文件夹下的爬楼梯和电话号码的字母组合。
"""
此题和爬楼梯是同一个类型的问题,难点在于其添加了许多限制条件,只要避开限制条件就可以完美解题了
每次递进,可以选取一个数也可以选取两个数:
s[i] != '0'
如果 s[i-1]s[i] <= 26, 则 dp[i] = dp[i-1] + dp[i-2]
如果 s[i-1]s[i] > 26, 则 dp[i] = dp[i-1], 这是因为 s[i-1]s[i] 组成的两位数无法翻译
s[i] == '0'
如果 s[i-1]s[i] <= 26, 则 dp[i] = dp[i-2], 这是因为 s[i] 无法翻译
还有一些情景直接使得整个序列无法被翻译:
相邻的两个 ‘0’
以 ‘0’ 结尾的大于 26 的数字
去除这些限制条件,此题就是爬楼梯的问题了,一次可以爬一步,也可以爬两步,问有多少中方式到达终点。
作者:nfgc
链接:https://leetcode-cn.com/problems/decode-ways/solution/dong-tai-gui-hua-tu-jie-by-nfgc/
来源:力扣(LeetCode)
著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
"""
# 除了解题思路以外,还要注意‘10’,‘00’, '101','301'这样的边界输入
def clim(s, dic):
    """Bottom-up "conditional stair climbing": enumerate every decoding of *s*.

    Returns a list of decoded strings (letters looked up in *dic*), or 0 when
    *s* has no valid decoding (empty input, leading zero, or a stranded zero).
    """
    length = len(s)
    if length < 1 or s[0] == '0':
        return 0
    if length == 1:
        if s == '0':
            return 0
        return [dic[s]]
    prev = [dic[s[0]]]               # decodings of the prefix ending two digits back
    curr = decode_2char(s[:2], dic)  # decodings of the current prefix
    if curr is False:
        return 0
    for i in range(2, length):
        pair = s[i - 1] + s[i]
        step = []  # plays the role of "final" in the stair-climbing DP
        if s[i] == '0':
            # A zero can only ride along with the digit before it as '10'/'20'.
            if s[i - 1] not in ('1', '2'):
                return 0
            step = decode(pair, prev, step, dic)
        else:
            if s[i - 1] != '0' and int(pair) <= 26:
                step = decode(pair, prev, step, dic)
            step = decode(s[i], curr, step, dic)
        prev, curr = curr, step
    return curr
def decode_2char(n, dic):
    """Decode a two-digit string: a list of decodings, or False if impossible."""
    first, second = n[0], n[1]
    if second == '0':
        # Only '10' and '20' map to a letter; any other trailing zero is stuck.
        return [dic[n]] if first in ('1', '2') else False
    options = [dic[first] + dic[second]]
    if int(n) <= 26:
        options.append(dic[n])
    return options
def decode(s: str, li: list, tmp: list, dic):
    """Append dic[s] to every prefix decoding in *li*, extending *tmp* in place."""
    tmp.extend(prefix + dic[s] for prefix in li)
    return tmp
def clim_pure_num(s):
    """Count the decodings of digit string *s* (pure tally, no enumeration).

    Classic "decode ways" DP held in two rolling counters; returns 0 for any
    undecodable input (empty string, leading zero, stranded zero).
    """
    length = len(s)
    if length < 1 or s[0] == '0':
        return 0
    if length == 1:
        return 1
    prev = 1
    curr = decode_2char_pure_num(s[:2])
    if curr is False:
        return 0
    for i in range(2, length):
        pair = s[i - 1] + s[i]
        if s[i] == '0':
            # A zero must pair with the digit before it as '10' or '20'.
            if s[i - 1] not in ('1', '2'):
                return 0
            nxt = prev
        elif s[i - 1] != '0' and int(pair) <= 26:
            # Both the single digit and the two-digit pair are decodable.
            nxt = prev + curr
        else:
            nxt = curr
        prev, curr = curr, nxt
    return curr
def decode_2char_pure_num(n):
    """Ways to decode a two-digit string; False when it cannot be decoded."""
    if n[1] == '0':
        return 1 if n[0] in ('1', '2') else False
    return 2 if int(n) <= 26 else 1
def main():
    """Demo driver: count the decodings of a sample digit string and print it."""
    # Digit-string -> letter table ('1'..'26' -> 'A'..'Z'); kept for parity
    # with the enumerating variant, though clim_pure_num does not need it.
    mapping = {}
    for index, letter in enumerate((chr(65 + x) for x in range(26)), start=1):
        mapping[str(index)] = letter
    sample = "27"
    ways = clim_pure_num(sample)
    if ways == 0:
        raise ValueError('Please input valid number.')
    print(ways)
if __name__ == '__main__':
    import time
    print('-' * 15, 'Start', time.ctime(), '-' * 15, '\n')
    main()
    print('%s%s %s %s %s' % ('\n', '-' * 16, 'End', time.ctime(), '-' * 16))
| [
"dongyangzhao@outlook.com"
] | dongyangzhao@outlook.com |
3d027acd0606c60ffd743a4cc8749791bd344110 | 527d3d38b57b9f12ea6e91167a9a06149a89044b | /wxFEFactory/python/tools/base/assembly_hacktool.py | 9606e4c6a07dd554a1bb017e2c31e0fef44e904c | [] | no_license | czastack/wxFEFactory | 0d7ee77e542234c039b6056aeb44cb2a40341714 | bed824746266734e40103010c0132aad069d723a | refs/heads/master | 2021-06-04T11:31:05.808038 | 2019-06-03T18:51:01 | 2019-06-03T18:51:01 | 97,013,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,752 | py | import types
from functools import partial
from lib.extypes import DataClass
from lib.hack import utils
from fefactory_api import ui
from .hacktool import BaseHackTool
from .assembly_code import AssemblyGroup
class AssemblyHacktool(BaseHackTool):
    """Hack tool that patches target-process machine code; currently supports x86 jmp hooks."""
    # Base address of the code cave allocated in the target process
    # (None until insure_memory runs).
    allocated_memory = None
    def __init__(self):
        super().__init__()
        # Attribute-style proxy so UI widgets can bind registered variables.
        self.variable_model = VariableModel(self.weak)
    def reset(self):
        # Forget all allocation/registration bookkeeping (used on attach/detach).
        self.allocated_memory = None
        self.next_usable_memory = None
        self.registed_assembly = None
        self.registed_variable = None
    def onattach(self):
        # Called when the tool attaches to a target process.
        super().onattach()
        self.reset()
        self.is32process = self.handler.is32process
    def ondetach(self):
        # Called on detach: free the code cave and undo every active patch.
        super().ondetach()
        if self.allocated_memory is not None:
            self.handler.free_memory(self.allocated_memory)
        # NOTE(review): if we attached but never registered anything,
        # registed_assembly may still be None here -- confirm.
        for key, value in self.registed_assembly.items():
            if value['active']:
                self.unregister_assembly_item(value)
        self.reset()
    def render_assembly_functions(self, functions, cols=4, vgap=10):
        # Build one ToggleButton per assembly function inside a grid layout,
        # keeping a key -> button map for toggle_assembly_button.
        with ui.GridLayout(cols=cols, vgap=vgap, className="expand"):
            self.assembly_buttons = {}
            for item in functions:
                button = ui.ToggleButton(label=item.label,
                    onchange=partial(__class__.toggle_assembly_function, self.weak, item=item))
                if item.help:
                    button.setToolTip(item.help)
                self.assembly_buttons[item.key] = button
    def toggle_assembly_function(self, btn, item):
        # Dispatch a toggle-button change to the matching registration action.
        checked = btn.checked
        if isinstance(item, AssemblyItem):
            if checked:
                self.register_assembly(item)
            else:
                self.unregister_assembly(item.key)
        elif isinstance(item, AssemblyItems):
            # A group toggles all of its children together.
            for item in item.children:
                if checked:
                    self.register_assembly(item)
                else:
                    self.unregister_assembly(item.key)
        elif isinstance(item, AssemblySwitch):
            self.set_variable_value(item.key, int(checked))
        elif isinstance(item, SimpleButton):
            item.onclick(checked)
    def toggle_assembly_button(self, key):
        # Programmatically flip the UI button registered under *key*.
        self.assembly_buttons[key].toggle()
    def insure_memory(self):
        # Lazily allocate the code cave and the registration tables.
        if self.allocated_memory is None:
            # Initialize the code area with PAGE_EXECUTE_READWRITE (0x40).
            self.next_usable_memory = self.allocated_memory = self.handler.alloc_memory(2048, protect=0x40)
            self.registed_assembly = {}
            self.registed_variable = {}
    def register_assembly(self, item):
        """Register (activate) a machine-code patch.

        :param item: AssemblyItem describing the bytes to find, replace and insert
        """
        if not self.handler.active:
            return
        self.insure_memory()
        if item.key in self.registed_assembly:
            # Already prepared once: just rewrite the cached patch bytes.
            data = self.registed_assembly[item.key]
            addr = data['addr']
            replace = data['replace']
            data['active'] = True
        else:
            original = item.original
            replace = item.replace
            assembly = item.assembly
            replace_len = item.replace_len
            replace_offset = item.replace_offset
            addr = self.find_address(original, item.find_start, item.find_end, item.find_base, item.fuzzy)
            # NOTE(review): identity test against an int literal relies on
            # CPython small-int caching; '== -1' would be safer.
            if addr is -1:
                print('找不到地址: ', item.key)
                return
            memory = self.next_usable_memory
            original_len = len(original)
            read_original = False  # whether the live original bytes must be re-read via handler
            if replace_offset:
                if replace_len is 0:
                    if 0 < replace_offset < original_len:
                        replace_len = original_len - replace_offset
                    else:
                        raise ValueError('此情况下replace_offset依赖于replace_len参数')
                if replace_offset > 0 and replace_len <= original_len - replace_offset and not item.fuzzy:
                    original = original[replace_offset: replace_len + replace_offset]
                else:
                    read_original = True
                addr += replace_offset
                original_len = replace_len
            else:
                if replace_len:
                    original_len = replace_len
                if item.fuzzy:
                    read_original = True
                elif replace_len:
                    original = original[:original_len]
            if read_original:
                original = self.handler.read(addr, bytes, original_len)
            if item.inserted:
                available_len = original_len - len(replace)  # room left for the jmp into the inserted code
                # Resolve arguments (only 4-byte addresses supported for now).
                if item.args:
                    memory_conflict = memory == self.next_usable_memory
                    if isinstance(assembly, AssemblyGroup):
                        for arg in item.args:
                            self.register_variable(arg)
                    else:
                        if self.next_usable_memory > 0xFFFFFFFF:
                            raise ValueError('目前只支持32位参数地址')
                        assembly = assembly % tuple(self.register_variable(arg).addr.to_bytes(4, 'little')
                            for arg in item.args)
                    if memory_conflict:
                        memory = self.next_usable_memory
                # Dynamically generate the machine code.
                if isinstance(assembly, AssemblyGroup):
                    assembly = assembly.generate(self, types.SimpleNamespace(
                        item=item, original_addr=addr, original=original, addr=memory,
                    ))
                jmp_offset = memory - (addr + 5)
                if abs(jmp_offset) < 0x7FFFFFFF or self.is32process:
                    # E9 relative address
                    # Compute the jump target; 5 is the length of the jmp opcode.
                    jmp_len = 5
                    diff_new = utils.u32(jmp_offset)
                    diff_back = utils.u32(addr + original_len - (memory + len(assembly) + 5))
                    # NOTE(review): 'replace += replace + ...' appends a second
                    # copy of the existing replace bytes; likely intended
                    # 'replace += b"\xE9" + ...' -- confirm against upstream.
                    replace += replace + b'\xE9' + diff_new.to_bytes(4, 'little')
                    assembly = assembly + b'\xE9' + diff_back.to_bytes(4, 'little')
                else:
                    if available_len >= 14:
                        jmp_len = 14
                        # FF25 00000000 absolute address
                        # NOTE(review): same doubled 'replace += replace + ...'
                        # pattern as above -- confirm.
                        replace += replace + b'\xFF\x25\x00\x00\x00\x00' + memory.to_bytes(8, 'little')
                    elif available_len >= 7 and self.next_usable_memory < 0xFFFFFFFF:
                        # ptr jmp
                        jmp_len = 7
                        memory_conflict = memory == self.next_usable_memory
                        temp = self.register_variable(VariableType(item.key + '_jmp', size=8)).addr
                        if memory_conflict:
                            memory = self.next_usable_memory
                        self.handler.write_ptr(temp, memory)
                        replace += b'\xFF\x24\x25' + temp.to_bytes(4, 'little')
                    else:
                        raise ValueError('不支持当前情况jmp')
                    assembly = assembly + b'\xFF\x25\x00\x00\x00\x00' + (addr + original_len).to_bytes(8, 'little')
                if available_len < jmp_len:
                    raise ValueError("可用长度不足以插入jmp代码")
                elif available_len != jmp_len:
                    # NOP padding
                    replace += b'\x90' * (available_len - jmp_len)
            if memory == self.next_usable_memory:
                self.next_usable_memory += utils.align_4(len(assembly))
            self.handler.write(memory, assembly)
            data = self.registed_assembly[item.key] = {'active': True, 'addr': addr, 'original': original,
                'replace': replace, 'memory': memory}
        self.handler.write(addr, replace)
    def unregister_assembly(self, key):
        """Restore the original machine code for the patch registered under *key*."""
        items = getattr(self, 'registed_assembly', None)
        if items:
            item = items.get(key, None)
            if item is not None:
                self.unregister_assembly_item(item)
    def unregister_assembly_item(self, item):
        # Write the saved original bytes back and mark the entry inactive.
        self.handler.write(item['addr'], item['original'])
        item['active'] = False
    def find_address(self, original, find_start, find_end, find_base=True, fuzzy=False):
        # Resolve the search window (callables and module-base offsets allowed)
        # and scan it for the original byte pattern.
        base_addr = find_base is True and self.handler.base_addr or callable(find_base) and find_base() or find_base
        if callable(find_start):
            find_start = find_start()
        if callable(find_end):
            find_end = find_end()
        if base_addr:
            find_start += base_addr
            find_end += base_addr
        return self.handler.find_bytes(original, find_start, find_end, fuzzy=fuzzy)
    def register_variable(self, variable):
        """Register a variable, assigning it space in the code cave."""
        self.insure_memory()
        if isinstance(variable, str):
            variable = VariableType(variable)
        elif isinstance(variable, tuple):
            variable = VariableType(*variable)
        temp = self.registed_variable.get(variable.name, None)
        if temp is None:
            if variable.addr:
                variable = variable.clone()
            align = variable.align or variable.size
            variable.addr = utils.align_size(self.next_usable_memory, align)
            self.registed_variable[variable.name] = variable
            self.next_usable_memory += utils.align_size(variable.size, align)
        else:
            variable = temp
        # NOTE(review): 'is not 0' is a literal identity test; '!= 0' is safer.
        if variable.value is not 0:
            self.handler.write(variable.addr, variable.value, variable.size)
        return variable
    def get_variable(self, name):
        """Return the registered variable object, or None when unavailable."""
        if self.allocated_memory:
            return self.registed_variable.get(name, None)
    def get_variable_value(self, name, default=None):
        """Read a registered variable's current value from the target process."""
        variable = self.get_variable(name)
        if variable:
            return self.handler.read(variable.addr, variable.type, variable.size)
        return default
    def set_variable_value(self, name, value):
        """Write a value into a registered variable in the target process."""
        variable = self.get_variable(name)
        if variable:
            self.handler.write(variable.addr, variable.type(value), variable.size)
class VariableModel:
    """Attribute-style proxy over an owner's registered variables.

    Mainly used to bind variables to a ModelWidget: reading an attribute
    fetches the variable's current value, assigning one writes it back, and
    ``model & name`` yields the variable's address (0 when unregistered).
    """
    def __init__(self, owner):
        # Bypass our own __setattr__ so 'owner' lands in the instance dict.
        object.__setattr__(self, 'owner', owner)
    def __getattr__(self, name):
        return self.owner.get_variable_value(name)
    def __setattr__(self, name, value):
        return self.owner.set_variable_value(name, value)
    def __and__(self, field):
        variable = self.owner.get_variable(field)
        if variable and variable.addr:
            return variable.addr
        return 0
class AssemblyItems:
    """A labelled bundle of AssemblyItem entries that is toggled as one unit."""
    def __init__(self, label, *children, help=None):
        # Display label, the grouped items, and an optional tooltip text.
        self.label = label
        self.children = children
        self.help = help
    @property
    def key(self):
        """The group is identified by its first child's key."""
        return self.children[0].key
""" register_assembly 的参数类型
:param original: 原始数据
:param find_start: 原始数据查找起始
:param find_end: 原始数据查找结束
:param replace: 原始数据替换为的内容
:param assembly: 写到新内存的内容
:param find_base: 是否将find_start和find_end加上模块起始地址
:param inserted: 是否自动加入jmp代码
:param replace_len: 只记录original前n个字节
"""
# Record type consumed by AssemblyHacktool.register_assembly; see the
# parameter notes in the string literal above for field meanings.
AssemblyItem = DataClass(
    'AssemblyItem',
    ('key', 'label', 'original', 'find_start', 'find_end', 'replace', 'assembly', 'find_base',
        'inserted', 'fuzzy', 'replace_len', 'replace_offset', 'args', 'help', 'ext'),
    defaults={
        'assembly': None,
        'find_base': True,
        'inserted': False,
        'fuzzy': False,
        'replace_len': 0,
        'replace_offset': 0,
        'args': ()
    }
)
# A boolean toggle backed by a registered variable (key) with a UI label.
AssemblySwitch = DataClass('AssemblySwitch', ('key', 'label'))
# A plain push button: onclick receives the button's checked state.
SimpleButton = DataClass('SimpleButton', ('key', 'label', 'onclick'))
# A variable allocated in the code cave: name, byte size, Python type,
# initial value, optional alignment, and (assigned later) its address.
VariableType = DataClass('VariableType', ('name', 'size', 'type', 'value', 'align', 'addr'),
                         defaults={'size': 4, 'type': int, 'value': 0})
| [
"2464851375@qq.com"
] | 2464851375@qq.com |
75db6db8e3ec02e93aea1287da42f3a1327f4b24 | 4b7e282fe480415f5d52c0fc0429f144156190fe | /google/ads/googleads/v8/services/services/campaign_service/transports/grpc.py | 9067b4cccf09e89fdf60722cc9f8c8a41a92aaff | [
"Apache-2.0"
] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 12,386 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import campaign
from google.ads.googleads.v8.services.types import campaign_service
from .base import CampaignServiceTransport, DEFAULT_CLIENT_INFO
class CampaignServiceGrpcTransport(CampaignServiceTransport):
    """gRPC backend transport for CampaignService.

    Service to manage campaigns.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        self._ssl_channel_credentials = ssl_channel_credentials
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            # Deprecated mTLS path: derive the host from the mTLS endpoint and
            # build SSL credentials from the supplied cert callback (or ADC).
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )
            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )
            if credentials is None:
                credentials, _ = google.auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            host = host if ":" in host else host + ":443"
            if credentials is None:
                credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                ssl_credentials=ssl_channel_credentials,
                scopes=self.AUTH_SCOPES,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Cache of lazily-created RPC stub callables, keyed by method name.
        self._stubs = {}  # type: Dict[str, Callable]
        # Run the base constructor.
        super().__init__(
            host=host, credentials=credentials, client_info=client_info,
        )
    @classmethod
    def create_channel(
        cls,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        scopes: Optional[Sequence[str]] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            scopes=scopes or cls.AUTH_SCOPES,
            **kwargs,
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def get_campaign(
        self,
    ) -> Callable[[campaign_service.GetCampaignRequest], campaign.Campaign]:
        r"""Return a callable for the get campaign method over gRPC.

        Returns the requested campaign in full detail.

        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `HeaderError <>`__
        `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__

        Returns:
            Callable[[~.GetCampaignRequest],
                ~.Campaign]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_campaign" not in self._stubs:
            self._stubs["get_campaign"] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v8.services.CampaignService/GetCampaign",
                request_serializer=campaign_service.GetCampaignRequest.serialize,
                response_deserializer=campaign.Campaign.deserialize,
            )
        return self._stubs["get_campaign"]
    @property
    def mutate_campaigns(
        self,
    ) -> Callable[
        [campaign_service.MutateCampaignsRequest],
        campaign_service.MutateCampaignsResponse,
    ]:
        r"""Return a callable for the mutate campaigns method over gRPC.

        Creates, updates, or removes campaigns. Operation statuses are
        returned.

        List of thrown errors: `AdxError <>`__
        `AuthenticationError <>`__ `AuthorizationError <>`__
        `BiddingError <>`__ `BiddingStrategyError <>`__
        `CampaignBudgetError <>`__ `CampaignError <>`__
        `ContextError <>`__ `DatabaseError <>`__ `DateError <>`__
        `DateRangeError <>`__ `DistinctError <>`__ `FieldError <>`__
        `FieldMaskError <>`__ `HeaderError <>`__ `IdError <>`__
        `InternalError <>`__ `ListOperationError <>`__
        `MutateError <>`__ `NewResourceCreationError <>`__
        `NotAllowlistedError <>`__ `NotEmptyError <>`__ `NullError <>`__
        `OperationAccessDeniedError <>`__ `OperatorError <>`__
        `QuotaError <>`__ `RangeError <>`__ `RegionCodeError <>`__
        `RequestError <>`__ `ResourceCountLimitExceededError <>`__
        `SettingError <>`__ `SizeLimitError <>`__
        `StringFormatError <>`__ `StringLengthError <>`__
        `UrlFieldError <>`__

        Returns:
            Callable[[~.MutateCampaignsRequest],
                ~.MutateCampaignsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "mutate_campaigns" not in self._stubs:
            self._stubs["mutate_campaigns"] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v8.services.CampaignService/MutateCampaigns",
                request_serializer=campaign_service.MutateCampaignsRequest.serialize,
                response_deserializer=campaign_service.MutateCampaignsResponse.deserialize,
            )
        return self._stubs["mutate_campaigns"]
__all__ = ("CampaignServiceGrpcTransport",)
| [
"noreply@github.com"
] | Z2Xsoft.noreply@github.com |
73a708a16278b09791fad51bf5d305698e82b80d | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_2352+180/sdB_PG_2352+180_coadd.py | 4f361c9b0b46950c02cba15a5912a175c25af4eb | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from gPhoton.gMap import gMap
def main():
    """Run gMap for sdB_PG_2352+180: NUV count movie (30 s steps) plus coadd."""
    gmap_kwargs = dict(
        band="NUV",
        skypos=[358.821833, 18.337653],
        skyrange=[0.0333333333333, 0.0333333333333],
        stepsz=30.,
        cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_PG_2352+180/sdB_PG_2352+180_movie_count.fits",
        cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_PG_2352+180/sdB_PG_2352+180_count_coadd.fits",
        overwrite=True,
        verbose=3,
    )
    gMap(**gmap_kwargs)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
e391925b4696862a60c04799026bcb83b93c85ed | 6e507e231d37d0b61d70d6694ffc928c1c638973 | /lab07password_generator.py | e140d562109bb3555972e6c8641cf57aee410451 | [] | no_license | pjz987/pdx_code_intro_class | 7a4998be23b90883aad55664ceb97baffe3fcf92 | e85c2e01718e75124b956b924af01e87cdd95ee1 | refs/heads/master | 2020-09-12T21:25:50.152682 | 2019-11-18T23:10:38 | 2019-11-18T23:10:38 | 222,561,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | '''
filename : lab07password_generator.py
Lab 7: Password Generator
Let's generate a password ten characters long using a loop (while loop or for loop) and random.choice, this will be a string of random characters.
'''
# Standard-library modules: randomness and ready-made character alphabets.
import random
import string

# Pool of characters the password may contain. Built ONCE, outside the loop —
# the original rebuilt this identical string on every iteration and reused the
# loop variable as both the counter and the alphabet.
characters = string.ascii_lowercase + string.digits + string.punctuation + string.ascii_uppercase

pass_length = input("How many characters do you want in your password?\n")
pass_length_int = int(pass_length)

# Draw one random character per position; join is the idiomatic (and linear-
# time) way to assemble the result string.
password = ''.join(random.choice(characters) for _ in range(pass_length_int))
print(password)
'''
Advanced Version 1
Allow the user to choose how many characters the password will be.
##I went back and added this code starting on line 14.##
'''
'''
Advanced Version 2
Allow the user to choose how many letters, numbers, and punctuation characters they want in their password. Mix everything up using list(), random.shuffle(), and ''.join().
##^figure out what this is about some other day^##
'''
| [
"pwj2012@gmail.com"
] | pwj2012@gmail.com |
130b6512484065d4534c3cc77c76a9869d44fb1d | 9dcac6f93c2e460009e3355976989adf3bf1af68 | /PrintSum.py | 3bfb387f9a676c65a8c7e920354df4dc5c3b67e6 | [] | no_license | lpham4/PythonPractice | 99a4db621a6e524b2264314f1d4d47e2474260f9 | fac0931d09441ad03c4b34abae01f928342d53d7 | refs/heads/main | 2023-01-02T11:56:39.192524 | 2020-10-21T23:45:52 | 2020-10-21T23:45:52 | 306,173,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # Class: 1321L
# Section: 02
# Term: Fall 2018
# Instructor: Malcolm
# Name: Ly Pham
# Lab: Python
# Read an integer, echo it, and — if it lies in [1, 100] — print the sum of
# the integers from 0 through that value.
integer = int(input('Enter an integer between 1 and 100: '))
print('You entered:', integer)
if integer < 1 or integer > 100:
    print('Invalid input. Try again.')
else:
    # sum(range(n + 1)) replaces the manual while-loop accumulator and avoids
    # shadowing the built-in name `sum`, which the original clobbered.
    total = sum(range(integer + 1))
    print('Sum of values: ', total)
| [
"lpham4@students.kennesaw.edu"
] | lpham4@students.kennesaw.edu |
0f5ce3b78bff791a5a68f6e7abc26fc45e210335 | 6f6997efe1a15d57688c12ff0197790fb2eac6bc | /database/old/Init_analysisdb.py | 1c3641363d82c7f49cc1994332fc27e6559afddc | [] | no_license | montanaviking/waferprobe | 29fa5f0eb07e60820162916e48059f63374902c5 | fb2786b376153f9b6e9495b6faf3ee5960f90a06 | refs/heads/master | 2022-11-06T10:57:01.539733 | 2020-06-19T23:47:59 | 2020-06-19T23:47:59 | 273,601,408 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | # Phil Marsh Carbonics
# initialize database
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
from sqlalchemy import (Column, Integer, Numeric, String, DateTime, ForeignKey, Boolean, create_engine)
from sqlalchemy.orm import relationship, backref, sessionmaker
Base=declarative_base()
class DataAccessLayer:
    """Owns the SQLAlchemy engine and Session factory for the Carbonics_test
    MySQL database. The engine is created lazily by connect()."""
    def __init__(self):
        # No engine until connect() is called.
        self.engine=None
        #self.conn_string="mysql+pymysql:///montanaviking:nova@localhost/test"
    def connect(self):
        """Create the engine, create all mapped tables, and build the Session factory."""
        # NOTE(review): credentials are hard-coded in the URL — consider moving
        # them to configuration or the environment.
        self.engine=create_engine("mysql+pymysql://montanaviking:nova@localhost/Carbonics_test")
        Base.metadata.create_all(self.engine)
        self.Session=sessionmaker(bind=self.engine)
dal=DataAccessLayer() | [
"microcraftx@gmail.com"
] | microcraftx@gmail.com |
4f98779fdde462765812a9d5470c61c3ca5eb16d | 442dae0500db1653541100292a356ab6452363da | /alchemy/test/autoencoder_test.py | 27b9d8e0d4d73dda490f172cd0b0abfcaf1ae4a2 | [
"MIT"
] | permissive | williamd4112/alchemy | b5da5092abd29f2541f0e91c4ed5da033318b9f5 | 6ca509aa2e332170666a67a53bea22f7749c2bc7 | refs/heads/master | 2021-09-07T22:57:15.198254 | 2018-03-02T15:30:27 | 2018-03-02T15:30:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | # -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
import unittest
from alchemy import layers, utils
class AutoEncoderTest(unittest.TestCase):
    """Checks that the conv2d encoder/decoder pair is shape-preserving."""
    def test_conv2d_autoencoder(self):
        """Encoder→decoder round trip must reproduce the input shape, and an
        all-zero image must map to an all-zero output (zero-initialized biases
        with ReLU/linear activations — TODO confirm against layers module)."""
        tf.reset_default_graph()
        inputs_ph = tf.placeholder(tf.float32, [None, 8, 8, 1])
        scope = 'autoencoder'
        strides = [1, 1]
        # Encoder returns the latent tensor plus per-layer tensors/shapes the
        # decoder needs to mirror the architecture.
        latent_output, encoder, shapes = layers.conv2d_encoder(
            inputs_ph, [2, 2], [2, 2], strides, 'encoder')
        outputs = layers.conv2d_decoder(
            latent_output, encoder, shapes, strides, 'decoder')
        # Static shape check: output must match the input placeholder exactly.
        self.assertTrue(
            utils.all_equal(
                outputs.get_shape().as_list(),
                inputs_ph.get_shape().as_list()))
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            input_image = np.zeros((1, 8, 8, 1))
            output_image = sess.run(outputs, feed_dict={inputs_ph: input_image})
            self.assertTrue(np.all(np.equal(input_image, output_image)))
| [
"samwenke@gmail.com"
] | samwenke@gmail.com |
6f5a06f1011e0d3399391a74601845265556fd8b | f6348e051252ad0be630a815e33a3f85fbe64c69 | /capture/noworkflow/now/persistence/models/environment_attr.py | a367c23b7af82ec6f6631b0f5bae34e22e5c1dad | [
"MIT"
] | permissive | stefan-grafberger/noworkflow | 74f3e2fd0358621ac785a5f1441645be2d69688b | cbb8964eba7d58a5e87f96fb5bb91ac452b80763 | refs/heads/master | 2023-01-06T02:51:27.881742 | 2020-04-15T21:18:29 | 2020-04-15T21:18:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Environment Attribute Model"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from sqlalchemy import Column, Integer, Text
from sqlalchemy import PrimaryKeyConstraint, ForeignKeyConstraint
from ...utils.prolog import PrologDescription, PrologTrial, PrologRepr
from .base import AlchemyProxy, proxy_class
@proxy_class
class EnvironmentAttr(AlchemyProxy):
    """Represent an environment attribute: a (*name*, *value*) pair recorded
    for a given trial. Equality and hashing are by attribute *name*."""
    __tablename__ = "environment_attr"
    __table_args__ = (
        PrimaryKeyConstraint("trial_id", "id"),
        ForeignKeyConstraint(["trial_id"], ["trial.id"], ondelete="CASCADE"),
    )
    trial_id = Column(Integer, index=True)
    id = Column(Integer, index=True)  # pylint: disable=invalid-name
    name = Column(Text)
    value = Column(Text)

    # Relationship attributes (see relationships.py):
    # trial: 1 Trial

    prolog_description = PrologDescription("environment", (
        PrologTrial("trial_id", link="trial.id"),
        PrologRepr("name"),
        PrologRepr("value"),
    ), description=(
        "informs that a environment attribute (*name*)\n"
        "was defined with *value*\n"
        "in a given trial (*trial_id*)."
    ))

    @property
    def brief(self):
        """Brief description of environment attribute"""
        return self.name

    def __hash__(self):
        # Must be consistent with __eq__, which compares by name only: equal
        # objects are required to have equal hashes. Hashing (name, value)
        # here violated that contract, so hash by name alone.
        return hash(self.name)

    def __eq__(self, other):
        # NOTE(review): assumes `other` exposes a .name attribute; comparison
        # is intentionally by attribute name only.
        return self.name == other.name

    def show(self, _print=lambda x, offset=0: print(x)):
        """Show object

        Keyword arguments:
        _print -- custom print function (default=print)
        """
        _print("{0.name}: {0.value}".format(self))

    def __repr__(self):
        return "Environment({0.trial_id}, {0.name}, {0.value})".format(self)
| [
"joaofelipenp@gmail.com"
] | joaofelipenp@gmail.com |
c112fd035736511a5982689e9b5805ab59d9d74e | 48bee4325136caea5a0aa447a16f000382ba2192 | /src/class_based_views/urls.py | 24eb4fea926bd62e596c49aaf2864d6748c060d8 | [] | no_license | nova-sangeeth/book_stack_django | f960e39beb929ee829cf0d4555de27a22ca98ee2 | a1e551f364ee6616b4b7804f14bfa493a1170936 | refs/heads/master | 2021-03-21T18:38:09.827632 | 2020-03-17T16:03:38 | 2020-03-17T16:03:38 | 247,321,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | """class_based_views URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('', include('book_stack.urls')),  # all other URLs go to the book_stack app
]
| [
"novasangeeth@outlook.com"
] | novasangeeth@outlook.com |
ef497c88746e8bec9bf1fe0a637dd05339029c94 | 73361fc6f7ecd9a19359a828b2574499a991bde4 | /gallery2/alembic/versions/3b387b077506_tags.py | 0e50457320a7e40f4de8f43b0812513c399eb1f1 | [] | no_license | danjac/gallery2 | 3a28cd3ca364a30eaf277bfd9db3cac72dd2463a | ff8c50bdfc30d9ac5fff910589b7f976a4b40bcf | refs/heads/master | 2020-05-19T07:59:00.047308 | 2014-01-15T13:44:06 | 2014-01-15T13:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | """tags
Revision ID: 3b387b077506
Revises: 48641cbf69d7
Create Date: 2014-01-11 11:01:56.791153
"""
# revision identifiers, used by Alembic.
revision = '3b387b077506'
down_revision = '48641cbf69d7'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the `tags` and `tagged_images` (image<->tag join) tables and add
    the denormalized `images.tagstring` column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('tags',
    sa.Column('name', sa.Unicode(length=200), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('tagged_images',
    sa.Column('image_id', sa.Integer(), nullable=True),
    sa.Column('tag_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['image_id'], [u'images.id'], ),
    sa.ForeignKeyConstraint(['tag_id'], [u'tags.id'], )
    )
    op.add_column(u'images', sa.Column('tagstring', sa.UnicodeText(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the tagstring column, then both tag tables
    (join table first, to respect the foreign keys)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column(u'images', 'tagstring')
    op.drop_table('tagged_images')
    op.drop_table('tags')
    ### end Alembic commands ###
| [
"danjac354@gmail.com"
] | danjac354@gmail.com |
372bd628e91fbeb9aec915bafa8af0824234caf7 | a0e777ea7e0d00c061068db132a30a8fa545cc75 | /FluentPython/coro_exc_demo.py | d91049232e9f0fe4694695ec88f001a1a1574ad1 | [] | no_license | aadisetyaa/Python-Cookbook | 87215b64d2d3631d6b18e90a68a09400e7d80919 | a8df0343a39725312686423296bfd860dbaf70ad | refs/heads/master | 2022-04-08T13:41:27.255352 | 2017-11-27T03:54:29 | 2017-11-27T03:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | class DemoException(Exception):
"""An exception type for the demonstration."""
def demo_exc_handling():
print('-> coroutine started')
while True:
try:
x = yield
except DemoException:
print('*** DemoException handled. Continuing...')
else:
print('-> coroutine received: {!r}'.format(x))
raise RuntimeError('This line should never run.')
#exc_coro = demo_exc_handling()
#next(exc_coro)
#exc_coro.send(11)
#exc_coro.send(22)
#exc_coro.close()
from inspect import getgeneratorstate
#print(getgeneratorstate(exc_coro))
# Example 16-10 (Fluent Python): a thrown DemoException is handled inside the
# coroutine, so it keeps running afterwards.
#exc_coro = demo_exc_handling()
#next(exc_coro)
#exc_coro.send(11)
#exc_coro.throw(DemoException)
#print(getgeneratorstate(exc_coro))
# Example 16-11: ZeroDivisionError is NOT handled by the coroutine, so the
# throw() below propagates out and crashes the script — the final print is
# intentionally never reached.
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
exc_coro.throw(ZeroDivisionError)
print(getgeneratorstate(exc_coro))
| [
"wpr101@hotmail.com"
] | wpr101@hotmail.com |
33c243c408ec59eec4ffca44e97be6a52731d741 | 8d753bb8f19b5b1f526b0688d3cb199b396ed843 | /osp_sai_2.1.8/system/apps/rpcapi/vcl/config.py | 9b18d6ac23705afdaa4d0d8f5c3548bd9a168816 | [] | no_license | bonald/vim_cfg | f166e5ff650db9fa40b564d05dc5103552184db8 | 2fee6115caec25fd040188dda0cb922bfca1a55f | refs/heads/master | 2023-01-23T05:33:00.416311 | 2020-11-19T02:09:18 | 2020-11-19T02:09:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | #!/usr/bin/python
#-*- coding: utf-8 -*-
# Development-mode switch for the RPC API helpers.
is_dev = True
# Address of the remote host running the device under test (DUT).
remote = '10.10.39.167'
# Regex patterns matched against the DUT's CLI output.
dut_regexp = r'DUT\d#'
dut_stdout_pwd_info = r'Password:'
shell_pwd = '!@#'
cmd_regexp = r'\[\w+@.+\]\$'
# Prompts and credentials for the interactive login dialogue.
login_stdout_username_info = r'Username:'
login_stdout_pwd_info = r'Password:'
login_username = 'admin'
login_pwd = 'admin'
# RPC result codes: 0 means success, negative values are distinct failures.
RPC_ERROR_SUCCESS = 0
RPC_ERROR_CLI_TIMEOUT = -1000
RPC_ERROR_CLI_FAIL = -1001
RPC_ERROR_CLI_AUTH_FAIL = -1002
RPC_ERROR_CLI_AUTH_LOW = -1003
RPC_ERROR_CLI_NOT_SUPPORT = -1004
RPC_ERROR_CHAR_NOT_SUPPORT = -1005
RPC_ERROR_STRING_NOT_SUPPORT = -1006
RPC_ERROR_MESSAGE_NOT_SUPPORT = -1007
| [
"zhwwan@gmail.com"
] | zhwwan@gmail.com |
ab0c31da7017d28b92c6f4359ffeff58c6e480e1 | af4b5830b2a23d1f3d126297c7eb057bb3f8e42f | /pymatflow/cp2k/base/pw_dft_iterative_solver.py | add890dd42de47199b7e0d715dad071d6063957a | [
"MIT"
] | permissive | mukhtarbayerouniversity/pymatflow | de2b2d573ceed68c1dd3c149c538588394029137 | 9ab61e56659519cd6c83d5bd32da1262f44da065 | refs/heads/master | 2023-02-13T01:50:32.993401 | 2021-01-13T15:19:36 | 2021-01-13T15:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | #!/usr/bin/evn python
# _*_ coding: utf-8 _*_
import numpy as np
import sys
import os
import shutil
"""
usage:
"""
# ============================================
# CP2K / PW_DFT / ITERATIVE_SOLVER
#=============================================
class cp2k_pw_dft_iterative_solver:
    """Represents the CP2K PW_DFT / ITERATIVE_SOLVER input section.

    Keyword/value pairs live in ``self.params`` and are serialized to a
    CP2K input stream by :meth:`to_input`.
    """
    def __init__(self):
        """Start with an empty keyword table and an inactive status flag."""
        self.params = {}
        self.status = False

    def to_input(self, fout):
        """Write the &ITERATIVE_SOLVER ... &END block to ``fout``.

        fout: a writable, file-like stream.
        """
        fout.write("\t\t&ITERATIVE_SOLVER\n")
        # Only keywords that were actually given a value are emitted.
        for keyword, value in self.params.items():
            if value is not None:
                fout.write("\t\t\t%s %s\n" % (keyword, value))
        fout.write("\t\t&END ITERATIVE_SOLVER\n")

    def set_params(self, params):
        """Absorb entries addressed as "<section>-<subsection>-<KEYWORD>".

        Only keys with exactly three dash-separated parts belong to this
        third-level section; everything else is silently ignored.
        """
        for key, value in params.items():
            parts = key.split("-")
            if len(parts) == 3:
                self.params[parts[-1]] = value
| [
"deqi_tang@163.com"
] | deqi_tang@163.com |
4595c1a0ffd3faf673a9269141df638ad665012f | 37d5b97c54e48f3de690724c01f3b14248c2b194 | /origin/cartpole-a3c.py | 42cb24898277d39b5f3ade93e1e7f38d89d786f2 | [] | no_license | verystrongjoe/a3c-sketch | ae9e60ee87155b7991a7fab4dfa55702e4cc56e9 | 7c8d9bfc76396ef652b609f1b366f98807adbf53 | refs/heads/master | 2020-03-06T14:41:04.358586 | 2018-05-08T09:37:32 | 2018-05-08T09:37:32 | 126,940,203 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,911 | py | import threading
import numpy as np
import tensorflow as tf
import pylab
import time
import gym
from keras.layers import Dense, Input
from keras.models import Model
from keras.optimizers import Adam
from keras import backend as K
# global variables for threading
episode = 0
scores = []
EPISODES = 2000
# This is A3C(Asynchronous Advantage Actor Critic) agent(global) for the Cartpole
# In this example, we use A3C algorithm
class A3CAgent:
    """Global A3C agent: owns the shared actor/critic networks and spawns
    worker threads (Agent) that train them asynchronously."""
    def __init__(self, state_size, action_size, env_name):
        # get size of state and action
        self.state_size = state_size
        self.action_size = action_size
        # get gym environment name
        self.env_name = env_name
        # these are hyper parameters for the A3C
        self.actor_lr = 0.001
        self.critic_lr = 0.001
        self.discount_factor = .99
        self.hidden1, self.hidden2 = 24, 24
        self.threads = 8
        # create model for actor and critic network
        self.actor, self.critic = self.build_model()
        # method for training actor and critic network
        self.optimizer = [self.actor_optimizer(), self.critic_optimizer()]
        self.sess = tf.InteractiveSession()
        K.set_session(self.sess)
        self.sess.run(tf.global_variables_initializer())

    # approximate policy and value using Neural Network
    # actor -> state is input and probability of each action is output of network
    # critic -> state is input and value of state is output of network
    # actor and critic network share first hidden layer
    def build_model(self):
        """Build the actor and critic Keras models (shared first hidden layer)."""
        state = Input(batch_shape=(None, self.state_size))
        shared = Dense(self.hidden1, input_dim=self.state_size, activation='relu', kernel_initializer='glorot_uniform')(state)

        actor_hidden = Dense(self.hidden2, activation='relu', kernel_initializer='glorot_uniform')(shared)
        action_prob = Dense(self.action_size, activation='softmax', kernel_initializer='glorot_uniform')(actor_hidden)

        value_hidden = Dense(self.hidden2, activation='relu', kernel_initializer='he_uniform')(shared)
        state_value = Dense(1, activation='linear', kernel_initializer='he_uniform')(value_hidden)

        actor = Model(inputs=state, outputs=action_prob)
        critic = Model(inputs=state, outputs=state_value)

        # Build the predict functions up-front so the models can be used
        # from the worker threads.
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic

    # make loss function for Policy Gradient
    # [log(action probability) * advantages] will be input for the back prop
    # we add entropy of action probability to loss
    def actor_optimizer(self):
        """Return a K.function that applies one policy-gradient update."""
        action = K.placeholder(shape=(None, self.action_size))
        advantages = K.placeholder(shape=(None, ))

        policy = self.actor.output

        # Probability of the action actually taken (`action` is one-hot).
        good_prob = K.sum(action * policy, axis=1)
        eligibility = K.log(good_prob + 1e-10) * K.stop_gradient(advantages)
        loss = -K.sum(eligibility)

        # sum(p * log p) is negative entropy; adding 0.01x of it to the
        # minimized loss acts as an entropy bonus against premature collapse.
        entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)

        actor_loss = loss + 0.01*entropy

        optimizer = Adam(lr=self.actor_lr)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss)
        train = K.function([self.actor.input, action, advantages], [], updates=updates)
        return train #, optimizer.get_gradients(actor_loss, self.actor.trainable_weights)

    # make loss function for Value approximation
    def critic_optimizer(self):
        """Return a K.function fitting the critic to discounted returns (MSE)."""
        discounted_reward = K.placeholder(shape=(None, ))

        value = self.critic.output

        loss = K.mean(K.square(discounted_reward - value))

        optimizer = Adam(lr=self.critic_lr)
        updates = optimizer.get_updates(self.critic.trainable_weights, [], loss)
        train = K.function([self.critic.input, discounted_reward], [], updates=updates)
        return train

    # make agents(local) and start training
    def train(self):
        """Spawn the worker threads, then loop forever saving the score plot
        and model weights every 20 s (runs until the process is killed)."""
        # self.load_model('./save_model/cartpole_a3c.h5')
        agents = [Agent(i, self.actor, self.critic, self.optimizer, self.env_name, self.discount_factor,
                        self.action_size, self.state_size) for i in range(self.threads)]

        for agent in agents:
            agent.start()

        while True:
            time.sleep(20)

            # NOTE(review): `scores` is appended to by worker threads without
            # a lock; the copy below relies on CPython list-append atomicity.
            plot = scores[:]
            pylab.plot(range(len(plot)), plot, 'b')
            pylab.savefig("./save_graph/cartpole_a3c.png")

            self.save_model('./save_model/cartpole_a3c.h5')

    def save_model(self, name):
        """Persist weights under `name`_actor.h5 / `name`_critic.h5."""
        self.actor.save_weights(name + "_actor.h5")
        self.critic.save_weights(name + "_critic.h5")

    def load_model(self, name):
        """Load weights previously written by save_model()."""
        self.actor.load_weights(name + "_actor.h5")
        self.critic.load_weights(name + "_critic.h5")
# This is Agent(local) class for threading
class Agent(threading.Thread):
    """Worker thread: plays episodes in its own gym environment and updates
    the shared actor/critic through the global optimizer functions."""
    def __init__(self, index, actor, critic, optimizer, env_name, discount_factor, action_size, state_size):
        threading.Thread.__init__(self)

        # Per-episode rollout buffers, cleared after each update.
        self.states = []
        self.rewards = []
        self.actions = []

        self.index = index
        self.actor = actor
        self.critic = critic
        self.optimizer = optimizer
        self.env_name = env_name
        self.discount_factor = discount_factor
        self.action_size = action_size
        self.state_size = state_size

    # Thread interactive with environment
    def run(self):
        """Play episodes until the global episode counter reaches EPISODES."""
        global episode
        env = gym.make(self.env_name)
        while episode < EPISODES:
            state = env.reset()
            score = 0
            while True:
                action = self.get_action(state)
                next_state, reward, done, _ = env.step(action)
                score += reward

                self.memory(state, action, reward)

                state = next_state

                if done:
                    # NOTE(review): `episode` and `scores` are shared across
                    # threads without a lock.
                    episode += 1
                    print("episode: ", episode, "/ score : ", score)
                    scores.append(score)
                    # score == 500 means the CartPole-v1 time limit was hit,
                    # i.e. the final state is not truly terminal, so pass
                    # done=False to bootstrap from the critic's value.
                    self.train_episode(score != 500)
                    break

    # In Policy Gradient, Q function is not available.
    # Instead agent uses sample returns for evaluating policy
    def discount_rewards(self, rewards, done=True):
        """Return discounted returns; when done is False, bootstrap the tail
        from the critic's value of the last buffered state."""
        discounted_rewards = np.zeros_like(rewards)
        running_add = 0
        if not done:
            running_add = self.critic.predict(np.reshape(self.states[-1], (1, self.state_size)))[0]
        for t in reversed(range(0, len(rewards))):
            running_add = running_add * self.discount_factor + rewards[t]
            discounted_rewards[t] = running_add
        return discounted_rewards

    # save <s, a ,r> of each step
    # this is used for calculating discounted rewards
    def memory(self, state, action, reward):
        """Append one (state, one-hot action, reward) step to the buffers."""
        self.states.append(state)
        act = np.zeros(self.action_size)
        act[action] = 1
        self.actions.append(act)
        self.rewards.append(reward)

    # update policy network and value network every episode
    def train_episode(self, done):
        """Run one actor update and one critic update from the buffered episode."""
        discounted_rewards = self.discount_rewards(self.rewards, done)

        values = self.critic.predict(np.array(self.states))
        values = np.reshape(values, len(values))

        # Advantage = sampled discounted return - predicted state value.
        advantages = discounted_rewards - values

        self.optimizer[0]([self.states, self.actions, advantages])
        self.optimizer[1]([self.states, discounted_rewards])
        self.states, self.actions, self.rewards = [], [], []

    def get_action(self, state):
        """Sample an action from the actor's current policy for `state`."""
        policy = self.actor.predict(np.reshape(state, [1, self.state_size]))[0]
        return np.random.choice(self.action_size, 1, p=policy)[0]
if __name__ == "__main__":
    env_name = 'CartPole-v1'
    # Probe the environment once just to read its state/action dimensions.
    env = gym.make(env_name)
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    env.close()
    global_agent = A3CAgent(state_size, action_size, env_name)
    global_agent.train()
"verystrongjoe@gmail.com"
] | verystrongjoe@gmail.com |
d6f6ff67d8a4ab492c8c24d10ca6a25088ee5e15 | de59ece5d773d8607ba7afe747088ff07062494c | /py-core/tuples/tuples.py | 6198cdf35d43671e60083fd9e761259bb727ab13 | [] | no_license | loggar/py | 4094c6919b040dfc0bb5453dc752145b5f3b46ba | 1116969fa6de00bbc30fe8dcf6445aa46190e506 | refs/heads/master | 2023-08-21T16:47:41.721298 | 2023-08-14T16:12:27 | 2023-08-14T16:12:27 | 114,955,782 | 0 | 0 | null | 2023-07-20T15:11:04 | 2017-12-21T03:01:54 | Python | UTF-8 | Python | false | false | 556 | py | tuple1 = ('abcd', 786, 2.23, 'john', 70.2)
# Demo of read-only tuple operations, one result per line of output
# (tuple1 is defined above).
tinytuple = (123, 'john')
for demo_value in (
    tuple1,              # the complete tuple
    tuple1[0],           # first element
    tuple1[1:3],         # second and third elements
    tuple1[2:],          # from the third element onwards
    tinytuple * 2,       # repetition
    tuple1 + tinytuple,  # concatenation
):
    print(demo_value)

list1 = ['abcd', 786, 2.23, 'john', 70.2]
# tuple1[2] = 1000 would raise TypeError: tuples are immutable
list1[2] = 1000  # lists, by contrast, support item assignment
| [
"webnl@DT-Charly.koi.local"
] | webnl@DT-Charly.koi.local |
9f25f18ca52a83de85052b14daa2717fc9df8fc1 | 13a416a2694d1f6aa1a68cd47610236bf61cafbc | /BasicConcept/hex_oct_bin.py | a8b18ae593af3c90d6a27996c1f0464a99110a3f | [] | no_license | Highjune/Python | c637f7d0f9e5d1ac9d6ad87b4e54833b8ff4ae11 | 1be43816d22f5f3b8679cf0cd3939e9d9f54497a | refs/heads/master | 2022-11-24T01:20:54.470172 | 2020-07-27T18:01:47 | 2020-07-27T18:01:47 | 263,271,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | a = 128
print(a)       # decimal value as-is: 128
print(hex(a))  # hexadecimal string: '0x80'
print(oct(a))  # octal string: '0o200'
print(bin(a))  # binary string: '0b10000000'
| [
"highjune37@gmail.com"
] | highjune37@gmail.com |
7f78e5f3f6a29d10301db18f96bebe97812cf0ac | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nneliza.py | 528eb4b8269e9139fcfcfc93d5170548eef4edfd | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 295 | py | ii = [('CookGHP.py', 1), ('LeakWTI2.py', 1), ('ClarGE2.py', 1), ('CrokTPS.py', 1), ('GilmCRS.py', 3), ('WadeJEB.py', 7), ('LeakWTI4.py', 1), ('WheeJPT.py', 11), ('MereHHB3.py', 2), ('FitzRNS.py', 1), ('LewiMJW.py', 1), ('ClarGE3.py', 3), ('DibdTRL.py', 1), ('HogaGMM2.py', 7), ('ClarGE4.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
aa6662318c74771c0ab32e588e4a200a782bdb3a | 556db265723b0cc30ad2917442ed6dad92fd9044 | /tensorflow/python/client/pywrap_tf_session.py | 96f42ef026b82b874aacfc3b1778ce72ac5c3785 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | graphcore/tensorflow | c1669b489be0e045b3ec856b311b3139858de196 | 085b20a4b6287eff8c0b792425d52422ab8cbab3 | refs/heads/r2.6/sdk-release-3.2 | 2023-07-06T06:23:53.857743 | 2023-03-14T13:04:04 | 2023-03-14T13:48:43 | 162,717,602 | 84 | 17 | Apache-2.0 | 2023-03-25T01:13:37 | 2018-12-21T13:30:38 | C++ | UTF-8 | Python | false | false | 2,973 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python module for Session ops, vars, and functions exported by pybind11."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-import-order,g-bad-import-order, wildcard-import, unused-import
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client._pywrap_tf_session import *
from tensorflow.python.client._pywrap_tf_session import _TF_SetTarget
from tensorflow.python.client._pywrap_tf_session import _TF_SetConfig
from tensorflow.python.client._pywrap_tf_session import _TF_NewSessionOptions
# Convert versions to strings for Python2 and keep api_compatibility_test green.
# We can remove this hack once we remove Python2 presubmits. pybind11 can only
# return unicode for Python2 even with py::str.
# https://pybind11.readthedocs.io/en/stable/advanced/cast/strings.html#returning-c-strings-to-python
# pylint: disable=undefined-variable
__version__ = str(get_version())
__git_version__ = str(get_git_version())
__compiler_version__ = str(get_compiler_version())
__cxx11_abi_flag__ = get_cxx11_abi_flag()
__monolithic_build__ = get_monolithic_build()
# User getters to hold attributes rather than pybind11's m.attr due to
# b/145559202.
GRAPH_DEF_VERSION = get_graph_def_version()
GRAPH_DEF_VERSION_MIN_CONSUMER = get_graph_def_version_min_consumer()
GRAPH_DEF_VERSION_MIN_PRODUCER = get_graph_def_version_min_producer()
TENSOR_HANDLE_KEY = get_tensor_handle_key()
# pylint: enable=undefined-variable
# Disable pylint invalid name warnings for legacy functions.
# pylint: disable=invalid-name
def TF_NewSessionOptions(target=None, config=None):
  """Create a TF_SessionOptions, applying `target` and a serialized `config`.

  The caller owns the returned object and must free it with
  TF_DeleteSessionOptions.
  """
  # NOTE: target and config are validated in the session constructor.
  opts = _TF_NewSessionOptions()
  if target is not None:
    _TF_SetTarget(opts, target)
  if config is not None:
    # The C API expects the config proto's serialized bytes.
    config_str = config.SerializeToString()
    _TF_SetConfig(opts, config_str)
  return opts
# Disable pylind undefined-variable as the variable is exported in the shared
# object via pybind11.
# pylint: disable=undefined-variable
def TF_Reset(target, containers=None, config=None):
  """Reset resource `containers` on `target`; the temporary session options
  are always freed, even if the reset call raises."""
  opts = TF_NewSessionOptions(target=target, config=config)
  try:
    TF_Reset_wrapper(opts, containers)
  finally:
    TF_DeleteSessionOptions(opts)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
8b0b6d0d068e5c0c0d871706b8de364a8b45b874 | 805593291790843645dc884616c2a672f9cf953a | /graph/spanning_tree/kuraskal.py | 9959bd549a7ac8edeac8ec868b9f23f7ff5978db | [] | no_license | Shumpei-Kikuta/library | 1aa3e5aa1a619734441b431eaf2a872784030ee0 | cfa5a035df2e98641259032c936e063767e53230 | refs/heads/master | 2020-08-03T01:03:31.859006 | 2019-10-25T01:58:30 | 2019-10-25T01:58:30 | 211,575,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,336 | py | """クラスカル法"""
class Node:
    """Disjoint-set (union-find) element; ``parent is None`` marks a root."""

    def __init__(self, idx):
        self.idx = idx
        self.parent = None


def unite(x, y, nodes):
    """Merge the set containing ``x`` with the set containing ``y``.

    The shallower tree (depth measured from the queried node) is attached
    under the deeper one's root. Returns the mutated ``nodes`` list.
    """
    x_root, x_depth = root(x, 0)
    y_root, y_depth = root(y, 0)
    if x_root is not y_root:
        if x_depth >= y_depth:
            y_root.parent = x_root
            nodes[y_root.idx] = y_root
        else:
            x_root.parent = y_root
            nodes[x_root.idx] = x_root
    return nodes


def same(x, y):
    """Return 1 if ``x`` and ``y`` belong to the same set, else 0."""
    x_root, _ = root(x, 0)
    y_root, _ = root(y, 0)
    if x_root.idx == y_root.idx:
        return 1
    return 0


def root(x, cnt):
    """Return ``(root_node, depth)`` of the tree containing ``x``.

    Iterative rather than recursive, so long parent chains cannot hit
    Python's recursion limit.
    """
    while x.parent is not None:
        x = x.parent
        cnt += 1
    return x, cnt


def initialize_adjlists(lists, V):
    """Give every vertex 0..V-1 an empty adjacency list and return the dict."""
    for i in range(V):
        lists[i] = []
    return lists


def adjacency_lists2kuraskal_list(adjacency_lists: dict) -> dict:
    """Flatten adjacency lists into an edge dict.

    OUTPUT: {(from_, to_): weight}
    """
    dicts = {}
    for from_ in adjacency_lists:
        for to_, weight in adjacency_lists[from_]:
            dicts[(from_, to_)] = weight
    return dicts


def kuraskal(adjacency_lists: dict):
    """Return the total weight of a minimum spanning tree (Kruskal).

    adjacency_lists: {vertex: [(neighbor, weight), ...]} for an undirected,
    connected graph with vertices 0..V-1 (each edge appears in both
    directions; the duplicate is skipped by the cycle check).
    """
    V = len(adjacency_lists)
    edge_weights = adjacency_lists2kuraskal_list(adjacency_lists)
    # Examine edges in non-decreasing weight order.
    sorted_edges = sorted(edge_weights.items(), key=lambda kv: kv[1])
    nodes = [Node(i) for i in range(V)]
    total = 0
    for (from_, to_), weight in sorted_edges:
        # An edge whose endpoints are already connected would form a cycle.
        if same(nodes[from_], nodes[to_]):
            continue
        nodes = unite(nodes[from_], nodes[to_], nodes)
        total += weight
    return total
def main():
    """Read a V x V adjacency matrix from stdin (-1 = no edge) and print the
    total weight of a minimum spanning tree."""
    V = int(input())
    adjacency_lists = {}  # key: node, value: list of (node, weight)
    adjacency_lists = initialize_adjlists(adjacency_lists, V)
    for i in range(V):
        lists = [int(c) for c in input().split()]
        for j, w in enumerate(lists):
            if w == -1:
                # -1 marks "no edge between i and j"
                continue
            adjacency_lists[i].append((j, w))
    print(kuraskal(adjacency_lists))

if __name__ == '__main__':
    main()
| [
"shunpei-kikuta775@g.ecc.u-tokyo.ac.jp"
] | shunpei-kikuta775@g.ecc.u-tokyo.ac.jp |
2e9b4bb92dd86faf0815ab836243cd924a9fd5ca | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc088/C/4904355.py | 50700176dd66473e27c6decd361c1ba164066058 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | c1 = list(map(int, input().rstrip().split()))
c2 = list(map(int, input().rstrip().split()))
c3 = list(map(int, input().rstrip().split()))
answer = 'No'
# The 3x3 grid is consistent iff every cell can be written as c[i][j] = a_i + b_j.
# Fixing a1 determines b1..b3 (from row 1) and then a2, a3 (from column 1).
# a1 is brute-forced over 0..100 — presumably the problem's value bound
# (confirm against the AtCoder ABC088-C constraints).
for i in range(101):
    a1 = i
    b1 = c1[0] - a1
    b2 = c1[1] - a1
    b3 = c1[2] - a1
    a2 = c2[0] - b1
    a3 = c3[0] - b1
    # Recompute the remaining four cells and compare with the input rows.
    c2_2 = a2 + b2
    c2_3 = a2 + b3
    c3_2 = a3 + b2
    c3_3 = a3 + b3
    if c2_2 == c2[1] and c2_3 == c2[2] and c3_2 == c3[1] and c3_3 == c3[2]:
        answer = 'Yes'
        break
print(answer)
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
bbc37a7671145d04d2e64fa21022a08e44c18e17 | 16e69196886254bc0fe9d8dc919ebcfa844f326a | /edc/subject/registration/migrations/0019_auto__add_registeredsubjectaudit.py | 21cbfc809d495d0a1dcf484b56bf849769ed7729 | [] | no_license | botswana-harvard/edc | b54edc305e7f4f6b193b4498c59080a902a6aeee | 4f75336ff572babd39d431185677a65bece9e524 | refs/heads/master | 2021-01-23T19:15:08.070350 | 2015-12-07T09:36:41 | 2015-12-07T09:36:41 | 35,820,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,421 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'RegisteredSubjectAudit'
db.create_table('bhp_registration_registeredsubject_audit', (
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('user_created', self.gf('django.db.models.fields.CharField')(default='', max_length=250)),
('user_modified', self.gf('django.db.models.fields.CharField')(default='', max_length=250)),
('hostname_created', self.gf('django.db.models.fields.CharField')(default='home', max_length=50, blank=True)),
('hostname_modified', self.gf('django.db.models.fields.CharField')(default='home', max_length=50, blank=True)),
('id', self.gf('django.db.models.fields.CharField')(max_length=36, blank=True)),
('subject_consent_id', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('subject_identifier', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=36, null=True, blank=True)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('initials', self.gf('django.db.models.fields.CharField')(max_length=3)),
('gender', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
('subject_type', self.gf('django.db.models.fields.CharField')(max_length=25)),
('screening_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('registration_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('randomization_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('registration_status', self.gf('django.db.models.fields.CharField')(max_length=25, null=True, blank=True)),
('_audit_subject_identifier', self.gf('django.db.models.fields.CharField')(max_length=50, null=True)),
('registration_identifier', self.gf('django.db.models.fields.CharField')(max_length=36, null=True, blank=True)),
('sid', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
('study_site', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='_audit_registeredsubject', null=True, to=orm['bhp_variables.StudySite'])),
('relative_identifier', self.gf('django.db.models.fields.CharField')(max_length=25, null=True, blank=True)),
('identity', self.gf('django.db.models.fields.CharField')(max_length=25, null=True, blank=True)),
('dob', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('is_dob_estimated', self.gf('django.db.models.fields.CharField')(max_length=25, null=True, blank=True)),
('may_store_samples', self.gf('django.db.models.fields.CharField')(default='?', max_length=3)),
('comment', self.gf('django.db.models.fields.TextField')(max_length=250, null=True, blank=True)),
('_audit_id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('_audit_timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('_audit_change_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
))
db.send_create_signal('bhp_registration', ['RegisteredSubjectAudit'])
# Removing index on 'RegisteredSubject', fields ['registration_identifier']
db.delete_index('bhp_registration_registeredsubject', ['registration_identifier'])
def backwards(self, orm):
# Adding index on 'RegisteredSubject', fields ['registration_identifier']
db.create_index('bhp_registration_registeredsubject', ['registration_identifier'])
# Deleting model 'RegisteredSubjectAudit'
db.delete_table('bhp_registration_registeredsubject_audit')
models = {
'bhp_registration.randomizedsubject': {
'Meta': {'object_name': 'RandomizedSubject'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'randomization_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'screening_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subject_consent_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'subject_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '36', 'null': 'True', 'blank': 'True'}),
'subject_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'bhp_registration.registeredsubject': {
'Meta': {'object_name': 'RegisteredSubject'},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'identity': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'is_dob_estimated': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'may_store_samples': ('django.db.models.fields.CharField', [], {'default': "'?'", 'max_length': '3'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'randomization_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_identifier': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'registration_status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'relative_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'screening_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sid': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'study_site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_variables.StudySite']", 'null': 'True', 'blank': 'True'}),
'subject_consent_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'subject_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '36', 'null': 'True', 'blank': 'True'}),
'subject_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'bhp_registration.registeredsubjectaudit': {
'Meta': {'ordering': "['-_audit_timestamp']", 'object_name': 'RegisteredSubjectAudit', 'db_table': "'bhp_registration_registeredsubject_audit'"},
'_audit_change_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'_audit_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'_audit_subject_identifier': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'_audit_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'identity': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'is_dob_estimated': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'may_store_samples': ('django.db.models.fields.CharField', [], {'default': "'?'", 'max_length': '3'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'randomization_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_identifier': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'registration_status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'relative_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'screening_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sid': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'study_site': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_audit_registeredsubject'", 'null': 'True', 'to': "orm['bhp_variables.StudySite']"}),
'subject_consent_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'subject_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '36', 'null': 'True', 'blank': 'True'}),
'subject_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'bhp_registration.subjectidentifieraudittrail': {
'Meta': {'ordering': "['-date_allocated']", 'object_name': 'SubjectIdentifierAuditTrail'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_allocated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.date(2011, 11, 18)'}),
'dob': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'subject_consent_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'subject_identifier': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'bhp_variables.studysite': {
'Meta': {'ordering': "['site_code']", 'unique_together': "[('site_code', 'site_name')]", 'object_name': 'StudySite'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'site_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'site_name': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
}
}
complete_apps = ['bhp_registration']
| [
"ew2789@gmail.com"
] | ew2789@gmail.com |
631fc594d1affe6040910bd0d37faf606980fb06 | f8ff84f02d6dfa66d003890c4f51ea575232ba93 | /keystone/keystone/tests/unit/contrib/federation/test_utils.py | b59c234707f65898545269f1b426d442d93797df | [
"Apache-2.0"
] | permissive | zarson/stack | 8d341463bdf0136447bf1ada5be943df8ba55a4b | 827003bc566ed992f754618063a771694e51cfca | refs/heads/master | 2021-06-03T00:49:19.075199 | 2016-05-12T07:45:35 | 2016-05-12T07:45:35 | 58,616,957 | 0 | 1 | null | 2020-07-24T01:59:08 | 2016-05-12T07:08:17 | Python | UTF-8 | Python | false | false | 31,784 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_serialization import jsonutils
from keystone.auth.plugins import mapped
from keystone import exception
from keystone.federation import utils as mapping_utils
from keystone.tests import unit
from keystone.tests.unit import mapping_fixtures
FAKE_MAPPING_ID = uuid.uuid4().hex
class MappingRuleEngineTests(unit.BaseTestCase):
"""A class for testing the mapping rule engine."""
def assertValidMappedUserObject(self, mapped_properties,
user_type='ephemeral',
domain_id=None):
"""Check whether mapped properties object has 'user' within.
According to today's rules, RuleProcessor does not have to issue user's
id or name. What's actually required is user's type and for ephemeral
users that would be service domain named 'Federated'.
"""
self.assertIn('user', mapped_properties,
message='Missing user object in mapped properties')
user = mapped_properties['user']
self.assertIn('type', user)
self.assertEqual(user_type, user['type'])
self.assertIn('domain', user)
domain = user['domain']
domain_name_or_id = domain.get('id') or domain.get('name')
domain_ref = domain_id or 'Federated'
self.assertEqual(domain_ref, domain_name_or_id)
def test_rule_engine_any_one_of_and_direct_mapping(self):
"""Should return user's name and group id EMPLOYEE_GROUP_ID.
The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE.
They will test the case where `any_one_of` is valid, and there is
a direct mapping for the users name.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.ADMIN_ASSERTION
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
fn = assertion.get('FirstName')
ln = assertion.get('LastName')
full_name = '%s %s' % (fn, ln)
group_ids = values.get('group_ids')
user_name = values.get('user', {}).get('name')
self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
self.assertEqual(full_name, user_name)
def test_rule_engine_no_regex_match(self):
"""Should deny authorization, the email of the tester won't match.
This will not match since the email in the assertion will fail
the regex test. It is set to match any @example.com address.
But the incoming value is set to eviltester@example.org.
RuleProcessor should raise ValidationError.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.BAD_TESTER_ASSERTION
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
self.assertRaises(exception.ValidationError,
rp.process,
assertion)
def test_rule_engine_regex_many_groups(self):
"""Should return group CONTRACTOR_GROUP_ID.
The TESTER_ASSERTION should successfully have a match in
MAPPING_TESTER_REGEX. This will test the case where many groups
are in the assertion, and a regex value is used to try and find
a match.
"""
mapping = mapping_fixtures.MAPPING_TESTER_REGEX
assertion = mapping_fixtures.TESTER_ASSERTION
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
def test_rule_engine_any_one_of_many_rules(self):
"""Should return group CONTRACTOR_GROUP_ID.
The CONTRACTOR_ASSERTION should successfully have a match in
MAPPING_SMALL. This will test the case where many rules
must be matched, including an `any_one_of`, and a direct
mapping.
"""
mapping = mapping_fixtures.MAPPING_SMALL
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids)
def test_rule_engine_not_any_of_and_direct_mapping(self):
"""Should return user's name and email.
The CUSTOMER_ASSERTION should successfully have a match in
MAPPING_LARGE. This will test the case where a requirement
has `not_any_of`, and direct mapping to a username, no group.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.CUSTOMER_ASSERTION
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertEqual(user_name, name)
self.assertEqual([], group_ids,)
def test_rule_engine_not_any_of_many_rules(self):
"""Should return group EMPLOYEE_GROUP_ID.
The EMPLOYEE_ASSERTION should successfully have a match in
MAPPING_SMALL. This will test the case where many remote
rules must be matched, including a `not_any_of`.
"""
mapping = mapping_fixtures.MAPPING_SMALL
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
def test_rule_engine_not_any_of_regex_verify_pass(self):
"""Should return group DEVELOPER_GROUP_ID.
The DEVELOPER_ASSERTION should successfully have a match in
MAPPING_DEVELOPER_REGEX. This will test the case where many
remote rules must be matched, including a `not_any_of`, with
regex set to True.
"""
mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
assertion = mapping_fixtures.DEVELOPER_ASSERTION
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
def test_rule_engine_not_any_of_regex_verify_fail(self):
"""Should deny authorization.
The email in the assertion will fail the regex test.
It is set to reject any @example.org address, but the
incoming value is set to evildeveloper@example.org.
RuleProcessor should yield ValidationError.
"""
mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
assertion = mapping_fixtures.BAD_DEVELOPER_ASSERTION
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
self.assertRaises(exception.ValidationError,
rp.process,
assertion)
def _rule_engine_regex_match_and_many_groups(self, assertion):
"""Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
A helper function injecting assertion passed as an argument.
Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results.
"""
mapping = mapping_fixtures.MAPPING_LARGE
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('user', {}).get('name')
self.assertValidMappedUserObject(values)
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
def test_rule_engine_regex_match_and_many_groups(self):
"""Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
The TESTER_ASSERTION should successfully have a match in
MAPPING_LARGE. This will test a successful regex match
for an `any_one_of` evaluation type, and will have many
groups returned.
"""
self._rule_engine_regex_match_and_many_groups(
mapping_fixtures.TESTER_ASSERTION)
def test_rule_engine_discards_nonstring_objects(self):
"""Check whether RuleProcessor discards non string objects.
Despite the fact that assertion is malformed and contains
non string objects, RuleProcessor should correctly discard them and
successfully have a match in MAPPING_LARGE.
"""
self._rule_engine_regex_match_and_many_groups(
mapping_fixtures.MALFORMED_TESTER_ASSERTION)
def test_rule_engine_fails_after_discarding_nonstring(self):
"""Check whether RuleProcessor discards non string objects.
Expect RuleProcessor to discard non string object, which
is required for a correct rule match. RuleProcessor will result with
ValidationError.
"""
mapping = mapping_fixtures.MAPPING_SMALL
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION
self.assertRaises(exception.ValidationError,
rp.process,
assertion)
def test_using_remote_direct_mapping_that_doesnt_exist_fails(self):
"""Test for the correct error when referring to a bad remote match.
The remote match must exist in a rule when a local section refers to
a remote matching using the format (e.g. {0} in a local section).
"""
mapping = mapping_fixtures.MAPPING_DIRECT_MAPPING_THROUGH_KEYWORD
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.CUSTOMER_ASSERTION
self.assertRaises(exception.DirectMappingError,
rp.process,
assertion)
def test_rule_engine_returns_group_names(self):
"""Check whether RuleProcessor returns group names with their domains.
RuleProcessor should return 'group_names' entry with a list of
dictionaries with two entries 'name' and 'domain' identifying group by
its name and domain.
"""
mapping = mapping_fixtures.MAPPING_GROUP_NAMES
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
reference = {
mapping_fixtures.DEVELOPER_GROUP_NAME:
{
"name": mapping_fixtures.DEVELOPER_GROUP_NAME,
"domain": {
"name": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_NAME
}
},
mapping_fixtures.TESTER_GROUP_NAME:
{
"name": mapping_fixtures.TESTER_GROUP_NAME,
"domain": {
"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
}
}
}
for rule in mapped_properties['group_names']:
self.assertDictEqual(reference.get(rule.get('name')), rule)
def test_rule_engine_whitelist_and_direct_groups_mapping(self):
"""Should return user's groups Developer and Contractor.
The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match
in MAPPING_GROUPS_WHITELIST. It will test the case where 'whitelist'
correctly filters out Manager and only allows Developer and Contractor.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
reference = {
mapping_fixtures.DEVELOPER_GROUP_NAME:
{
"name": mapping_fixtures.DEVELOPER_GROUP_NAME,
"domain": {
"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
}
},
mapping_fixtures.CONTRACTOR_GROUP_NAME:
{
"name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
"domain": {
"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
}
}
}
for rule in mapped_properties['group_names']:
self.assertDictEqual(reference.get(rule.get('name')), rule)
self.assertEqual('tbo', mapped_properties['user']['name'])
self.assertEqual([], mapped_properties['group_ids'])
def test_rule_engine_blacklist_and_direct_groups_mapping(self):
"""Should return user's group Developer.
The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match
in MAPPING_GROUPS_BLACKLIST. It will test the case where 'blacklist'
correctly filters out Manager and Developer and only allows Contractor.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
reference = {
mapping_fixtures.CONTRACTOR_GROUP_NAME:
{
"name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
"domain": {
"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
}
}
}
for rule in mapped_properties['group_names']:
self.assertDictEqual(reference.get(rule.get('name')), rule)
self.assertEqual('tbo', mapped_properties['user']['name'])
self.assertEqual([], mapped_properties['group_ids'])
def test_rule_engine_blacklist_and_direct_groups_mapping_multiples(self):
"""Test matching multiple values before the blacklist.
Verifies that the local indexes are correct when matching multiple
remote values for a field when the field occurs before the blacklist
entry in the remote rules.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MULTIPLES
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
reference = {
mapping_fixtures.CONTRACTOR_GROUP_NAME:
{
"name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
"domain": {
"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
}
}
}
for rule in mapped_properties['group_names']:
self.assertDictEqual(reference.get(rule.get('name')), rule)
self.assertEqual('tbo', mapped_properties['user']['name'])
self.assertEqual([], mapped_properties['group_ids'])
def test_rule_engine_whitelist_direct_group_mapping_missing_domain(self):
"""Test if the local rule is rejected upon missing domain value.
This is a variation with a ``whitelist`` filter.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
self.assertRaises(exception.ValidationError, rp.process, assertion)
def test_rule_engine_blacklist_direct_group_mapping_missing_domain(self):
"""Test if the local rule is rejected upon missing domain value.
This is a variation with a ``blacklist`` filter.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
self.assertRaises(exception.ValidationError, rp.process, assertion)
def test_rule_engine_no_groups_allowed(self):
"""Should return user mapped to no groups.
The EMPLOYEE_ASSERTION should successfully have a match
in MAPPING_GROUPS_WHITELIST, but 'whitelist' should filter out
the group values from the assertion and thus map to no groups.
"""
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertListEqual(mapped_properties['group_names'], [])
self.assertListEqual(mapped_properties['group_ids'], [])
self.assertEqual('tbo', mapped_properties['user']['name'])
def test_mapping_federated_domain_specified(self):
"""Test mapping engine when domain 'ephemeral' is explicitly set.
For that, we use mapping rule MAPPING_EPHEMERAL_USER and assertion
EMPLOYEE_ASSERTION
"""
mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
def test_set_ephemeral_domain_to_ephemeral_users(self):
"""Test auto assigning service domain to ephemeral users.
Test that ephemeral users will always become members of federated
service domain. The check depends on ``type`` value which must be set
to ``ephemeral`` in case of ephemeral user.
"""
mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
def test_local_user_local_domain(self):
"""Test that local users can have non-service domains assigned."""
mapping = mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(
mapped_properties, user_type='local',
domain_id=mapping_fixtures.LOCAL_DOMAIN)
def test_user_identifications_name(self):
"""Test varius mapping options and how users are identified.
This test calls mapped.setup_username() for propagating user object.
Test plan:
- Check if the user has proper domain ('federated') set
- Check if the user has property type set ('ephemeral')
- Check if user's name is properly mapped from the assertion
- Check if unique_id is properly set and equal to display_name,
as it was not explicitly specified in the mapping.
"""
mapping = mapping_fixtures.MAPPING_USER_IDS
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
self.assertEqual('jsmith', mapped_properties['user']['name'])
unique_id, display_name = mapped.get_user_unique_id_and_display_name(
{}, mapped_properties)
self.assertEqual('jsmith', unique_id)
self.assertEqual('jsmith', display_name)
def test_user_identifications_name_and_federated_domain(self):
"""Test varius mapping options and how users are identified.
This test calls mapped.setup_username() for propagating user object.
Test plan:
- Check if the user has proper domain ('federated') set
- Check if the user has propert type set ('ephemeral')
- Check if user's name is properly mapped from the assertion
- Check if the unique_id and display_name are properly set
"""
mapping = mapping_fixtures.MAPPING_USER_IDS
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
unique_id, display_name = mapped.get_user_unique_id_and_display_name(
{}, mapped_properties)
self.assertEqual('tbo', display_name)
self.assertEqual('abc123%40example.com', unique_id)
def test_user_identification_id(self):
"""Test varius mapping options and how users are identified.
This test calls mapped.setup_username() for propagating user object.
Test plan:
- Check if the user has proper domain ('federated') set
- Check if the user has propert type set ('ephemeral')
- Check if user's display_name is properly set and equal to unique_id,
as it was not explicitly specified in the mapping.
"""
mapping = mapping_fixtures.MAPPING_USER_IDS
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.ADMIN_ASSERTION
mapped_properties = rp.process(assertion)
context = {'environment': {}}
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
unique_id, display_name = mapped.get_user_unique_id_and_display_name(
context, mapped_properties)
self.assertEqual('bob', unique_id)
self.assertEqual('bob', display_name)
def test_user_identification_id_and_name(self):
"""Test varius mapping options and how users are identified.
This test calls mapped.setup_username() for propagating user object.
Test plan:
- Check if the user has proper domain ('federated') set
- Check if the user has proper type set ('ephemeral')
- Check if display_name is properly set from the assertion
- Check if unique_id is properly set and equal to value hardcoded
in the mapping
This test does two iterations with different assertions used as input
for the Mapping Engine. Different assertions will be matched with
different rules in the ruleset, effectively issuing different user_id
(hardcoded values). In the first iteration, the hardcoded user_id is
not url-safe and we expect Keystone to make it url safe. In the latter
iteration, provided user_id is already url-safe and we expect server
not to change it.
"""
testcases = [(mapping_fixtures.CUSTOMER_ASSERTION, 'bwilliams'),
(mapping_fixtures.EMPLOYEE_ASSERTION, 'tbo')]
for assertion, exp_user_name in testcases:
mapping = mapping_fixtures.MAPPING_USER_IDS
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
context = {'environment': {}}
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
unique_id, display_name = (
mapped.get_user_unique_id_and_display_name(context,
mapped_properties)
)
self.assertEqual(exp_user_name, display_name)
self.assertEqual('abc123%40example.com', unique_id)
def test_whitelist_pass_through(self):
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_PASS_THROUGH
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.DEVELOPER_ASSERTION
mapped_properties = rp.process(assertion)
self.assertValidMappedUserObject(mapped_properties)
self.assertEqual('developacct', mapped_properties['user']['name'])
self.assertEqual('Developer',
mapped_properties['group_names'][0]['name'])
def test_mapping_with_incorrect_local_keys(self):
mapping = mapping_fixtures.MAPPING_BAD_LOCAL_SETUP
self.assertRaises(exception.ValidationError,
mapping_utils.validate_mapping_structure,
mapping)
def test_mapping_with_group_name_and_domain(self):
mapping = mapping_fixtures.MAPPING_GROUP_NAMES
mapping_utils.validate_mapping_structure(mapping)
def test_type_not_in_assertion(self):
"""Test that if the remote "type" is not in the assertion it fails."""
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_PASS_THROUGH
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = {uuid.uuid4().hex: uuid.uuid4().hex}
self.assertRaises(exception.ValidationError,
rp.process,
assertion)
def test_rule_engine_group_ids_mapping_whitelist(self):
"""Test mapping engine when group_ids is explicitly set.
Also test whitelists on group ids
"""
mapping = mapping_fixtures.MAPPING_GROUPS_IDS_WHITELIST
assertion = mapping_fixtures.GROUP_IDS_ASSERTION
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertEqual('opilotte', mapped_properties['user']['name'])
self.assertListEqual([], mapped_properties['group_names'])
self.assertItemsEqual(['abc123', 'ghi789', 'klm012'],
mapped_properties['group_ids'])
def test_rule_engine_group_ids_mapping_blacklist(self):
"""Test mapping engine when group_ids is explicitly set.
Also test blacklists on group ids
"""
mapping = mapping_fixtures.MAPPING_GROUPS_IDS_BLACKLIST
assertion = mapping_fixtures.GROUP_IDS_ASSERTION
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertEqual('opilotte', mapped_properties['user']['name'])
self.assertListEqual([], mapped_properties['group_names'])
self.assertItemsEqual(['abc123', 'ghi789', 'klm012'],
mapped_properties['group_ids'])
def test_rule_engine_group_ids_mapping_only_one_group(self):
"""Test mapping engine when group_ids is explicitly set.
If the group ids list has only one group,
test if the transformation is done correctly
"""
mapping = mapping_fixtures.MAPPING_GROUPS_IDS_WHITELIST
assertion = mapping_fixtures.GROUP_IDS_ASSERTION_ONLY_ONE_GROUP
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertEqual('opilotte', mapped_properties['user']['name'])
self.assertListEqual([], mapped_properties['group_names'])
self.assertItemsEqual(['210mlk', '321cba'],
mapped_properties['group_ids'])
class TestUnicodeAssertionData(unit.BaseTestCase):
"""Ensure that unicode data in the assertion headers works.
Bug #1525250 reported that something was not getting correctly encoded
and/or decoded when assertion data contained non-ASCII characters.
This test class mimics what happens in a real HTTP request.
"""
def setUp(self):
super(TestUnicodeAssertionData, self).setUp()
self.config_fixture = self.useFixture(config_fixture.Config(cfg.CONF))
self.config_fixture.config(group='federation',
assertion_prefix='PFX')
def _pull_mapping_rules_from_the_database(self):
# NOTE(dstanek): In a live system. The rules are dumped into JSON bytes
# before being # stored in the database. Upon retrieval the bytes are
# loaded and the resulting dictionary is full of unicode text strings.
# Most of tests in this file incorrectly assume the mapping fixture
# dictionary is the same as what it would look like coming out of the
# database. The string, when coming out of the database, are all text.
return jsonutils.loads(jsonutils.dumps(
mapping_fixtures.MAPPING_UNICODE))
def _pull_assertion_from_the_request_headers(self):
# NOTE(dstanek): In a live system the bytes for the assertion are
# pulled from the HTTP headers. These bytes may be decodable as
# ISO-8859-1 according to Section 3.2.4 of RFC 7230. Let's assume
# that our web server plugins are correctly encoding the data.
context = dict(environment=mapping_fixtures.UNICODE_NAME_ASSERTION)
data = mapping_utils.get_assertion_params_from_env(context)
# NOTE(dstanek): keystone.auth.plugins.mapped
return dict(data)
def test_unicode(self):
mapping = self._pull_mapping_rules_from_the_database()
assertion = self._pull_assertion_from_the_request_headers()
rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
fn = assertion.get('PFX_FirstName')
ln = assertion.get('PFX_LastName')
full_name = '%s %s' % (fn, ln)
user_name = values.get('user', {}).get('name')
self.assertEqual(full_name, user_name)
| [
"zhangsheng1730@hotmail.com"
] | zhangsheng1730@hotmail.com |
d3ee9535e42a5f92a997d4814a06b0f8fa25b6c0 | 41a4887a52afe81f203d0917c5ef54ccbe2389fe | /toys/kids/flip_fen.py | 2cc78123c07ce81bb982af2c037ea82d657d8293 | [] | no_license | tgandor/meats | 2efc2e144fc59b2b99aeeaec5f5419dbbb323f9b | 26eb57e49752dab98722a356e80a15f26cbf5929 | refs/heads/master | 2023-08-30T20:35:47.949622 | 2023-08-25T13:26:23 | 2023-08-25T13:26:23 | 32,311,574 | 13 | 9 | null | 2022-06-22T20:44:44 | 2015-03-16T08:39:21 | Python | UTF-8 | Python | false | false | 193 | py | #!/usr/bin/env python
import sys
while True:
line = sys.stdin.readline().split()
if not line:
break
line[0] = '/'.join(line[0].split('/')[::-1])
print(' '.join(line))
| [
"tomasz.gandor@gmail.com"
] | tomasz.gandor@gmail.com |
fb9f4d947e866e83113c59be6505f37dfa281a96 | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/_testing/plugins/bokeh.py | 9eca47f2d22a7519f60557c494a504860efd575b | [
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 9,586 | py | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Define a Pytest plugin for a Bokeh-specific testing tools
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import time
from threading import Thread
# External imports
import pytest
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler
# Bokeh imports
from bokeh.io import save
from bokeh.server.server import Server
import bokeh.server.views.ws as ws
from bokeh._testing.util.selenium import INIT, RESULTS, wait_for_canvas_resize
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.bokeh",
"bokeh._testing.plugins.file_server",
"bokeh._testing.plugins.selenium",
)
__all__ = (
'bokeh_app_info',
'bokeh_model_page',
'bokeh_server_page',
'find_free_port',
'output_file_url',
'single_plot_page',
'test_file_path_and_url',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@pytest.fixture
def output_file_url(request, file_server):
from bokeh.io import output_file
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
url = file_path.replace('\\', '/') # Windows-proof
output_file(file_path, mode='inline')
def tear_down():
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tear_down)
return file_server.where_is(url)
@pytest.fixture
def test_file_path_and_url(request, file_server):
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
url = file_path.replace('\\', '/') # Windows-proof
def tear_down():
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tear_down)
return file_path, file_server.where_is(url)
class _ExitHandler(RequestHandler):
def initialize(self, io_loop):
self.io_loop = io_loop
@gen.coroutine
def get(self, *args, **kwargs):
self.io_loop.stop()
import socket
from contextlib import closing
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
@pytest.fixture
def bokeh_app_info(request, driver):
''' Start a Bokeh server app and return information needed to test it.
Returns a tuple (url, message_test_port), where the latter is defined as
namedtuple('MessageTestPort', ['sent', 'received'])
and will contain all messages that the Bokeh Server sends/receives while
running during the test.
'''
def func(modify_doc):
from collections import namedtuple
MessageTestPort = namedtuple('MessageTestPort', ['sent', 'received'])
ws._message_test_port = MessageTestPort([], [])
port = find_free_port()
def worker():
io_loop = IOLoop()
server = Server({'/': modify_doc},
port=port,
io_loop=io_loop,
extra_patterns=[('/exit', _ExitHandler, dict(io_loop=io_loop))])
server.start()
server.io_loop.start()
t = Thread(target=worker)
t.start()
def cleanup():
driver.get("http://localhost:%d/exit" % port)
# XXX (bev) this line is a workaround for https://github.com/bokeh/bokeh/issues/7970
# and should be removed when that issue is resolved
driver.get_log('browser')
ws._message_test_port = None
t.join()
request.addfinalizer(cleanup)
return "http://localhost:%d/" % port, ws._message_test_port
return func
class _BokehModelPage(object):
def __init__(self, model, driver, output_file_url, has_no_console_errors):
self._driver = driver
self._model = model
self._has_no_console_errors = has_no_console_errors
save(self._model)
self._driver.get(output_file_url)
self.init_results()
@property
def results(self):
WebDriverWait(self._driver, 10).until(EC.staleness_of(self.test_div))
self.test_div = self._driver.find_element_by_class_name("bokeh-test-div")
return self._driver.execute_script(RESULTS)
@property
def driver(self):
return self._driver
def init_results(self):
self._driver.execute_script(INIT)
self.test_div = self._driver.find_element_by_class_name("bokeh-test-div")
def click_element_at_position(self, element, x, y):
actions = ActionChains(self._driver)
actions.move_to_element_with_offset(element, x, y)
actions.click()
actions.perform()
def double_click_element_at_position(self, element, x, y):
actions = ActionChains(self._driver)
actions.move_to_element_with_offset(element, x, y)
actions.click()
actions.click()
actions.perform()
def drag_element_at_position(self, element, x, y, dx, dy, mod=None):
actions = ActionChains(self._driver)
if mod:
actions.key_down(mod)
actions.move_to_element_with_offset(element, x, y)
actions.click_and_hold()
actions.move_by_offset(dx, dy)
actions.release()
if mod:
actions.key_up(mod)
actions.perform()
def send_keys(self, *keys):
actions = ActionChains(self._driver)
actions.send_keys(*keys)
actions.perform()
def has_no_console_errors(self):
return self._has_no_console_errors(self._driver)
class _CanvasMixin(object):
def click_canvas_at_position(self, x, y):
self.click_element_at_position(self.canvas, x, y)
def double_click_canvas_at_position(self, x, y):
self.double_click_element_at_position(self.canvas, x, y)
def click_custom_action(self):
button = self._driver.find_element_by_class_name("bk-toolbar-button-custom-action")
button.click()
def drag_canvas_at_position(self, x, y, dx, dy, mod=None):
self.drag_element_at_position(self.canvas, x, y, dx, dy, mod)
def get_toolbar_button(self, name):
return self.driver.find_element_by_class_name('bk-tool-icon-' + name)
@pytest.fixture()
def bokeh_model_page(driver, output_file_url, has_no_console_errors):
def func(model):
return _BokehModelPage(model, driver, output_file_url, has_no_console_errors)
return func
class _SinglePlotPage(_BokehModelPage, _CanvasMixin):
# model may be a layout, but should only contain a single plot
def __init__(self, model, driver, output_file_url, has_no_console_errors):
super(_SinglePlotPage, self).__init__(model, driver, output_file_url, has_no_console_errors)
self.canvas = self._driver.find_element_by_tag_name('canvas')
wait_for_canvas_resize(self.canvas, self._driver)
@pytest.fixture()
def single_plot_page(driver, output_file_url, has_no_console_errors):
def func(model):
return _SinglePlotPage(model, driver, output_file_url, has_no_console_errors)
return func
class _BokehServerPage(_SinglePlotPage, _CanvasMixin):
def __init__(self, modify_doc, driver, bokeh_app_info, has_no_console_errors):
self._driver = driver
self._has_no_console_errors = has_no_console_errors
self._app_url, self.message_test_port = bokeh_app_info(modify_doc)
time.sleep(0.1)
self._driver.get(self._app_url)
self.init_results()
self.canvas = self._driver.find_element_by_tag_name('canvas')
wait_for_canvas_resize(self.canvas, self._driver)
@pytest.fixture()
def bokeh_server_page(driver, bokeh_app_info, has_no_console_errors):
def func(modify_doc):
return _BokehServerPage(modify_doc, driver, bokeh_app_info, has_no_console_errors)
return func
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| [
"nicolas.holzschuch@inria.fr"
] | nicolas.holzschuch@inria.fr |
85b78518837e1b6109966119e16266b004ade7f8 | d66818f4b951943553826a5f64413e90120e1fae | /hackerrank/10 Days of Statistics/Day 8 - Least Square Regression Line/test.py | f5478d0354c945b2bef65095410b7eec0bb973e3 | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 521 | py | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'95 85',
'85 95',
'80 70',
'70 65',
'60 70',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(), '78.288\n')
if __name__ == '__main__':
unittest.main()
| [
"hbinhct@gmail.com"
] | hbinhct@gmail.com |
c4d3467d4d06eb14220feda8004d28995b35fb8d | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/sklearn/metrics/cluster/setup.py | ee0fc49bd4888209bfc12eab6d56e2a17ddf12c9 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e3107d1ba6c899cf749991d704ff933206baa46ff4c7d81e504222277355d67d
size 667
| [
"yamprakash130@gmail.com"
] | yamprakash130@gmail.com |
304935338bad968454868794f114987185cb098c | 39d4504ec1da8975fac526d6801b94f4348b6b61 | /research/morph_net/network_regularizers/flop_regularizer_test.py | 2993cf87269d9d58ed03a97ed5e99a4530de23d4 | [
"Apache-2.0"
] | permissive | vincentcheny/models | fe0ff5888e6ee00a0d4fa5ee14154acdbeebe7ad | afb1a59fc1bc792ac72d1a3e22e2469020529788 | refs/heads/master | 2020-07-23T21:38:24.559521 | 2019-11-15T07:50:11 | 2019-11-15T07:50:11 | 207,712,649 | 1 | 0 | Apache-2.0 | 2019-09-11T03:12:31 | 2019-09-11T03:12:31 | null | UTF-8 | Python | false | false | 21,594 | py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for flop_regularizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import tensorflow as tf
from tensorflow.contrib.slim.nets import resnet_v1
from morph_net.network_regularizers import bilinear_cost_utils
from morph_net.network_regularizers import flop_regularizer
arg_scope = tf.contrib.framework.arg_scope
layers = tf.contrib.layers
_coeff = bilinear_cost_utils.flop_coeff
NUM_CHANNELS = 3
class GammaFlopLossTest(tf.test.TestCase):
def setUp(self):
tf.reset_default_graph()
self.BuildWithBatchNorm()
with self.test_session():
self.Init()
def BuildWithBatchNorm(self):
params = {
'trainable': True,
'normalizer_fn': layers.batch_norm,
'normalizer_params': {
'scale': True
}
}
with arg_scope([layers.conv2d], **params):
self.BuildModel()
def BuildModel(self):
# Our test model is:
#
# -> conv1 --+ -> conv3 -->
# / | /
# image [concat]
# \ | \
# -> conv2 --+ -> conv4 -->
#
# (the model has two "outputs", conv3 and conv4).
#
image = tf.constant(0.0, shape=[1, 17, 19, NUM_CHANNELS])
conv1 = layers.conv2d(image, 13, [7, 5], padding='SAME', scope='conv1')
conv2 = layers.conv2d(image, 23, [1, 1], padding='SAME', scope='conv2')
concat = tf.concat([conv1, conv2], 3)
self.conv3 = layers.conv2d(
concat, 29, [3, 3], stride=2, padding='SAME', scope='conv3')
self.conv4 = layers.conv2d(
concat, 31, [1, 1], stride=1, padding='SAME', scope='conv4')
self.name_to_var = {v.op.name: v for v in tf.global_variables()}
self.gamma_flop_reg = flop_regularizer.GammaFlopsRegularizer(
[self.conv3.op, self.conv4.op], gamma_threshold=0.45)
def GetConv(self, name):
return tf.get_default_graph().get_operation_by_name(name + '/Conv2D')
def Init(self):
tf.global_variables_initializer().run()
gamma1 = self.name_to_var['conv1/BatchNorm/gamma']
gamma1.assign([0.8] * 7 + [0.2] * 6).eval()
gamma2 = self.name_to_var['conv2/BatchNorm/gamma']
gamma2.assign([-0.7] * 11 + [0.1] * 12).eval()
gamma3 = self.name_to_var['conv3/BatchNorm/gamma']
gamma3.assign([0.6] * 10 + [-0.3] * 19).eval()
gamma4 = self.name_to_var['conv4/BatchNorm/gamma']
gamma4.assign([-0.5] * 17 + [-0.4] * 14).eval()
def cost(self, conv):
with self.test_session():
return self.gamma_flop_reg.get_cost(conv).eval()
def loss(self, conv):
with self.test_session():
return self.gamma_flop_reg.get_regularization_term(conv).eval()
def testCost(self):
# Conv1 has 7 gammas above 0.45, and NUM_CHANNELS inputs (from the image).
conv = self.GetConv('conv1')
self.assertEqual(_coeff(conv) * 7 * NUM_CHANNELS, self.cost([conv]))
# Conv2 has 11 gammas above 0.45, and NUM_CHANNELS inputs (from the image).
conv = self.GetConv('conv2')
self.assertEqual(_coeff(conv) * 11 * NUM_CHANNELS, self.cost([conv]))
# Conv3 has 10 gammas above 0.45, and 7 + 11 inputs from conv1 and conv2.
conv = self.GetConv('conv3')
self.assertEqual(_coeff(conv) * 10 * 18, self.cost([conv]))
# Conv4 has 17 gammas above 0.45, and 7 + 11 inputs from conv1 and conv2.
conv = self.GetConv('conv4')
self.assertEqual(_coeff(conv) * 17 * 18, self.cost([conv]))
# Test that passing a list of convs sums their contributions:
convs = [self.GetConv('conv3'), self.GetConv('conv4')]
self.assertEqual(
self.cost(convs[:1]) + self.cost(convs[1:]), self.cost(convs))
class GammaFlopLossWithDepthwiseConvTestBase(object):
"""Test flop_regularizer for a network with depthwise convolutions."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def GetSession(self):
return
def BuildWithBatchNorm(self):
params = {
'trainable': True,
'normalizer_fn': layers.batch_norm,
'normalizer_params': {
'scale': True
}
}
ops_with_batchnorm = [layers.conv2d]
if self._depthwise_use_batchnorm:
ops_with_batchnorm.append(layers.separable_conv2d)
with arg_scope(ops_with_batchnorm, **params):
self.BuildModel()
def BuildModel(self):
# Our test model is:
#
# -> dw1 --> conv1 --+
# / |
# image [concat] --> conv3
# \ |
# -> conv2 --> dw2 --+
#
# (the model has one "output", conv3).
#
image = tf.constant(0.0, shape=[1, 17, 19, NUM_CHANNELS])
dw1 = layers.separable_conv2d(
image, None, [3, 3], depth_multiplier=1, stride=1, scope='dw1')
conv1 = layers.conv2d(dw1, 13, [7, 5], padding='SAME', scope='conv1')
conv2 = layers.conv2d(image, 23, [1, 1], padding='SAME', scope='conv2')
dw2 = layers.separable_conv2d(
conv2, None, [5, 5], depth_multiplier=1, stride=1, scope='dw2')
concat = tf.concat([conv1, dw2], 3)
self.conv3 = layers.conv2d(
concat, 29, [3, 3], stride=2, padding='SAME', scope='conv3')
self.name_to_var = {v.op.name: v for v in tf.global_variables()}
self.gamma_flop_reg = flop_regularizer.GammaFlopsRegularizer(
[self.conv3.op], gamma_threshold=0.45)
def GetConv(self, name):
return tf.get_default_graph().get_operation_by_name(
name + ('/Conv2D' if 'conv' in name else '/depthwise'))
def GetGammaAbsValue(self, name):
gamma_op = tf.get_default_graph().get_operation_by_name(name +
'/BatchNorm/gamma')
with self.GetSession(): # pylint: disable=not-context-manager
gamma = gamma_op.outputs[0].eval()
return np.abs(gamma)
def Init(self):
tf.global_variables_initializer().run()
gamma1 = self.name_to_var['conv1/BatchNorm/gamma']
gamma1.assign([0.8] * 7 + [0.2] * 6).eval()
gamma2 = self.name_to_var['conv2/BatchNorm/gamma']
gamma2.assign([-0.7] * 11 + [0.1] * 12).eval()
gamma3 = self.name_to_var['conv3/BatchNorm/gamma']
gamma3.assign([0.6] * 10 + [-0.3] * 19).eval()
# Initialize gamma for depthwise convs only if there are Batchnorm for them.
if self._depthwise_use_batchnorm:
gammad1 = self.name_to_var['dw1/BatchNorm/gamma']
gammad1.assign([-0.3] * 1 + [-0.9] * 2).eval()
gammad2 = self.name_to_var['dw2/BatchNorm/gamma']
gammad2.assign([0.3] * 5 + [0.9] * 10 + [-0.1] * 8).eval()
def cost(self, conv): # pylint: disable=invalid-name
with self.GetSession(): # pylint: disable=not-context-manager
cost = self.gamma_flop_reg.get_cost(conv)
return cost.eval() if isinstance(cost, tf.Tensor) else cost
def loss(self, conv): # pylint: disable=invalid-name
with self.GetSession(): # pylint: disable=not-context-manager
reg = self.gamma_flop_reg.get_regularization_term(conv)
return reg.eval() if isinstance(reg, tf.Tensor) else reg
class GammaFlopLossWithDepthwiseConvTest(
tf.test.TestCase, GammaFlopLossWithDepthwiseConvTestBase):
"""Test flop_regularizer for a network with depthwise convolutions."""
def setUp(self):
self._depthwise_use_batchnorm = True
tf.reset_default_graph()
self.BuildWithBatchNorm()
with self.test_session():
self.Init()
def GetSession(self):
return self.test_session()
def testCost(self):
# Dw1 has 2 gammas above 0.45 out of NUM_CHANNELS inputs (from the image),
# but because the input doesn't have a regularizer, it has no way of
# removing the channels, so the channel count is still NUM_CHANNELS.
conv = self.GetConv('dw1')
self.assertEqual(_coeff(conv) * NUM_CHANNELS, self.cost([conv]))
# Conv1 has 7 gammas above 0.45, and NUM_CHANNELS inputs (from dw1).
conv = self.GetConv('conv1')
self.assertEqual(_coeff(conv) * 7 * NUM_CHANNELS, self.cost([conv]))
# Conv2 has 11 active + 12 inactive, while Dw2 has 5 inactive, 10 active and
# 8 active. Their max (or) has 15 active and 8 inactive.
# Conv2 has NUM_CHANNELS inputs (from the image).
conv = self.GetConv('conv2')
self.assertEqual(_coeff(conv) * 15 * NUM_CHANNELS, self.cost([conv]))
# Dw2 has 15 out of 23 inputs (from the Conv2).
conv = self.GetConv('dw2')
self.assertEqual(_coeff(conv) * 15, self.cost([conv]))
# Conv3 has 10 gammas above 0.45, and 7 + 15 inputs from conv1 and dw2.
conv = self.GetConv('conv3')
self.assertEqual(_coeff(conv) * 10 * 22, self.cost([conv]))
def testRegularizer(self):
# Dw1 depthwise convolution is connected to the input (no regularizer).
conv = self.GetConv('dw1')
# Although the effective regularizer for dw is computed as below:
# gamma = self.GetGammaAbsValue('dw1')
# expected_loss = _coeff(conv) * gamma.sum()
# Since the input is not regularized, dw does not return a regularizer.
expected_loss = 0.0
self.assertNear(expected_loss, self.loss([conv]), expected_loss * 1e-5)
# Conv1 takes Dw1 as input, its input regularizer is from dw1.
conv = self.GetConv('conv1')
gamma = self.GetGammaAbsValue('conv1')
# The effective size for dw can be computed from its gamma, and
# the loss may be computed as follows:
# gamma_dw = self.GetGammaAbsValue('dw1')
# expected_loss = _coeff(conv) * (
# gamma.sum() * (gamma_dw > 0.45).sum() + gamma_dw.sum() *
# (gamma > 0.45).sum())
# However, since dw cannot change shape because its input doesn't have a
# regularizer, the real loss we expect should be:
expected_loss = _coeff(conv) * (gamma.sum() * NUM_CHANNELS)
self.assertNear(expected_loss, self.loss([conv]), expected_loss * 1e-5)
# Dw2 depthwise convolution is connected to conv2 (grouped regularizer).
conv = self.GetConv('conv2')
gamma_conv = self.GetGammaAbsValue('conv2')
dw = self.GetConv('dw2')
gamma_dw = self.GetGammaAbsValue('dw2')
gamma = np.maximum(gamma_dw, gamma_conv).sum()
expected_loss = _coeff(conv) * (gamma * 3 + (gamma > 0.45).sum() * 0)
self.assertNear(expected_loss, self.loss([conv]), expected_loss * 1e-5)
expected_loss = _coeff(dw) * gamma * 2
self.assertNear(expected_loss, self.loss([dw]), expected_loss * 1e-5)
class GammaFlopLossWithDepthwiseConvNoBatchNormTest(
    tf.test.TestCase, GammaFlopLossWithDepthwiseConvTestBase):
  """Test flop_regularizer for un-batchnormed depthwise convolutions.

  This test is used to confirm that when depthwise convolution is not BNed, it
  will not be considered towards the regularizer, but it will be counted towards
  the cost.
  This design choice is for backward compatibility for users who did not
  regularize depthwise convolutions. However, the cost will be reported
  regardless in order to be faithful to the real computation complexity.
  """

  def setUp(self):
    # Build the shared test graph with batch norm disabled on the depthwise
    # convolutions; the base class reads this flag in BuildWithBatchNorm().
    self._depthwise_use_batchnorm = False
    tf.reset_default_graph()
    self.BuildWithBatchNorm()
    with self.test_session():
      self.Init()

  def GetSession(self):
    # Required by the base class to evaluate tensors.
    return self.test_session()

  def testCost(self):
    # Dw1 has NUM_CHANNELS inputs (from the image).
    conv = self.GetConv('dw1')
    self.assertEqual(_coeff(conv) * 3, self.cost([conv]))
    # Conv1 has 7 gammas above 0.45, and 3 inputs (from dw1).
    conv = self.GetConv('conv1')
    self.assertEqual(_coeff(conv) * 7 * 3, self.cost([conv]))
    # Conv2 has 11 active outputs and NUM_CHANNELS inputs (from the image).
    conv = self.GetConv('conv2')
    self.assertEqual(_coeff(conv) * 11 * NUM_CHANNELS, self.cost([conv]))
    # Dw2 has 11 inputs (pass-through from the Conv2).
    conv = self.GetConv('dw2')
    self.assertEqual(_coeff(conv) * 11, self.cost([conv]))
    # Conv3 has 10 gammas above 0.45, and 7 + 11 inputs from conv1 and dw2.
    conv = self.GetConv('conv3')
    self.assertEqual(_coeff(conv) * 10 * 18, self.cost([conv]))

  def testRegularizer(self):
    # Dw1 depthwise convolution is connected to the input (no regularizer).
    conv = self.GetConv('dw1')
    expected_loss = 0.0
    self.assertNear(expected_loss, self.loss([conv]), expected_loss * 1e-5)
    # Conv1 takes Dw1 as input, but it's not affected by dw1 because depthwise
    # is not BNed.
    conv = self.GetConv('conv1')
    gamma = self.GetGammaAbsValue('conv1')
    expected_loss = _coeff(conv) * (gamma.sum() * NUM_CHANNELS)
    self.assertNear(expected_loss, self.loss([conv]), expected_loss * 1e-5)
    # Dw2 depthwise convolution is connected to conv2 (pass through).
    dw = self.GetConv('dw2')
    gamma = self.GetGammaAbsValue('conv2')
    expected_loss = _coeff(dw) * gamma.sum() * 2
    self.assertNear(expected_loss, self.loss([dw]), expected_loss * 1e-5)
class GammaFlopResidualConnectionsLossTest(tf.test.TestCase):
  """Tests flop_regularizer for a network with residual connections."""

  def setUp(self):
    tf.reset_default_graph()
    tf.set_random_seed(7)
    # Gammas above this value mark "alive" output channels.
    self._threshold = 0.6

  def buildModel(self, resnet_fn, block_fn):
    # We use this model as a test case because the slim.nets.resnet module is
    # used in some production.
    #
    # The model looks as follows:
    #
    # Image --> unit_1/shortcut
    # Image --> unit_1/conv1 --> unit_1/conv2 --> unit_1/conv3
    #
    # unit_1/shortcut + unit_1/conv3 --> unit_1 (residual connection)
    #
    # unit_1 --> unit_2/conv1 -> unit_2/conv2 --> unit_2/conv3
    #
    # unit_1 + unit_2/conv3 --> unit_2 (residual connection)
    #
    # In between, there are strided convolutions and pooling ops, but these
    # should not affect the regularizer.
    blocks = [
        block_fn('block1', base_depth=7, num_units=2, stride=2),
    ]
    image = tf.constant(0.0, shape=[1, 2, 2, NUM_CHANNELS])
    net = resnet_fn(
        image, blocks, include_root_block=False, is_training=False)[0]
    net = tf.reduce_mean(net, axis=(1, 2))
    return layers.fully_connected(net, 23, scope='FC')

  def buildGraphWithBatchNorm(self, resnet_fn, block_fn):
    # Every conv gets batch norm with a learnable scale (gamma), which is
    # what the gamma-based FLOP regularizer keys off.
    params = {
        'trainable': True,
        'normalizer_fn': layers.batch_norm,
        'normalizer_params': {
            'scale': True
        }
    }
    with arg_scope([layers.conv2d, layers.separable_conv2d], **params):
      self.net = self.buildModel(resnet_fn, block_fn)

  def initGamma(self):
    # Randomize every batch-norm gamma and cache the evaluated values for
    # computing expected costs in the tests.
    assignments = []
    gammas = {}
    for v in tf.global_variables():
      if v.op.name.endswith('/gamma'):
        assignments.append(v.assign(tf.random_uniform(v.shape)))
        gammas[v.op.name] = v
    with self.test_session() as s:
      s.run(assignments)
      self._gammas = s.run(gammas)

  def getGamma(self, short_name):
    # Map e.g. 'unit_1/conv3' to its full BatchNorm gamma variable name.
    tokens = short_name.split('/')
    name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
            '/BatchNorm/gamma')
    return self._gammas[name]

  def getOp(self, short_name):
    if short_name == 'FC':
      return tf.get_default_graph().get_operation_by_name('FC/MatMul')
    tokens = short_name.split('/')
    name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
            '/Conv2D')
    return tf.get_default_graph().get_operation_by_name(name)

  def numAlive(self, short_name):
    # Number of output channels whose gamma exceeds the threshold.
    return np.sum(self.getGamma(short_name) > self._threshold)

  def getCoeff(self, short_name):
    return _coeff(self.getOp(short_name))

  def testCost(self):
    self.buildGraphWithBatchNorm(resnet_v1.resnet_v1, resnet_v1.resnet_v1_block)
    self.initGamma()
    # A channel on the residual sum is alive if it is alive in any of the
    # three ops that feed into it.
    res_alive = np.logical_or(
        np.logical_or(
            self.getGamma('unit_1/shortcut') > self._threshold,
            self.getGamma('unit_1/conv3') > self._threshold),
        self.getGamma('unit_2/conv3') > self._threshold)
    self.gamma_flop_reg = flop_regularizer.GammaFlopsRegularizer(
        [self.net.op], self._threshold)
    expected = {}
    expected['unit_1/shortcut'] = (
        self.getCoeff('unit_1/shortcut') * np.sum(res_alive) * NUM_CHANNELS)
    expected['unit_1/conv1'] = (
        self.getCoeff('unit_1/conv1') * self.numAlive('unit_1/conv1') *
        NUM_CHANNELS)
    expected['unit_1/conv2'] = (
        self.getCoeff('unit_1/conv2') * self.numAlive('unit_1/conv2') *
        self.numAlive('unit_1/conv1'))
    expected['unit_1/conv3'] = (
        self.getCoeff('unit_1/conv3') * np.sum(res_alive) *
        self.numAlive('unit_1/conv2'))
    expected['unit_2/conv1'] = (
        self.getCoeff('unit_2/conv1') * self.numAlive('unit_2/conv1') *
        np.sum(res_alive))
    expected['unit_2/conv2'] = (
        self.getCoeff('unit_2/conv2') * self.numAlive('unit_2/conv2') *
        self.numAlive('unit_2/conv1'))
    expected['unit_2/conv3'] = (
        self.getCoeff('unit_2/conv3') * np.sum(res_alive) *
        self.numAlive('unit_2/conv2'))
    expected['FC'] = 2.0 * np.sum(res_alive) * 23.0
    # TODO: Is there a way to use Parametrized Tests to make this more
    # elegant?
    with self.test_session():
      for short_name in expected:
        cost = self.gamma_flop_reg.get_cost([self.getOp(short_name)]).eval()
        self.assertEqual(expected[short_name], cost)
      self.assertEqual(
          sum(expected.values()),
          self.gamma_flop_reg.get_cost().eval())
class GroupLassoFlopRegTest(tf.test.TestCase):
  """Tests GroupLassoFlopsRegularizer cost and regularization terms."""

  def assertNearRelatively(self, expected, actual):
    # Compare with a tolerance proportional to the expected magnitude.
    self.assertNear(expected, actual, expected * 1e-6)

  def testFlopRegularizer(self):
    tf.reset_default_graph()
    tf.set_random_seed(7907)
    with arg_scope(
        [layers.conv2d, layers.conv2d_transpose],
        weights_initializer=tf.random_normal_initializer):
      # Our test model is:
      #
      #         -> conv1 --+
      #        /           |--[concat]
      #  image --> conv2 --+
      #        \
      #         -> convt
      #
      # (the model has two "outputs", convt and concat).
      #
      image = tf.constant(0.0, shape=[1, 17, 19, NUM_CHANNELS])
      conv1 = layers.conv2d(
          image, 13, [7, 5], padding='SAME', scope='conv1')
      conv2 = layers.conv2d(
          image, 23, [1, 1], padding='SAME', scope='conv2')
      self.concat = tf.concat([conv1, conv2], 3)
      self.convt = layers.conv2d_transpose(
          image, 29, [7, 5], stride=3, padding='SAME', scope='convt')
      self.name_to_var = {v.op.name: v for v in tf.global_variables()}
    with self.test_session():
      tf.global_variables_initializer().run()
    threshold = 1.0
    flop_reg = flop_regularizer.GroupLassoFlopsRegularizer(
        [self.concat.op, self.convt.op], threshold=threshold)
    with self.test_session() as s:
      evaluated_vars = s.run(self.name_to_var)

    def group_norm(weights, axis=(0, 1, 2)):  # pylint: disable=invalid-name
      # Per-output-channel RMS of the weights (the group lasso statistic).
      return np.sqrt(np.mean(weights**2, axis=axis))

    reg_vectors = {
        'conv1': group_norm(evaluated_vars['conv1/weights'], (0, 1, 2)),
        'conv2': group_norm(evaluated_vars['conv2/weights'], (0, 1, 2)),
        'convt': group_norm(evaluated_vars['convt/weights'], (0, 1, 3))
    }

    # BUGFIX: dict.iteritems() exists only in Python 2; items() works on
    # both Python 2 and 3, so the test no longer crashes under Python 3.
    num_alive = {k: np.sum(r > threshold) for k, r in reg_vectors.items()}
    total_outputs = (
        reg_vectors['conv1'].shape[0] + reg_vectors['conv2'].shape[0])
    total_alive_outputs = sum(num_alive.values())
    assert total_alive_outputs > 0, (
        'All outputs are dead - test is trivial. Decrease the threshold.')
    assert total_alive_outputs < total_outputs, (
        'All outputs are alive - test is trivial. Increase the threshold.')

    coeff1 = _coeff(_get_op('conv1/Conv2D'))
    coeff2 = _coeff(_get_op('conv2/Conv2D'))
    coefft = _coeff(_get_op('convt/conv2d_transpose'))
    expected_flop_cost = NUM_CHANNELS * (
        coeff1 * num_alive['conv1'] + coeff2 * num_alive['conv2'] +
        coefft * num_alive['convt'])
    expected_reg_term = NUM_CHANNELS * (
        coeff1 * np.sum(reg_vectors['conv1']) + coeff2 * np.sum(
            reg_vectors['conv2']) + coefft * np.sum(reg_vectors['convt']))
    with self.test_session():
      self.assertEqual(
          round(expected_flop_cost), round(flop_reg.get_cost().eval()))
      self.assertNearRelatively(expected_reg_term,
                                flop_reg.get_regularization_term().eval())
def _get_op(name):  # pylint: disable=invalid-name
  """Return the operation named `name` from the current default graph."""
  graph = tf.get_default_graph()
  return graph.get_operation_by_name(name)
if __name__ == '__main__':
  # Run every test case in this file via the TensorFlow test runner.
  tf.test.main()
| [
"1155107977@link.cuhk.edu.hk"
] | 1155107977@link.cuhk.edu.hk |
8fcbd338b424590aa21d59e3c91d905708a89c6c | 1be3fd4f49ff1ba41b36bdb45ad1cd738b7e2e97 | /动态规划/序列型动态规划/LeetCode53_最大子序和.py | 6e96ab053d5009923bc8cc47501246da1dcd9274 | [] | no_license | ltzp/LeetCode | d5dcc8463e46b206515c1205582305d8ce981cc5 | f43d70cac56bdf6377b22b865174af822902ff78 | refs/heads/master | 2023-07-28T02:36:44.202092 | 2021-09-08T15:55:09 | 2021-09-08T15:55:09 | 308,343,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/04/09
# @Author : yuetao
# @Site :
# @File : LeetCode53_最大子序和.py
# @Desc :
"""
输入:nums = [-2,1,-3,4,-1,2,1,-5,4]
输出:6
解释:连续子数组 [4,-1,2,1] 的和最大,为 6 。
在状态的计算过程中我们可以发现,后面状态的计算只与当前状态的值有关,而与此阶段之前的值无关,所以具有无后效性。
"""
class Solution(object):
    """LeetCode 53: maximum subarray sum (Kadane's algorithm)."""

    def maxSubArray(self, nums):
        """Return the largest sum over all non-empty contiguous subarrays.

        The best sum of a subarray ending at index i is either nums[i]
        alone or nums[i] extended from the previous best; only the previous
        value is needed, so the original O(n)-space dp list is replaced by
        two scalars: O(n) time, O(1) extra space.

        :type nums: List[int]
        :rtype: int
        :raises IndexError: if ``nums`` is empty (same as the original).
        """
        best = cur = nums[0]
        for x in nums[1:]:
            # Either extend the running subarray with x or restart at x.
            cur = max(cur + x, x)
            best = max(best, cur)
        return best
if __name__ == '__main__':
    # Quick manual check using the LeetCode sample input (prints 6).
    sample = [-2,1,-3,4,-1,2,1,-5,4]
    print(Solution().maxSubArray(sample))
| [
"806518802@qq.com"
] | 806518802@qq.com |
d7b51f7ff602b1bd5707500365b2f4011d95eb01 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_ethiopian.py | c12e2d18374d3610fe33cc409fbae8d3b9221e7b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py |
#calss header
class _ETHIOPIAN():
def __init__(self,):
self.name = "ETHIOPIAN"
self.definitions = [u'belonging to or relating to Ethiopia or its people']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
bd58733911b35cc559924bee35b1bcf22c6073f3 | 7e5cfebee3b9a5ed4c7dac67b59346d8a0e3f989 | /john1/apps.py | d812bd37b5228e2d1fa96f594a13a5a1a8a5ca44 | [] | no_license | Hasnain-4/Blog-Todo-Shop | fd64f5ec194ce182f73f20f69110ab6ca762ba1f | 0dccf9c3e48c8fd3cb1eb973e0ffe9596dd5c32d | refs/heads/master | 2023-01-05T09:56:19.034169 | 2020-11-07T06:55:56 | 2020-11-07T06:55:56 | 308,890,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class John1Config(AppConfig):
    # Django application configuration for the "john1" app.
    name = 'john1'
| [
"ansarihasnain3598@gmail.com"
] | ansarihasnain3598@gmail.com |
c8609399aa50011c76fc5e290f0e4ee907c1c282 | e4fcd551a9d83e37a2cd6d5a2b53a3cc397ccb10 | /codes/t2i_models/CogView2/SwissArmyTransformer-main/examples/roberta/finetune_roberta_rte.py | 2a97af8db773d45f62776f782c3b787a6622c713 | [
"Apache-2.0"
] | permissive | eslambakr/HRS_benchmark | 20f32458a47c6e1032285b44e70cf041a64f842c | 9f153d8c71d1119e4b5c926b899bb556a6eb8a59 | refs/heads/main | 2023-08-08T11:57:26.094578 | 2023-07-22T12:24:51 | 2023-07-22T12:24:51 | 597,550,499 | 33 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,667 | py | import os
import torch
import argparse
import numpy as np
from SwissArmyTransformer import mpu, get_args
from SwissArmyTransformer.training.deepspeed_training import training_main
from roberta_model import RobertaModel
from SwissArmyTransformer.model.mixins import PrefixTuningMixin, MLPHeadMixin
class ClassificationModel(RobertaModel):
    """RoBERTa with a single-logit MLP classification head and prefix tuning."""

    def __init__(self, args, transformer=None, parallel_output=True):
        super().__init__(args, transformer=transformer, parallel_output=parallel_output)
        # Drop the pretraining output head, add a 2-layer MLP producing one
        # logit, and prepend trainable prefix vectors to every layer.
        self.del_mixin('roberta-final')
        self.add_mixin('classification_head', MLPHeadMixin(args.hidden_size, 2048, 1))
        self.add_mixin('prefix-tuning', PrefixTuningMixin(args.num_layers, args.hidden_size // args.num_attention_heads, args.num_attention_heads, args.prefix_len))

    def disable_untrainable_params(self):
        # Freeze the word embeddings; everything else stays trainable.
        self.transformer.word_embeddings.requires_grad_(False)
        # for layer_id in range(len(self.transformer.layers)):
        #     self.transformer.layers[layer_id].requires_grad_(False)
def get_batch(data_iterator, args, timers):
    """Pull one batch and broadcast it across model-parallel ranks.

    Returns (tokens, labels, attention_mask, position_ids, loss_mask); the
    last element marks non-padding positions (token id 1 is the pad id here).
    """
    # Items and their type.
    keys = ['input_ids', 'position_ids', 'attention_mask', 'label']
    datatype = torch.int64

    # Broadcast data.
    timers('data loader').start()
    if data_iterator is not None:
        data = next(data_iterator)
    else:
        data = None
    timers('data loader').stop()
    data_b = mpu.broadcast_data(keys, data, datatype)
    # Unpack.
    tokens = data_b['input_ids'].long()
    labels = data_b['label'].long()
    position_ids = data_b['position_ids'].long()
    # Broadcastable [batch, 1, 1, seq] mask shape expected by attention.
    attention_mask = data_b['attention_mask'][:, None, None, :].float()

    # Convert
    if args.fp16:
        attention_mask = attention_mask.half()

    return tokens, labels, attention_mask, position_ids, (tokens!=1)
def forward_step(data_iterator, model, args, timers):
    """Forward step: single-logit binary classification loss plus accuracy."""
    # Get the batch.
    timers('batch generator').start()
    tokens, labels, attention_mask, position_ids, loss_mask = get_batch(
        data_iterator, args, timers)
    timers('batch generator').stop()

    logits, *mems = model(tokens, position_ids, attention_mask)
    # pred = ((logits.contiguous().float().squeeze(-1)) * loss_mask).sum(dim=-1) / loss_mask.sum(dim=-1)
    # Use the logit at the first position (sentence-start token) as the score.
    pred = logits.contiguous().float().squeeze(-1)[..., 0]
    loss = torch.nn.functional.binary_cross_entropy_with_logits(
        pred,
        labels.float()
    )
    # Fraction of examples whose thresholded prediction matches the label.
    acc = ((pred > 0.).long() == labels).sum() / labels.numel()
    return loss, {'acc': acc}
pretrain_path = ''
from transformers import RobertaTokenizer
tokenizer = RobertaTokenizer.from_pretrained(os.path.join(pretrain_path, 'roberta-large'))
from transformers.models.roberta.modeling_roberta import create_position_ids_from_input_ids
def _encode(text, text_pair):
    # Tokenize a premise/hypothesis pair to a fixed length; truncation only
    # shortens the first sequence. Relies on module-level `tokenizer`/`args`.
    encoded_input = tokenizer(text, text_pair, max_length=args.sample_length, padding='max_length', truncation='only_first')
    # Recompute RoBERTa-style position ids (padding_idx=1, past length 0).
    position_ids = create_position_ids_from_input_ids(torch.tensor([encoded_input['input_ids']]), 1, 0)
    return dict(input_ids=encoded_input['input_ids'], position_ids=position_ids[0].numpy(), attention_mask=encoded_input['attention_mask'])
from SwissArmyTransformer.data_utils import load_hf_dataset
def create_dataset_function(path, args):
    """Build the RTE dataset: encode each premise/hypothesis pair plus label."""
    def process_fn(row):
        # One raw example -> fixed-length int64 arrays consumed by get_batch().
        pack, label = _encode(row['premise'], row['hypothesis']), int(row['label'])
        return {
            'input_ids': np.array(pack['input_ids'], dtype=np.int64),
            'position_ids': np.array(pack['position_ids'], dtype=np.int64),
            'attention_mask': np.array(pack['attention_mask'], dtype=np.int64),
            'label': label
        }
    return load_hf_dataset(path, process_fn, columns = ["input_ids", "position_ids", "attention_mask", "label"], cache_dir='/dataset/fd5061f6/SwissArmyTransformerDatasets', offline=False, transformer_name="rte_transformer")
if __name__ == '__main__':
    # Task-specific command-line options on top of the framework defaults.
    py_parser = argparse.ArgumentParser(add_help=False)
    py_parser.add_argument('--new_hyperparam', type=str, default=None)
    py_parser.add_argument('--sample_length', type=int, default=512-16)
    py_parser.add_argument('--prefix_len', type=int, default=16)
    py_parser.add_argument('--old_checkpoint', action="store_true")
    known, args_list = py_parser.parse_known_args()
    args = get_args(args_list)
    # Merge the extra options into the framework args namespace.
    args = argparse.Namespace(**vars(args), **vars(known))
    # from cogdata.utils.ice_tokenizer import get_tokenizer as get_ice
    # tokenizer = get_tokenizer(args=args, outer_tokenizer=get_ice())
    training_main(args, model_cls=ClassificationModel, forward_step_function=forward_step, create_dataset_function=create_dataset_function)
| [
"islam.bakr.2017@gmail.com"
] | islam.bakr.2017@gmail.com |
17d251d9dfe4693be09b37532fad90b492e6416d | 11ccb6827cf643b37c44a2e174422f9c6f9497f2 | /falcon/bench/dj/manage.py | 1ed3638a4df1bec17ec47502b337b01f857ace4a | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | falconry/falcon | 7506f838520e5105714702d9a9b2f0e608a704b9 | 32207fe8a7ebdfb95271d8430c4977c7a654928c | refs/heads/master | 2023-08-31T05:32:03.755869 | 2023-08-21T21:45:34 | 2023-08-21T21:45:34 | 7,040,500 | 8,922 | 1,183 | Apache-2.0 | 2023-09-09T20:58:36 | 2012-12-06T18:17:51 | Python | UTF-8 | Python | false | false | 808 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings before dispatching the command.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dj.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django  # NOQA
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                'available on your PYTHONPATH environment variable? Did you '
                'forget to activate a virtual environment?'
            )
        raise
    execute_from_command_line(sys.argv)
| [
"john.vrbanac@linux.com"
] | john.vrbanac@linux.com |
78e28807403ebdbf2c4fe8e40201ff5e0fc9d861 | b8cc6d34ad44bf5c28fcca9e0df01d9ebe0ee339 | /入门学习/datetime_eg.py | a136cabe3d671ca43e319ccc8cc933f54ffd180a | [] | no_license | python-yc/pycharm_script | ae0e72898ef44a9de47e7548170a030c0a752eb5 | c8947849090c71e131df5dc32173ebe9754df951 | refs/heads/master | 2023-01-05T06:16:33.857668 | 2020-10-31T08:09:53 | 2020-10-31T08:09:53 | 296,778,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | #coding:utf-8
import datetime
# Common datetime attributes.
# datetime.date(): an idealized date, providing year, month, day attributes.
dt = datetime.date(2018,3,26)
print(dt)
print(dt.day)
print(dt.year)
print(dt.month)
# datetime.time: an idealized time of day.
# datetime.datetime: a combination of a date and a time.
# datetime.timedelta: a duration, i.e. a length of time.

from datetime import datetime
import time
# Common class methods: today, now, utcnow, fromtimestamp (returns the
# local time corresponding to a POSIX timestamp).
dt = datetime(2018,2,26) # this value itself is unused, but the constructor requires these three arguments; removing them raises an error
print(datetime.today())
print(dt.now())
print(dt.fromtimestamp(time.time()))
print("===============11111111")

# datetime.timedelta: represents a time interval.
from datetime import datetime,timedelta
t1 = datetime.now()
print(t1.strftime("%Y-%m-%d %H:%M:%S"))
# td represents a duration of one hour.
td = timedelta(hours=1)
print(td)
# Add the interval to the current time, then format and print the
# resulting time (one hour later).
print((t1+td).strftime("%Y-%m-%d %H:%M:%S"))

## timeit - a timing measurement tool
### - experiment: measuring a program's running time
print("===============222222")
def p():
    time.sleep(3.6)
t1 = time.time()
p()
print(time.time() - t1)

print("===========3333333333")
# Use timeit to run code 100000 times and inspect the running time.
# Format: timeit.timeit(stmt=c,number=10000); c can be a function or a
# string code block.
#### string code block form: s='''contents between triple quotes'''
# timeit can execute a function to measure its execution time, e.g.:
import timeit
def doIt():
    num = 2
    for i in range(num):
        print("Repeat for {0}".format(i))
# Execute the function, repeated 10 times.
print(doIt)
print(type(doIt))
t = timeit.timeit(stmt=doIt,number=10)
print(t)

print("=============或者这样同上一个")
import timeit
s ='''
def doIt(num):
    num = 2
    for i in range(num):
        print("Repeat for {0}".format(i))
'''
# Execute the function, repeated 10 times.
# timeit executes doIt(num); setup prepares the environment variables.
# Effectively timeit builds a small environment in which the code runs
# roughly in this order:
#
'''
def doIt(num):
......
num = 2
doIt(num)
'''
# The num appended after setup sets the loop range (the range argument of
# the for); the number argument is the repeat count. (NOTE(review): the
# original comment said num=2 but the code appends "num=0".)
t = timeit.timeit("doIt(num)",setup=s+"num=0",number=10)
print(t)

# help(timeit.timeit)
| [
"15655982512.com"
] | 15655982512.com |
f044f713fb899779529717704ff3f507de299d33 | a905f5b56732cb49d5d692b75c7334d772b67144 | /Gui/t2.py | bf8c4c91ef955c1e02ec9ad8aec3f390ced0d5f7 | [] | no_license | weilaidb/PythonExample2 | d859acee3eb3e9b6448553b4f444c95ab2b2fc8f | 492fa2d687a8f3b9370ed8c49ffb0d06118246c7 | refs/heads/master | 2022-04-20T00:35:35.456199 | 2020-04-26T00:32:12 | 2020-04-26T00:32:12 | 114,774,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | '''''5.向Listbox中添加一个item'''
# All examples above used insert() to add a single item to the Listbox.
# insert() takes two arguments: the index at which to insert, and the
# item to add.
# Two special index values exist: ACTIVE inserts before the currently
# selected item (i.e. it uses the current selection's index as the insert
# position); END appends after the last item in the Listbox.
# First append three items to the Listbox, then add three more at the start.
from tkinter import *
root = Tk()
lb = Listbox(root)
for item in ['python','tkinter','widget']:
    lb.insert(END,item)
# Adding one item: passing a list makes the whole [] a single item.
#lb.insert(0,['linux','windows','unix'])
# Adding three items: each string becomes its own item.
lb.insert(0,'linux','windows','unix')
lb.pack()
root.mainloop()
"wxjlmr@126.com"
] | wxjlmr@126.com |
374eb79ae24863d43ed492b428e933b87a306275 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_43/66.py | 6e5512c1daca685a498a0b628f17dadb6578b723 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | import sys
# Code Jam "All Your Base"-style solver (Python 2: print statement,
# xrange, dict.has_key). Each input line after the first is a message in
# an unknown base; digits are assigned greedily (first symbol -> 1, next
# new symbol -> 0, then 2, 3, ...) to minimize the decoded value.
lines = sys.stdin.readlines()
i = 1
while i<len(lines):
    s = set()
    msg = lines[i].rstrip()
    # Collect the distinct symbols; their count is the (minimum) base.
    for each in msg:
        s.add(each)
    #print len(s)
    maxbase = len(s)
    # The base must be at least 2 even if only one distinct symbol appears.
    if maxbase == 1:
        maxbase = 2
    # The leading digit is always 1, contributing base^(len-1).
    cost = maxbase ** (len(msg)-1)
    # Remaining candidate digit values, popped smallest-first: 0, 2, 3, ...
    cands = range(len(s))
    cands.reverse()
    try:
        cands.remove(1)
    except:
        pass
    power = len(msg)-2
    index = 2  # NOTE(review): unused variable
    prev = {}
    # First symbol maps to digit 1.
    prev[msg[0]] = 1
    for x in xrange(1, len(msg)):
        c = msg[x]
        #print c
        if not prev.has_key(c):
            prev[c] = int(cands.pop())
        #print prev[c]
        #print "maxbase ", maxbase
        #print "power ", power
        cost += prev[c] * (maxbase ** power)
        power -= 1
    #print prev
    print "Case #"+str(i)+": "+str(cost)
    i += 1
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
5925428ff96b79d625eb9f6608d44301fe70d7e7 | 7298d1692c6948f0880e550d6100c63a64ce3ea1 | /catalog-configs/PDB/ihm_entity_poly_segment.py | f4913948c672c471483fa197dce530de14411c3e | [] | no_license | informatics-isi-edu/protein-database | b7684b3d08dbf22c1e7c4a4b8460248c6f0d2c6d | ce4be1bf13e6b1c22f3fccbb513824782609991f | refs/heads/master | 2023-08-16T10:24:10.206574 | 2023-07-25T23:10:42 | 2023-07-25T23:10:42 | 174,095,941 | 2 | 0 | null | 2023-06-16T19:44:43 | 2019-03-06T07:39:14 | Python | UTF-8 | Python | false | false | 11,355 | py | import argparse
from attrdict import AttrDict
from deriva.core import ErmrestCatalog, get_credential, DerivaPathError
from deriva.utils.catalog.components.deriva_model import DerivaCatalog
import deriva.core.ermrest_model as em
from deriva.core.ermrest_config import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
groups = {
'pdb-admin': 'https://auth.globus.org/0b98092c-3c41-11e9-a8c8-0ee7d80087ee',
'pdb-reader': 'https://auth.globus.org/8875a770-3c40-11e9-a8c8-0ee7d80087ee',
'pdb-writer': 'https://auth.globus.org/c94a1e5c-3c40-11e9-a5d1-0aacc65bfe9a',
'pdb-curator': 'https://auth.globus.org/eef3e02a-3c40-11e9-9276-0edc9bdd56a6',
'isrd-staff': 'https://auth.globus.org/176baec4-ed26-11e5-8e88-22000ab4b42b'
}
table_name = 'ihm_entity_poly_segment'
schema_name = 'PDB'
column_annotations = {
'RCT': {
chaise_tags.display: {
'name': 'Creation Time'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'RMT': {
chaise_tags.display: {
'name': 'Last Modified Time'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'RCB': {
chaise_tags.display: {
'name': 'Created By'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'RMB': {
chaise_tags.display: {
'name': 'Modified By'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'structure_id': {},
'comp_id_begin': {},
'comp_id_end': {},
'entity_id': {},
'id': {
chaise_tags.generated: None
},
'seq_id_begin': {},
'seq_id_end': {},
'Owner': {}
}
column_comment = {
'structure_id': 'type:text\nThe value of _entry.id identifies the data block.\n\n Note that this item need not be a number; it can be any unique\n identifier.',
'comp_id_begin': 'type:text\nThe value of _chem_comp.id must uniquely identify each item in\n the CHEM_COMP list.\n\n For protein polymer entities, this is the three-letter code for\n the amino acid.\n\n For nucleic acid polymer entities, this is the one-letter code\n for the base.',
'comp_id_end': 'type:text\nThe value of _chem_comp.id must uniquely identify each item in\n the CHEM_COMP list.\n\n For protein polymer entities, this is the three-letter code for\n the amino acid.\n\n For nucleic acid polymer entities, this is the one-letter code\n for the base.',
'entity_id': 'type:text\nThe value of _entity.id must uniquely identify a record in the\n ENTITY list.\n\n Note that this item need not be a number; it can be any unique\n identifier.',
'id': 'type:int4\nA unique identifier for the polymeric segment.',
'seq_id_begin': 'type:int4\nThe value of _entity_poly_seq.num must uniquely and sequentially\n identify a record in the ENTITY_POLY_SEQ list.\n\n Note that this item must be a number and that the sequence\n numbers must progress in increasing numerical order.',
'seq_id_end': 'type:int4\nThe value of _entity_poly_seq.num must uniquely and sequentially\n identify a record in the ENTITY_POLY_SEQ list.\n\n Note that this item must be a number and that the sequence\n numbers must progress in increasing numerical order.',
'Owner': 'Group that can update the record.'
}
column_acls = {}
column_acl_bindings = {}
column_defs = [
em.Column.define(
'structure_id',
em.builtin_types['text'],
nullok=False,
comment=column_comment['structure_id'],
),
em.Column.define(
'comp_id_begin',
em.builtin_types['text'],
nullok=False,
comment=column_comment['comp_id_begin'],
),
em.Column.define(
'comp_id_end',
em.builtin_types['text'],
nullok=False,
comment=column_comment['comp_id_end'],
),
em.Column.define(
'entity_id', em.builtin_types['text'], nullok=False, comment=column_comment['entity_id'],
),
em.Column.define(
'id',
em.builtin_types['int4'],
nullok=False,
annotations=column_annotations['id'],
comment=column_comment['id'],
),
em.Column.define(
'seq_id_begin',
em.builtin_types['int4'],
nullok=False,
comment=column_comment['seq_id_begin'],
),
em.Column.define(
'seq_id_end',
em.builtin_types['int4'],
nullok=False,
comment=column_comment['seq_id_end'],
),
em.Column.define('Owner', em.builtin_types['text'], comment=column_comment['Owner'],
),
]
visible_columns = {
'*': [
{
'source': 'RID'
}, {
'source': [{
'outbound': ['PDB', 'ihm_entity_poly_segment_structure_id_fkey']
}, 'RID']
},
{
'source': [
{
'outbound': ['PDB', 'ihm_entity_poly_segment_mm_poly_res_label_begin_fkey']
}, 'mon_id'
],
'markdown_name': 'comp id begin'
},
{
'source': [
{
'outbound': ['PDB', 'ihm_entity_poly_segment_mm_poly_res_label_end_fkey']
}, 'mon_id'
],
'markdown_name': 'comp id end'
},
{
'source': [
{
'outbound': ['PDB', 'ihm_entity_poly_segment_mm_poly_res_label_begin_fkey']
}, 'entity_id'
],
'markdown_name': 'entity id'
}, {
'source': 'id'
},
{
'source': [
{
'outbound': ['PDB', 'ihm_entity_poly_segment_mm_poly_res_label_begin_fkey']
}, 'num'
],
'markdown_name': 'seq id begin'
},
{
'source': [
{
'outbound': ['PDB', 'ihm_entity_poly_segment_mm_poly_res_label_end_fkey']
}, 'num'
],
'markdown_name': 'seq id end'
},
{
'source': [
{
'outbound': ['PDB', 'ihm_entity_poly_segment_mm_poly_res_label_begin_fkey']
}, 'RID'
],
'markdown_name': 'molecular entity begin'
},
{
'source': [
{
'outbound': ['PDB', 'ihm_entity_poly_segment_mm_poly_res_label_end_fkey']
}, 'RID'
],
'markdown_name': 'molecular entity end'
}, {
'source': 'RCT'
}, {
'source': 'RMT'
}, {
'source': [{
'outbound': ['PDB', 'ihm_entity_poly_segment_RCB_fkey']
}, 'RID']
}, {
'source': [{
'outbound': ['PDB', 'ihm_entity_poly_segment_RMB_fkey']
}, 'RID']
}, {
'source': [{
'outbound': ['PDB', 'ihm_entity_poly_segment_Owner_fkey']
}, 'RID']
}
]
}
visible_foreign_keys = {
'filter': 'detailed',
'detailed': [
['PDB', 'ihm_model_representation_details_entity_poly_segment_id_fkey'],
['PDB', 'ihm_struct_assembly_details_entity_poly_segment_id_fkey'],
['PDB', 'ihm_starting_model_details_entity_poly_segment_id_fkey'],
['PDB', 'ihm_localization_density_files_entity_poly_segment_id_fkey']
]
}
table_display = {'row_name': {'row_markdown_pattern': '{{{id}}}'}}
table_annotations = {
chaise_tags.table_display: table_display,
chaise_tags.visible_columns: visible_columns,
chaise_tags.visible_foreign_keys: visible_foreign_keys,
}
table_comment = None
table_acls = {}
table_acl_bindings = {
'self_service_group': {
'types': ['update', 'delete'],
'scope_acl': ['*'],
'projection': ['Owner'],
'projection_type': 'acl'
},
'self_service_creator': {
'types': ['update', 'delete'],
'scope_acl': ['*'],
'projection': ['RCB'],
'projection_type': 'acl'
}
}
key_defs = [
em.Key.define(['RID'], constraint_names=[('PDB', 'ihm_entity_poly_segment_RIDkey1')],
),
em.Key.define(
['structure_id', 'id'], constraint_names=[('PDB', 'ihm_entity_poly_segment_primary_key')],
),
]
fkey_defs = [
em.ForeignKey.define(
['Owner'],
'public',
'Catalog_Group', ['ID'],
constraint_names=[('PDB', 'ihm_entity_poly_segment_Owner_fkey')],
acls={
'insert': [groups['pdb-curator']],
'update': [groups['pdb-curator']]
},
acl_bindings={
'set_owner': {
'types': ['update', 'insert'],
'scope_acl': ['*'],
'projection': ['ID'],
'projection_type': 'acl'
}
},
),
em.ForeignKey.define(
['RCB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[('PDB', 'ihm_entity_poly_segment_RCB_fkey')],
acls={
'insert': ['*'],
'update': ['*']
},
),
em.ForeignKey.define(
['RMB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[('PDB', 'ihm_entity_poly_segment_RMB_fkey')],
acls={
'insert': ['*'],
'update': ['*']
},
),
em.ForeignKey.define(
['structure_id'],
'PDB',
'entry', ['id'],
constraint_names=[('PDB', 'ihm_entity_poly_segment_structure_id_fkey')],
acls={
'insert': ['*'],
'update': ['*']
},
on_update='CASCADE',
on_delete='SET NULL',
),
em.ForeignKey.define(
['structure_id', 'comp_id_begin', 'entity_id', 'seq_id_begin'],
'PDB',
'entity_poly_seq', ['structure_id', 'mon_id', 'entity_id', 'num'],
constraint_names=[('PDB', 'ihm_entity_poly_segment_mm_poly_res_label_begin_fkey')],
acls={
'insert': ['*'],
'update': ['*']
},
on_update='CASCADE',
on_delete='SET NULL',
),
em.ForeignKey.define(
['structure_id', 'comp_id_end', 'entity_id', 'seq_id_end'],
'PDB',
'entity_poly_seq', ['structure_id', 'mon_id', 'entity_id', 'num'],
constraint_names=[('PDB', 'ihm_entity_poly_segment_mm_poly_res_label_end_fkey')],
acls={
'insert': ['*'],
'update': ['*']
},
on_update='CASCADE',
on_delete='SET NULL',
),
]
table_def = em.Table.define(
table_name,
column_defs=column_defs,
key_defs=key_defs,
fkey_defs=fkey_defs,
annotations=table_annotations,
acls=table_acls,
acl_bindings=table_acl_bindings,
comment=table_comment,
provide_system=True
)
def main(catalog, mode, replace=False, really=False):
    # Create or update the ihm_entity_poly_segment table definition in the
    # target catalog, honoring the requested mode/replace/really flags.
    updater = CatalogUpdater(catalog)
    updater.update_table(mode, schema_name, table_def, replace=replace, really=really)
if __name__ == "__main__":
    # Default deployment target; both can be overridden on the command line.
    host = 'pdb.isrd.isi.edu'
    catalog_id = 5
    mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)
    catalog = DerivaCatalog(host, catalog_id=catalog_id, validate=False)
    main(catalog, mode, replace)
| [
"carl@isi.edu"
] | carl@isi.edu |
52b9507585c470c8c272b918d3b8c1f3741990ae | 651a296c8f45b5799781fd78a6b5329effe702a0 | /subset/i4_sqrt.py | 15231735ca1b9e9e6b31120a626bc3326da2d377 | [] | no_license | pdhhiep/Computation_using_Python | 095d14370fe1a01a192d7e44fcc81a52655f652b | 407ed29fddc267950e9860b8bbd1e038f0387c97 | refs/heads/master | 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,042 | py | #!/usr/bin/env python
def i4_sqrt ( n ):
    """Find the integer square root of N by solving |N| = Q^2 + R.

    Q is the integer with Q^2 <= |N| < (Q+1)^2, computed with integer-only
    Newton iteration (no floating point).  To avoid worrying about negative
    arguments, the absolute value of N is used.

    Licensing: distributed under the GNU LGPL license.
    Modified: 14 March 2015.  Author: John Burkardt.
    Reference: Mark Herkommer, Number Theory, A Programmer's Guide,
    McGraw Hill, 1999, pages 294-307.

    Parameters:
      Input, integer N, the number whose integer square root is desired.
      Output, integer Q, R, the integer square root and the remainder.
    """
    m = abs ( n )
    q = m
    # Newton step q <- (q + m // q) // 2 converges from above; stop as soon
    # as m // q is no longer strictly below q.  The `0 < m` guard keeps the
    # m == 0 case (q == 0) away from the division.
    while 0 < m and ( m // q ) < q:
        q = ( q + ( m // q ) ) // 2
    return q, m - q * q
def i4_sqrt_test ( ):
    """Demonstrate I4_SQRT on the integers -5 through 20.

    Prints each N together with its integer square root Q and remainder R.

    Fix: the original body used Python 2 `print` statements, which are a
    SyntaxError under Python 3 and made the whole module unimportable there.
    The parenthesized calls below print identical output on both Python 2
    (single-argument parenthesized expression) and Python 3.

    Licensing: distributed under the GNU LGPL license.
    Modified: 14 March 2015.  Author: John Burkardt.
    """
    print ( '' )
    print ( 'I4_SQRT_TEST' )
    print ( ' I4_SQRT computes the square root of an I4.' )
    print ( '' )
    print ( ' N Sqrt(N) Remainder' )
    print ( '' )
    for n in range ( -5, 21 ):
        q, r = i4_sqrt ( n )
        print ( ' %7d %7d %7d' % ( n, q, r ) )
#
#  Terminate.
#
    print ( '' )
    print ( 'I4_SQRT_TEST' )
    print ( ' Normal end of execution.' )
    return
if __name__ == '__main__':
    # Wrap the demonstration run with timestamps.
    from timestamp import timestamp
    timestamp()
    i4_sqrt_test()
    timestamp()
| [
"siplukabir@gmail.com"
] | siplukabir@gmail.com |
e03a27d2244db7374620a0b5dca2991593c42218 | 59d5a801dd8361fe2b68f0cdfc1a0c06bbe9d275 | /Competition/恶意样本检测/features/common.py | 39b15781ba1fd25376c5b96c49ab1448724ab5da | [] | no_license | HanKin2015/Machine_to_DeepingLearning | 2ff377aa68655ca246eb19bea20fec232cec5d77 | 58fa8d06ef8a8eb0762e7cbd32a09552882c5412 | refs/heads/master | 2023-01-25T01:16:41.440064 | 2023-01-18T08:23:49 | 2023-01-18T08:23:49 | 134,238,811 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,837 | py | # -*- coding: utf-8 -*-
"""
文 件 名: common.py
文件描述: 公共文件库
作 者: HeJian
创建日期: 2022.06.15
修改日期:2022.06.16
Copyright (c) 2022 HeJian. All rights reserved.
"""
import os, re, time, datetime
os.environ['NUMEXPR_MAX_THREADS'] = '64'
import subprocess
from log import logger
from PIL import Image
import binascii
import pefile
from capstone import *
import pickle
from collections import *
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split # 随机划分为训练子集和测试子集
from sklearn.model_selection import cross_val_score # 模型评价:训练误差和测试误差
from sklearn.feature_selection import SelectFromModel# 特征选择(三种方法)
from sklearn.metrics import roc_auc_score # 评价指标
from sklearn.metrics import f1_score # F1
from sklearn.model_selection import KFold # K折交叉验证
from sklearn.ensemble import RandomForestClassifier # RFC随机森林分类
from sklearn.ensemble import ExtraTreesClassifier # ETC极端随机树分类
import xgboost as xgb # XGB
import lightgbm as lgb # LGB
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import LinearSVC
from sklearn import tree
from sklearn.feature_extraction import FeatureHasher
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from mlxtend.classifier import StackingCVClassifier
DATA_PATH = './data/'                                    # intermediate data directory
DATASET_PATH = './dataset/'                              # dataset directory
SAMPLE_PATH = './AIFirst_data/'                          # root of the raw sample collection
TRAIN_WHITE_PATH = SAMPLE_PATH+'train/white/'            # training-set white (benign) samples
TRAIN_BLACK_PATH = SAMPLE_PATH+'train/black/'            # training-set black (malicious) samples
TEST_PATH = SAMPLE_PATH+'test/'                          # test-set samples
TRAIN_WHITE_GRAY_IMAGES_PATH = './gray_images/train/white/'  # grayscale images of training white samples
TRAIN_BLACK_GRAY_IMAGES_PATH = './gray_images/train/black/'  # grayscale images of training black samples
TRAIN_GRAY_IMAGES_PATH = './gray_images/train/'          # grayscale images of all training samples
TEST_GRAY_IMAGES_PATH = './gray_images/test/'            # grayscale images of test samples
MODEL_PATH = './model/'                                  # trained-model directory
TRAIN_DATASET_PATH = DATASET_PATH+'train_dataset.csv'    # training-set feature dataset
TEST_DATASET_PATH = DATASET_PATH+'test_dataset.csv'      # test-set feature dataset
TEST_DIRTY_DATASET_PATH = DATASET_PATH+'test_dirty_dataset.csv'  # test-set "dirty" (unparsable-PE) dataset
TRAIN_WHITE_IMAGE_MATRIX_PATH = DATA_PATH+'train_white_image_matrix.csv'  # image-matrix features, training white
TRAIN_BLACK_IMAGE_MATRIX_PATH = DATA_PATH+'train_black_image_matrix.csv'  # image-matrix features, training black
TRAIN_IMAGE_MATRIX_PATH = DATASET_PATH+'train_image_matrix.csv'           # combined training image-matrix features
TEST_IMAGE_MATRIX_PATH_ = DATASET_PATH+'test_image_matrix.csv'            # combined test image-matrix features
TEST_IMAGE_MATRIX_PATH = DATA_PATH+'test_image_matrix.csv'                # test image-matrix features (data dir copy)
TRAIN_BLACK_0_3000_IMAGE_MATRIX_PATH = DATA_PATH+'train_black_0_3000_image_matrix.csv'  # training black, samples 0-3000
TRAIN_BLACK_3000_IMAGE_MATRIX_PATH = DATA_PATH+'train_black_3000_image_matrix.csv'      # training black, samples 3000-
TEST_0_3000_IMAGE_MATRIX_PATH = DATA_PATH+'test_0_3000_image_matrix.csv'                # test image matrix, samples 0-3000
TEST_3000_6000_IMAGE_MATRIX_PATH = DATA_PATH+'test_3000_6000_image_matrix.csv'          # test image matrix, samples 3000-6000
TEST_6000_IMAGE_MATRIX_PATH = DATA_PATH+'test_6000_image_matrix.csv'                    # test image matrix, samples 6000-
TRAIN_WHITE_DATASET_FILENAME = 'train_white_dataset.csv'  # training white dataset file name
TRAIN_BLACK_DATASET_FILENAME = 'train_black_dataset.csv'  # training black dataset file name
TRAIN_DATASET_FILENAME = 'train_dataset.csv'              # training dataset file name
TEST_DATASET_FILENAME = 'test_dataset.csv'                # test dataset file name
TRAIN_DIRTY_DATASET_FILENAME = 'train_dirty_dataset.csv'  # training "dirty" dataset file name
TEST_DIRTY_DATASET_FILENAME = 'test_dirty_dataset.csv'    # test "dirty" dataset file name
TEST_DIRTY_OPCODE_N_GRAM_PATH = DATASET_PATH+'test_dirty_opcode_n_gram.csv'  # opcode n-grams of dirty test samples
TEST_DIRTY_26_PATH = DATASET_PATH+'test_dirty_26.csv'                        # 26-feature set of dirty test samples
TRAIN_WHITE_CUSTOM_STRINGS_PATH = 'train_white_strings.csv'  # custom-string dataset, training white
TRAIN_BLACK_CUSTOM_STRINGS_PATH = 'train_black_strings.csv'  # custom-string dataset, training black
TEST_CUSTOM_STRINGS_PATH = 'test_strings.csv'                # custom-string dataset, test set
TRAIN_WHITE_STRING_FEATURES_PATH = DATA_PATH+'train_white_string_features.csv'  # string features, training white
TRAIN_BLACK_STRING_FEATURES_PATH = DATA_PATH+'train_black_string_features.csv'  # string features, training black
TEST_STRING_FEATURES_PATH = DATA_PATH+'test_string_features.csv'                # string features, test set
TRAIN_WHITE_OPCODE_3_GRAM_PATH = DATA_PATH+'train_white_opcode_3_gram.csv'  # opcode 3-gram features, training white
TRAIN_BLACK_OPCODE_3_GRAM_PATH = DATA_PATH+'train_black_opcode_3_gram.csv'  # opcode 3-gram features, training black
TRAIN_OPCODE_3_GRAM_PATH = DATASET_PATH+'train_opcode_3_gram.csv'           # combined training opcode 3-gram features
TEST_OPCODE_3_GRAM_PATH_ = DATASET_PATH+'test_opcode_3_gram.csv'            # combined test opcode 3-gram features
TEST_OPCODE_3_GRAM_PATH = DATA_PATH+'test_opcode_3_gram.csv'                # test opcode 3-gram features (data dir copy)
TEST_0_3000_OPCODE_3_GRAM_PATH = DATA_PATH+'test_0_3000_opcode_3_gram.csv'        # test opcode 3-grams, samples 0-3000
TEST_3000_6000_OPCODE_3_GRAM_PATH = DATA_PATH+'test_3000_6000_opcode_3_gram.csv'  # test opcode 3-grams, samples 3000-6000
TEST_6000_OPCODE_3_GRAM_PATH = DATA_PATH+'test_6000_opcode_3_gram.csv'            # test opcode 3-grams, samples 6000-
MODEL_SCORE_PATH = MODEL_PATH+'score'                     # model-score file
# NOTE(review): "IAMGE" below is a typo for "IMAGE", but the names are kept
# as-is because other modules import these constants by name.
IAMGE_MATRIX_RFC_MODEL_PATH = MODEL_PATH+'image_matrix_rfc.pkl'    # RFC model (image-matrix features)
IAMGE_MATRIX_XGB_MODEL_PATH = MODEL_PATH+'image_matrix_xgb.pkl'    # XGB model (image-matrix features)
IAMGE_MATRIX_LGB_MODEL_PATH = MODEL_PATH+'image_matrix_lgb.pkl'    # LGB model (image-matrix features)
IAMGE_MATRIX_RFC_MODEL_SCORE_PATH = MODEL_PATH+'image_matrix_rfc.score'  # RFC model score
IAMGE_MATRIX_XGB_MODEL_SCORE_PATH = MODEL_PATH+'image_matrix_xgb.score'  # XGB model score
IAMGE_MATRIX_LGB_MODEL_SCORE_PATH = MODEL_PATH+'image_matrix_lgb.score'  # LGB model score
BASELINE_RFC_MODEL_PATH = MODEL_PATH+'baseline_rfc.pkl'            # baseline RFC model
BASELINE_RFC_MODEL_SCORE_PATH = MODEL_PATH+'baseline_rfc.score'    # baseline RFC model score
CUSTOM_STRING_RFC_MODEL_PATH = MODEL_PATH+'custom_string_rfc.pkl'          # custom-string RFC model
CUSTOM_STRING_RFC_MODEL_SCORE_PATH = MODEL_PATH+'custom_string_rfc.score'  # custom-string RFC model score
COMBINE_RFC_MODEL_PATH = MODEL_PATH+'combine_rfc.pkl'              # combined-feature RFC model
COMBINE_RFC_MODEL_SCORE_PATH = MODEL_PATH+'combine_rfc.score'      # combined-feature RFC model score
STACKING_MODEL_PATH = MODEL_PATH+'stacking.pkl'                    # stacking ensemble model
STACKING_MODEL_SCORE_PATH = MODEL_PATH+'stacking.score'            # stacking ensemble model score
MALICIOUS_SAMPLE_DETECTION_MODEL_PATH = MODEL_PATH+'malicious_sample_detection.pkl'         # final detection model
MALICIOUS_SAMPLE_DETECTION_SELECTOR_PATH = MODEL_PATH+'malicious_sample_detection.selector' # final feature selector
COMBINE_RFC_SELECTOR_PATH = MODEL_PATH+'combine_rfc_selector.pkl'  # combined-feature RFC selector
OPCODE_N_GRAM_MODEL_PATH = MODEL_PATH+'opcode_n_gram.pkl'          # opcode n-gram model
OPCODE_N_GRAM_MODEL_SCORE_PATH = MODEL_PATH+'opcode_n_gram.score'  # opcode n-gram model score
DIRTY_DATASET_MODEL_PATH = MODEL_PATH+'dirty_dataset_rfc.pkl'      # model trained on the dirty dataset
DIRTY_DATASET_MODEL_SCORE_PATH = MODEL_PATH+'dirty_dataset_rfc.score'  # dirty-dataset model score
RESULT_PATH = './result.csv'                              # prediction results output file
# Make sure the output directories exist.  Using exist_ok=True instead of a
# check-then-create pair avoids the TOCTOU race where another process creates
# the directory between the os.path.exists() test and os.makedirs().
os.makedirs(DATASET_PATH, exist_ok=True)
os.makedirs(MODEL_PATH, exist_ok=True)

# Number of worker threads used by the feature-extraction thread pools.
THREAD_NUM = 64
| [
"1058198502@qq.com"
] | 1058198502@qq.com |
bd49b4bc90efd2d2ceda83f672ca908ee94d8909 | 520e7d0bdc294e89e807ffc5d0277e0b1df035d4 | /taskloaf/object_ref.py | 4021c994f4aaf111c88d9cb39bd4b13d507c22d4 | [
"MIT"
] | permissive | tanwanirahul/taskloaf | 5a454e683dbf681f0a417911280c12176bd4a3a6 | 7f7b027ef18b8475922054ccc44dfcb5de0433bc | refs/heads/master | 2020-04-20T20:55:52.124091 | 2018-12-10T19:21:45 | 2018-12-10T19:21:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,008 | py | import asyncio
import taskloaf.serialize
import taskloaf.allocator
import taskloaf.refcounting
def put(worker, obj):
    """Wrap a local Python object in a FreshObjectRef owned by `worker`."""
    return FreshObjectRef(worker, obj)
def alloc(worker, nbytes):
    """Allocate `nbytes` from the worker's allocator, returned as an ObjectRef.

    The reference's deletion callback frees the allocation; deserialize is
    False because the memory holds raw bytes.
    """
    block = worker.allocator.malloc(nbytes)

    def release(_id, worker=worker, ptr=block):
        # Default arguments pin worker/ptr at definition time.
        worker.allocator.free(ptr)

    return ObjectRef(taskloaf.refcounting.Ref(worker, release), block, False)
def submit_ref_work(worker, to, f):
    """Ship callable `f` to worker `to` as reference-counted work."""
    fn_ref = put(worker, f).convert()
    worker.send(to, worker.protocol.REFWORK, [fn_ref, b''])
def setup_plugin(worker):
    """Install the object-reference protocol on `worker`.

    Requires the allocator and ref_manager plugins to be set up first.
    """
    assert(hasattr(worker, 'allocator'))
    assert(hasattr(worker, 'ref_manager'))
    worker.object_cache = dict()
    for name, handler in (
        ('REMOTEGET', handle_remote_get),
        ('REMOTEPUT', handle_remote_put),
        ('REFWORK', handle_ref_work),
    ):
        worker.protocol.add_msg_type(name, type=ObjectMsg, handler=handler)
def handle_ref_work(worker, args):
    """REFWORK handler: resolve the function ref, then await it as work."""
    fn_ref = args[0]

    async def task(worker):
        fn = await fn_ref.get()
        await worker.wait_for_work(fn)

    worker.start_async_work(task)
def is_ref(x):
    """Return True when `x` is any kind of taskloaf object reference."""
    return isinstance(x, (FreshObjectRef, ObjectRef))
"""
This class barely needs to do anything because python already uses reference
counting for GC. It just needs to make sure that once it is serialized, all
serialized versions with the same id point to the same serialized chunk of
memory. This is based on the observation that in order to access a chunk of
memory from another worker, the reference first has to arrive at that other
worker and thus serialization of the reference can be used as a trigger for
serialization of the underlying object.
"""
class FreshObjectRef:
    """Reference to an object that so far lives only on the creating worker.

    Until first serialization, Python's own reference counting manages the
    object.  Pickling (via __reduce__) converts this to a shared ObjectRef,
    so all serialized copies with the same id point at one serialized chunk
    of memory (see the module comment above).
    """
    def __init__(self, worker, obj):
        self.worker = worker
        self._id = self.worker.get_new_id()  # unique id on the owning worker
        self.obj = obj
        self.objref = None  # lazily-created shared ObjectRef

    async def get(self):
        """Async accessor; trivially local for a fresh ref."""
        return self.get_local()

    def get_local(self):
        return self.obj

    def __reduce__(self):
        # Serialization trigger: convert to the shared ObjectRef and pickle
        # that instead of this object.
        objref = self.convert()
        return (FreshObjectRef.reconstruct, (objref,))

    @classmethod
    def reconstruct(cls, objref):
        # Unpickling yields the ObjectRef directly, not a FreshObjectRef.
        return objref

    def convert(self):
        """Return the shared ObjectRef, creating it on the first call."""
        if self.objref is not None:
            return self.objref
        else:
            return self._new_ref()

    def _new_ref(self):
        # Serialize the object (unless it is already raw bytes), copy the
        # bytes into allocator-owned memory, and cache the live object under
        # (addr, _id) so local getters skip deserialization.
        deserialize, child_refs, serialized_obj = serialize_if_needed(
            self.worker, self.obj
        )
        nbytes = len(serialized_obj)
        ptr = self.worker.allocator.malloc(nbytes)
        ptr.deref()[:] = serialized_obj
        self.worker.object_cache[(self.worker.addr, self._id)] = self.obj
        def on_delete(_id, worker = self.worker, ptr = ptr):
            # Runs when the refcount hits zero: drop the cache entry and
            # free the allocation.  Defaults pin worker/ptr at definition.
            key = (worker.addr, _id)
            del worker.object_cache[key]
            worker.allocator.free(ptr)
        ref = taskloaf.refcounting.Ref(
            self.worker, on_delete, _id = self._id, child_refs = child_refs
        )
        self.objref = ObjectRef(ref, ptr, deserialize)
        return self.objref

    def encode_capnp(self, msg):
        # Capnp encoding also forces conversion to a shared ObjectRef.
        self.convert().encode_capnp(msg)
def is_bytes(v):
    """Return True for raw byte buffers (bytes or memoryview)."""
    return isinstance(v, (bytes, memoryview))
def serialize_if_needed(worker, obj):
    """Return (needs_deserialize, child_refs, payload) for `obj`.

    Byte buffers pass through untouched with no child refs; everything else
    is serialized via taskloaf.serialize, which also extracts any child
    references embedded in the object.
    """
    if is_bytes(obj):
        return False, [], obj
    child_refs, payload = taskloaf.serialize.dumps(worker, obj)
    return True, child_refs, payload
"""
It seems like we're recording two indexes to the data:
-- the ptr itself
-- the (owner, _id) pair
This isn't strictly necessary, but has some advantages.
"""
class ObjectRef:
    """Shared, reference-counted handle to a (possibly remote) object.

    The data is indexed two ways: by the allocator pointer itself and by
    the (owner, _id) pair used as the key into each worker's object_cache
    (see the comment above this class).
    """
    def __init__(self, ref, ptr, deserialize):
        self.ref = ref                  # refcounting.Ref controlling lifetime
        self.ptr = ptr                  # allocator Ptr to the serialized bytes
        self.deserialize = deserialize  # False when the payload is raw bytes

    def key(self):
        # Cache key: the underlying Ref's (owner, _id) pair.
        return self.ref.key()

    @property
    def worker(self):
        return self.ref.worker

    async def get(self):
        """Return the (deserialized) object, fetching remotely if needed."""
        await self._ensure_available()
        self._ensure_deserialized()
        return self.get_local()

    async def get_buffer(self):
        """Return the raw serialized buffer, fetching remotely if needed."""
        await self._ensure_available()
        return self.ptr.deref()

    def get_local(self):
        return self.worker.object_cache[self.key()]

    async def _ensure_available(self):
        self.ref._ensure_child_refs_deserialized()
        # A Future in the cache means a remote fetch is already in flight;
        # wait for it to resolve.
        if self.key() in self.worker.object_cache:
            val = self.worker.object_cache[self.key()]
            if isinstance(val, asyncio.Future):
                await val
        # No local pointer to the bytes means we have to ask the owner.
        ptr_accessible = self.ptr is not None
        if not ptr_accessible:
            await self._remote_get()

    def _ensure_deserialized(self):
        # Deserialize from the local buffer on first access and cache it.
        if self.key() not in self.worker.object_cache:
            self._deserialize_and_store(self.ptr.deref())

    async def _remote_get(self):
        # Park a Future in the cache so concurrent getters can await it,
        # then request the bytes from the owning worker.
        future = asyncio.Future(loop = self.worker.ioloop)
        self.worker.object_cache[self.key()] = future
        self.worker.send(
            self.ref.owner, self.worker.protocol.REMOTEGET, [self, b'']
        )
        return (await future)

    def _remote_put(self, buf):
        # REMOTEPUT arrived: swap the pending Future for the real object
        # and wake the waiters.
        future = self.worker.object_cache[self.key()]
        obj = self._deserialize_and_store(buf)
        future.set_result(obj)

    def _deserialize_and_store(self, buf):
        if self.deserialize:
            assert(isinstance(self.ref.child_refs, list))
            out = taskloaf.serialize.loads(
                self.ref.worker, self.ref.child_refs, buf
            )
        else:
            out = buf
        self.worker.object_cache[self.key()] = out
        return out

    def __getstate__(self):
        # Pickle only the triple needed to reconstruct the handle.
        return dict(
            ref = self.ref,
            deserialize = self.deserialize,
            ptr = self.ptr,
        )

    def encode_capnp(self, msg):
        self.ref.encode_capnp(msg.ref)
        msg.deserialize = self.deserialize
        self.ptr.encode_capnp(msg.ptr)

    @classmethod
    def decode_capnp(cls, worker, msg):
        # Bypass __init__: the fields are filled straight from the capnp
        # message, including the remote-owner pointer.
        objref = ObjectRef.__new__(ObjectRef)
        objref.ref = taskloaf.refcounting.Ref.decode_capnp(worker, msg.ref)
        objref.deserialize = msg.deserialize
        objref.ptr = taskloaf.allocator.Ptr.decode_capnp(
            worker, objref.ref.owner, msg.ptr
        )
        return objref
class ObjectMsg:
    """Capnp (de)serialization of the [ObjectRef, bytes] message payloads
    used by the REMOTEGET/REMOTEPUT/REFWORK message types."""
    @staticmethod
    def serialize(args):
        # args is a two-element [objref, value-buffer] pair.
        ref, v = args
        m = taskloaf.message_capnp.Message.new_message()
        m.init('object')
        ref.encode_capnp(m.object.objref)
        m.object.val = bytes(v)
        return m
    @staticmethod
    def deserialize(worker, msg):
        # Returns the same (objref, buffer) pair shape that serialize took.
        return (
            ObjectRef.decode_capnp(worker, msg.object.objref),
            msg.object.val
        )
def handle_remote_get(worker, args):
    """REMOTEGET handler: reply to the requester with the object's bytes."""
    # Capture the current message now: worker.cur_msg may have changed by
    # the time the async reply below actually runs, and the reply needs the
    # requester's address (msg.sourceAddr).
    msg = worker.cur_msg
    async def reply(w):
        worker.send(
            msg.sourceAddr,
            worker.protocol.REMOTEPUT,
            [args[0], await args[0].get_buffer()]
        )
    worker.run_work(reply)
def handle_remote_put(worker, args):
    """REMOTEPUT handler: hand the received buffer to the waiting ObjectRef."""
    objref, buf = args
    objref._remote_put(buf)
| [
"t.ben.thompson@gmail.com"
] | t.ben.thompson@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.