blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b1084a090a7ff81f0b9175324a0fccd57c80d89f | f1a6013a41ea5d49e034d1932991ef039e767e34 | /utils/pycraft/networking/types/enum.py | 61aa2384964941e48560e53764ea808ae78b02e9 | [
"Apache-2.0"
] | permissive | Merrg1n/PCRC | a544c234ea1eea79bb6fb235cc610d14090d6a7f | 0af9f6d3a1f9f2e0b78b71241176968b0e5983af | refs/heads/master | 2023-01-24T01:16:57.956263 | 2020-06-18T09:10:11 | 2020-06-18T09:10:11 | 256,414,160 | 2 | 1 | Apache-2.0 | 2020-04-30T04:01:53 | 2020-04-17T05:54:22 | Python | UTF-8 | Python | false | false | 3,179 | py | """Types for enumerations of values occurring in packets, including operations
for working with these values.
The values in an enum are given as class attributes with UPPERCASE names.
These classes are usually not supposed to be instantiated, but sometimes an
instantiatable class may subclass Enum to provide class enum attributes in
addition to other functionality.
"""
from .utility import Vector
__all__ = (
'Enum', 'BitFieldEnum', 'AbsoluteHand', 'RelativeHand', 'BlockFace',
'Difficulty', 'Dimension', 'GameMode', 'OriginPoint'
)
class Enum(object):
# Return a human-readable string representation of an enum value.
@classmethod
def name_from_value(cls, value):
for name, name_value in cls.__dict__.items():
if name.isupper() and name_value == value:
return name
class BitFieldEnum(Enum):
@classmethod
def name_from_value(cls, value):
if not isinstance(value, int):
return
ret_names = []
ret_value = 0
for cls_name, cls_value in sorted(
[(n, v) for (n, v) in cls.__dict__.items()
if isinstance(v, int) and n.isupper() and v | value == value],
reverse=True, key=lambda p: p[1]
):
if ret_value | cls_value != ret_value or cls_value == value:
ret_names.append(cls_name)
ret_value |= cls_value
if ret_value == value:
return '|'.join(reversed(ret_names)) if ret_names else '0'
# Designation of one of a player's hands, in absolute terms.
class AbsoluteHand(Enum):
LEFT = 0
RIGHT = 1
# Designation of one a player's hands, relative to a choice of main/off hand.
class RelativeHand(Enum):
MAIN = 0
OFF = 1
# Designation of one of a block's 6 faces.
class BlockFace(Enum):
BOTTOM = 0 # -Y
TOP = 1 # +Y
NORTH = 2 # -Z
SOUTH = 3 # +Z
WEST = 4 # -X
EAST = 5 # +X
# A dict mapping Vector tuples to the corresponding BlockFace values.
# When accessing this dict, plain tuples also match. For example:
# >>> BlockFace.from_vector[0, 0, -1] == BlockFace.NORTH
# True
from_vector = {
Vector(0, -1, 0): BOTTOM,
Vector(0, +1, 0): TOP,
Vector(0, 0, -1): NORTH,
Vector(0, 0, +1): SOUTH,
Vector(-1, 0, 0): WEST,
Vector(+1, 0, 0): EAST,
}
# A dict mapping BlockFace values to unit Position tuples.
# This is the inverse mapping of face_by_position. For example:
# >>> BlockFace.to_vector[BlockFace.NORTH]
# Position(x=0, y=0, z=-1)
to_vector = {fce: pos for (pos, fce) in from_vector.items()}
# Designation of a world's difficulty.
class Difficulty(Enum):
PEACEFUL = 0
EASY = 1
NORMAL = 2
HARD = 3
# Designation of a world's dimension.
class Dimension(Enum):
NETHER = -1
OVERWORLD = 0
END = 1
# Designation of a player's gamemode.
class GameMode(Enum):
SURVIVAL = 0
CREATIVE = 1
ADVENTURE = 2
SPECTATOR = 3
# Currently designates an entity's feet or eyes.
# Used in the Face Player Packet
class OriginPoint(Enum):
FEET = 0
EYES = 1
| [
"yqx791125833@gmail.com"
] | yqx791125833@gmail.com |
3149d589f255cdf12badc4b0f417d19889688f11 | c48a2c2141cb57bfc6be30ae9e18b5526e1ef625 | /simvascular-python-scripts/geom_stats_demo.py | 9656d83aaf661e61f36a2f4ae996b956646e8acb | [] | no_license | Arash67/cardiovascular | 4e3e2b485e2fdb5a4d6e413261521fd20613addd | d2db6cdec30225fd89a37e9ef7dc99dfdb25d4a3 | refs/heads/master | 2023-08-10T16:43:52.883683 | 2021-09-29T20:33:16 | 2021-09-29T20:33:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,682 | py | from sv import *
import sv_vis as vis
import random, os
# ##############################################################################
# This script is a demo of the various features offered by the Geom.xyz API set.
# Some demos use a simple intersecting pair of hollow cylinders, while others
# make use of a simple 1 x 1 x 1 cube so as to make comprehension of the output
# format and content easier.
# ##############################################################################
#
# Creates a lofted surface from the provided source path with circular contours
# with radii +/- little value from initial_radius.
#
# Args:
# src_path_name (String): Name of the source path.
# initial_radius (double): Initial "average" radius to use.
# Returns:
# String: Name of the resulting lofted solid.
def create_surface_from_path(src_path_name, initial_radius):
# Load in the source path and store the position points.
path = Path.pyPath()
path.GetObject(src_path_name)
path_pos_points = path.GetPathPosPts()
# Create contours from the points.
kernel = "Circle"
Contour.SetContourKernel(kernel)
prev_radius = initial_radius # Last radius from which to add/subtract a random number.
path_ctr_pds = [] # List of polydata objects created from the contours.
# Extract every 10'th contour.
for id in range(int(path.GetPathPtsNum() / 10)):
contour = Contour.pyContour()
# Create a new blank contour object.
path_contour_name = src_path_name + "-contour" + str(id * 10)
create_from_point = id * 10
contour.NewObject(path_contour_name, src_path_name, create_from_point)
# Randomize the radius and create the circular contour. Coords for the
# center must be defined in absolute 3D space, so we must grab the real
# position point from the path data.
center_pt = path_pos_points[create_from_point]
radius = prev_radius + 0 * (random.random() - 0.5)
prev_radius = radius
contour.SetCtrlPtsByRadius(center_pt, radius)
# Extract a polydata object from the created contour and save its name in the list.
pd_path_name = path_contour_name + "-pd"
path_ctr_pds.append(pd_path_name)
contour.GetPolyData(pd_path_name)
# Resample the contour polydata objects.
num_samples = 60 # Number of samples to take around circumference of contour?
path_ctrs_pds_rspl = []
for id in path_ctr_pds:
new_id = id + "_resampled"
path_ctrs_pds_rspl.append(new_id)
Geom.SampleLoop(id, num_samples, new_id)
# Loft the resampled contours.
path_lofted_name = src_path_name + "_lofted"
num_contours = len(path_ctrs_pds_rspl) * 4 # Including endpoints, how many contours to interpolate between the end caps.
num_linear_pts_along_length = 120 # ?
num_modes = 20 # ?
use_FFT = 0 # ?
use_linear_sample_along_length = 1 # Linearly interpolate the contours see num_contours_to_loft.
Geom.LoftSolid(path_ctrs_pds_rspl, path_lofted_name, num_samples,
num_contours, num_linear_pts_along_length, num_modes,
use_FFT, use_linear_sample_along_length)
return path_lofted_name
#
# Initialize the first path.
#
# Create new path object.
path1_name = "path1"
path1 = Path.pyPath()
path1.NewObject(path1_name)
# Give it some points.
path1.AddPoint([2.0, 2.0, 0.0])
path1.AddPoint([3.0, 3.0, 0.0])
path1.AddPoint([4.0, 4.0, 0.0])
path1.AddPoint([5.0, 5.0, 0.0])
# Generate the path from the added control points.
path1.CreatePath()
#
# Initialize the second path.
#
# Create new path object.
path2_name = "path2"
path2 = Path.pyPath()
path2.NewObject(path2_name)
# Give it some points.
path2.AddPoint([0.0, 0.0, 0.0])
path2.AddPoint([0.0, 1.0, 0.0])
path2.AddPoint([0.0, 2.0, 0.0])
path2.AddPoint([0.0, 3.0, 0.0])
path2.AddPoint([0.0, 4.0, 0.0])
# Generate the path from the added control points.
path2.CreatePath()
# Create surfaces from the paths.
path1_surface_name = create_surface_from_path(path1_name, 1.0)
path2_surface_name = create_surface_from_path(path2_name, 2.0)
path1_cap_surface_name = path1_surface_name + "_capped"
path2_cap_surface_name = path2_surface_name + "_capped"
VMTKUtils.Cap_with_ids(path1_surface_name, path1_cap_surface_name, 0, 0)
VMTKUtils.Cap_with_ids(path2_surface_name, path2_cap_surface_name, 0, 0)
merged_solid_name_pd = "merged_solid"
# Geom.Union(path1_surface_name, path2_surface_name, merged_solid_name_pd)
Geom.Union(path1_cap_surface_name, path2_cap_surface_name, merged_solid_name_pd)
#
# Initialize alternate cube testing platform.
#
cube_name = "cube"
cube_name_pd = cube_name + "_pd"
cube_size = [1.0, 1.0, 1.0]
cube_center = [0.0, 0.0, 0.0]
Solid.SetKernel('PolyData')
cube = Solid.pySolidModel()
cube.Box3d(cube_name, cube_size, cube_center)
cube.GetPolyData(cube_name_pd)
# ######################################
# BEGIN GEOM API DEMO
# ######################################
# ERR: VtkUtils_GetLines failed ?
# print("\n[geom_stats_demo] Geom.NumClosedLineRegions()")
# # result = Geom.NumClosedLineRegions(merged_solid_name_pd)
# result = Geom.NumClosedLineRegions(cube_name_pd)
# print("[geom_stats_demo] \tResult: " + str(result))
print("\n[geom_stats_demo] Geom.Translate()")
translate_vec = [1.0, 2.0, 3.0]
translated_solid_name = merged_solid_name_pd + "_translated"
Geom.Translate(merged_solid_name_pd, translate_vec, translated_solid_name)
print("\n[geom_stats_demo] Geom.ScaleAvg()")
scale_factor = 2.0
scaled_solid_name = merged_solid_name_pd + "_scaled"
Geom.ScaleAvg(merged_solid_name_pd, scale_factor, scaled_solid_name)
# ERR: VtkUtils_GetLines failed ?
# print("\n[geom_stats_demo] Geom.GetOrderedPts()")
# # result = Geom.GetOrderedPts(merged_solid_name_pd)
# result = Geom.GetOrderedPts(cube_name_pd)
# print("[geom_stats_demo] \tResult: " + str(result))
print("\n[geom_stats_demo] Geom.PolysClosed()")
result = Geom.PolysClosed(cube_name_pd)
print("[geom_stats_demo] \tResult: " + str(result))
print("\n[geom_stats_demo] Geom.SurfArea()")
result = Geom.SurfArea(cube_name_pd)
print("[geom_stats_demo] \tResult: " + str(result))
print("\n[geom_stats_demo] Geom.PrintTriStats()")
Geom.PrintTriStats(merged_solid_name_pd)
print("\n[geom_stats_demo] Geom.PrintSmallPolys()")
min_edge_size = 0.1
Geom.PrintSmallPolys(merged_solid_name_pd, min_edge_size)
print("\n[geom_stats_demo] Geom.Bbox()")
result = Geom.Bbox(cube_name_pd)
print("[geom_stats_demo] \tResult: (x1, y1, z1, x2, y2, z2) " + str(result))
print("\n[geom_stats_demo] Geom.Classify()")
point = [0.0, 0.0, 0.0]
result = Geom.Classify(merged_solid_name_pd, point)
print("[geom_stats_demo] \tResult: " + str(result))
# TODO(Dave or other): SolidModel.GetRegionIds() relies on an unimplemented function.
# RR: sys_geom_Get2DPgon called with non-planar input cvPolyData ?
# print("\n[geom_stats_demo] Geom.PtInPoly()")
# cube.GetRegionIds()
# faces_list = cube.GetFaceIds()
# print("faces_list:")
# print(faces_list)
# face_pd_name = "cube_face"
# cube.GetFacePolyData(face_pd_name, faces_list[0])
# point = [0.0, 0.0]
# use_previous_polygon = False
# result = Geom.PtInPoly(face_pd_name, point, use_previous_polygon)
# print("[geom_stats_demo] \tResult: " + str(result))
print("\n[geom_stats_demo] Geom.NumPts()")
result = Geom.NumPts(merged_solid_name_pd)
print("[geom_stats_demo] \tResult: " + str(result))
# TODO(Dave or other): 2dWindingNum isn't a valid keyword name. Python API is broken.
# print("\n[geom_stats_demo] Geom.2dWindingNum()")
# result = Geom.2dWindingNum(merged_solid_name)
# print("[geom_stats_demo] \tResult: " + str(result))
print("\n[geom_stats_demo] Geom.AvgPt()")
result = Geom.AvgPt(cube_name_pd)
print("[geom_stats_demo] \tResult: " + str(result))
print("\n[geom_stats_demo] Geom.FindDistance()")
point = [0.0, 0.0, 0.0]
result = Geom.FindDistance(merged_solid_name_pd, point)
print("[geom_stats_demo] \tResult: " + str(result))
print("\n[geom_stats_demo] Geom.Checksurface()")
result = Geom.Checksurface(merged_solid_name_pd)
print("[geom_stats_demo] \tResult: (num free edges, num bad edges) " + str(result))
print("\n[geom_stats_demo] Geom.Clean()")
cleaned_name = merged_solid_name_pd + "_cleaned"
Geom.Clean(merged_solid_name_pd, cleaned_name)
# Sometimes errors out with: "current kernel is not valid (6)" ?
print("\n[geom_stats_demo] Geom.All_union()")
inter_t = True
destination_name = merged_solid_name_pd + "_merged_again"
result = Geom.All_union([path1_surface_name, path2_surface_name], inter_t, destination_name)
print("\n[geom_stats_demo] Geom.Intersect()")
intersected_solid_name = "intersected_solid"
Geom.Intersect(merged_solid_name_pd, cube_name_pd, intersected_solid_name)
# TODO(Neil): Figure out how to visualize this model. How to get it into a solid model object?
window_name = "INTERSECTED Model"
ren1, renwin1 = vis.initRen(window_name)
actor1 = vis.pRepos(ren1, intersected_solid_name)
# Set the renderer to draw the solids as a wireframe.
vis.polyDisplayWireframe(ren1, intersected_solid_name)
print("\n[geom_stats_demo] Geom.Subtract()")
subtracted_solid_name = "subtracted_solid"
Geom.Subtract(merged_solid_name_pd, cube_name_pd, subtracted_solid_name)
# TODO(Neil): Figure out how to visualize this model. How to get it into a solid model object?
window_name = "SUBTRACTED Model"
ren2, renwin2 = vis.initRen(window_name)
actor2 = vis.pRepos(ren2, subtracted_solid_name)
# Set the renderer to draw the solids as a wireframe.
vis.polyDisplayWireframe(ren2, subtracted_solid_name)
vis.interact(ren1, 15000)
vis.interact(ren2, 15000)
| [
"davep@stanford.edu"
] | davep@stanford.edu |
e78af068c92d8800972f3fb53b36ca93057e4a61 | f6f7caf3829c2d0384f729f00b88f1c0db474f7a | /bin/py3-start-shell.py | a8c3f50e50faf429a3492566c9f3d21f0ed68b2c | [] | no_license | mcptr/dotfiles | 44e2f659264c8e98a293f8edd99b10fc75d9191d | 7f876a3cdba2178fefa9e6f7b4372f2caf70bb6d | refs/heads/master | 2020-09-29T08:44:48.552455 | 2019-12-28T21:08:31 | 2019-12-28T21:08:31 | 227,002,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | #!/usr/bin/env python3
import os
import sys
import re
import time
| [
"dev@metaceptron.com"
] | dev@metaceptron.com |
a019180e0075ec7252a926b93dd4c554d208df3b | c7a6f8ed434c86b4cdae9c6144b9dd557e594f78 | /ECE364/.PyCharm40/system/python_stubs/348993582/gio/_gio/DataStreamNewlineType.py | c4d1842167c838cc909223916099ae59e915cb29 | [] | no_license | ArbalestV/Purdue-Coursework | 75d979bbe72106975812b1d46b7d854e16e8e15e | ee7f86145edb41c17aefcd442fa42353a9e1b5d1 | refs/heads/master | 2020-08-29T05:27:52.342264 | 2018-04-03T17:59:01 | 2018-04-03T17:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | # encoding: utf-8
# module gio._gio
# from /usr/lib64/python2.6/site-packages/gtk-2.0/gio/_gio.so
# by generator 1.136
# no doc
# imports
import gio as __gio
import glib as __glib
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class DataStreamNewlineType(__gobject.GEnum):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
__enum_values__ = {
0: 0,
1: 1,
2: 2,
3: 3,
}
__gtype__ = None # (!) real value is ''
| [
"pkalita@princeton.edu"
] | pkalita@princeton.edu |
d7415963568e108a57b30ceb8792691ab224fd47 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/stdlib-big-1305.py | a5bb9e38d26c183277becd407d747652647d865c | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,997 | py | # ChocoPy library functions
def int_to_str(x: int) -> str:
digits:[str] = None
result:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str2(x: int, x2: int) -> str:
digits:[str] = None
digits2:[str] = None
result:str = ""
result2:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str3(x: int, x2: int, x3: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str4(x: int, x2: int, x3: int, x4: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str5(x: int, x2: int, x3: int, x4: int, x5: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
digits5:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
result5:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def str_to_int(x: str) -> int:
result:int = 0
digit:int = 0
char:str = ""
sign:int = 1
first_char:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = $Exp * 10 + digit
# Compute result
return result * sign
def str_to_int2(x: str, x2: str) -> int:
result:int = 0
result2:int = 0
digit:int = 0
digit2:int = 0
char:str = ""
char2:str = ""
sign:int = 1
sign2:int = 1
first_char:bool = True
first_char2:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int3(x: str, x2: str, x3: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
char:str = ""
char2:str = ""
char3:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int4(x: str, x2: str, x3: str, x4: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int5(x: str, x2: str, x3: str, x4: str, x5: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
result5:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
digit5:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
char5:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
sign5:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
first_char5:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
# Input parameters
c:int = 42
c2:int = 42
c3:int = 42
c4:int = 42
c5:int = 42
n:int = 10
n2:int = 10
n3:int = 10
n4:int = 10
n5:int = 10
# Run [-nc, nc] with step size c
s:str = ""
s2:str = ""
s3:str = ""
s4:str = ""
s5:str = ""
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
i = -n * c
# Crunch
while i <= n * c:
s = int_to_str(i)
print(s)
i = str_to_int(s) + c
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
5b327ff182d5d14550e1064d5c2185b50eca57df | 297497957c531d81ba286bc91253fbbb78b4d8be | /testing/web-platform/tests/webdriver/tests/bidi/__init__.py | 5cf37be6f9713640cde99d8b18bff4479093afbd | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 3,290 | py | from typing import Any, Callable, Dict
from webdriver.bidi.modules.script import ContextTarget
def recursive_compare(expected: Any, actual: Any) -> None:
if callable(expected):
expected(actual)
return
assert type(expected) == type(actual)
if type(expected) is list:
assert len(expected) == len(actual)
for index, _ in enumerate(expected):
recursive_compare(expected[index], actual[index])
return
if type(expected) is dict:
assert expected.keys() <= actual.keys(), \
f"Key set should be present: {set(expected.keys()) - set(actual.keys())}"
for key in expected.keys():
recursive_compare(expected[key], actual[key])
return
assert expected == actual
def any_bool(actual: Any) -> None:
assert isinstance(actual, bool)
def any_dict(actual: Any) -> None:
assert isinstance(actual, dict)
def any_int(actual: Any) -> None:
assert isinstance(actual, int)
def any_int_or_null(actual: Any) -> None:
if actual is not None:
any_int(actual)
def any_list(actual: Any) -> None:
assert isinstance(actual, list)
def any_string(actual: Any) -> None:
assert isinstance(actual, str)
def any_string_or_null(actual: Any) -> None:
if actual is not None:
any_string(actual)
def int_interval(start: int, end: int) -> Callable[[Any], None]:
def _(actual: Any) -> None:
any_int(actual)
assert start <= actual <= end
return _
async def create_console_api_message(bidi_session, context: str, text: str):
await bidi_session.script.call_function(
function_declaration="""(text) => console.log(text)""",
arguments=[{"type": "string", "value": text}],
await_promise=False,
target=ContextTarget(context["context"]),
)
return text
async def get_device_pixel_ratio(bidi_session, context: str) -> float:
result = await bidi_session.script.call_function(
function_declaration="""() => {
return window.devicePixelRatio;
}""",
target=ContextTarget(context["context"]),
await_promise=False)
return result["value"]
async def get_element_dimensions(bidi_session, context, element):
result = await bidi_session.script.call_function(
arguments=[element],
function_declaration="""(element) => {
const rect = element.getBoundingClientRect();
return { height: rect.height, width: rect.width }
}""",
target=ContextTarget(context["context"]),
await_promise=False,
)
return remote_mapping_to_dict(result["value"])
async def get_viewport_dimensions(bidi_session, context: str):
expression = """
({
height: window.innerHeight || document.documentElement.clientHeight,
width: window.innerWidth || document.documentElement.clientWidth,
});
"""
result = await bidi_session.script.evaluate(
expression=expression,
target=ContextTarget(context["context"]),
await_promise=False,
)
return remote_mapping_to_dict(result["value"])
def remote_mapping_to_dict(js_object) -> Dict:
obj = {}
for key, value in js_object:
obj[key] = value["value"]
return obj
| [
"mcastelluccio@mozilla.com"
] | mcastelluccio@mozilla.com |
8e1242fb0e5ba86f9c53c43d6463a61f6ac9212d | bbf1ae079309eca11270422d3f0d259d1515d430 | /numerical-tours/python/nt_solutions/ml_4_sgd/exo4.py | a9a0d7a44a6ba0f8f26665a55f1a3befa8b1563f | [
"BSD-2-Clause"
] | permissive | ZichaoDi/Di_MATLABTool | 5e6a67b613c4bcf4d904ddc47c2744b4bcea4885 | c071291c63685c236f507b2cb893c0316ab6415c | refs/heads/master | 2021-08-11T07:28:34.286526 | 2021-08-04T18:26:46 | 2021-08-04T18:26:46 | 149,222,333 | 9 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | tau = .002/n
ElistG = np.zeros( ( int(niter/err_rate), nsamples) )
for irun in np.arange(0,nsamples):
w = np.zeros( (p,1) )
G = np.zeros( (p,n) ) # keep track of gradients
g = np.zeros( (p,1) )
for it in np.arange(0,niter):
if np.mod(it,err_rate)==1:
ElistG[ int(it/err_rate),irun ] = E(w,X,y)
i = int( np.floor(np.random.rand()*n) ) # draw uniformly
g1 = nablaEi(w,i)
# update grad
g = g - MakeCol(G[:,i]) + g1
G[:,i] = g1.flatten()
#
w = w - tau * g
vmin = np.min( (np.min(Elist), ElistS.flatten().min(), ElistA.flatten().min(), ElistG.flatten().min() ) )
u = np.log10(ElistS-vmin+1e-20)
v = np.log10(ElistA -vmin+1e-20)
w = np.log10(ElistG -vmin+1e-20)
plt.clf
plt.plot(1,np.Inf, 'b')
plt.plot(1,np.Inf, 'r')
plt.plot(1,np.Inf, 'g')
plt.plot( np.arange(0,niter,err_rate), u, 'b' )
plt.plot( np.arange(0,niter,err_rate), v, 'r' )
plt.plot( np.arange(0,niter,err_rate), w, 'g' )
plt.axis((1,niter, np.min(w), np.max(w) ))
plt.title('$log(E(w_l) - min E)$')
plt.legend( ('SGD', 'SGA', 'SAG') )
| [
"wendydi@compute001.mcs.anl.gov"
] | wendydi@compute001.mcs.anl.gov |
66757f607bc7c592aa8e66ecab1146078a335165 | 6cb1a076068b875b7200e6f21c18c89ede521cb4 | /backend/venv/bin/pip2.7 | 117bb20874a48fa0526e714827f63ebecdb7e137 | [] | no_license | kacperadach/mbta_planner | 4c79abc8f75eea03f1056732836b97e501a8e21f | 545dca40bb7a6da7893ae90bc6791254cece5806 | refs/heads/master | 2021-01-22T10:33:33.138286 | 2017-04-10T04:46:26 | 2017-04-10T04:46:26 | 82,012,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | 7 | #!/home/kacper/apps/mbta_planner/backend/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"kacperadach@gmail.com"
] | kacperadach@gmail.com |
947d0c8aea8acf28e458806c3f6e092fc46c7f4f | 9063052d8e2c294efa3b501d42aef2ac59d84fa0 | /codingPractice/python/리스트배열.py | 7e7e3af621c60fc3cdba6e367467891d40774964 | [] | no_license | yes99/practice2020 | ffe5502d23038eabea834ebc2b18ff724f849c4a | 100ac281f4fe6d0f991213802fbd8524451f1ac2 | refs/heads/master | 2021-07-08T00:54:19.728874 | 2021-06-13T05:52:07 | 2021-06-13T05:52:07 | 245,789,109 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | gold = ["박인비", "오혜리", "김소희", "구본찬", "장혜진", "기보배", "진종오", "박상영", "최미선", "김우진", "이승윤"]
silver = ["김종현", "안바울", "정보경"]
iron = ["차동민", "이태훈", "정경은", "신승찬"]
print("금메달 리스트")
print(gold)
print("은메달 리스트")
print(silver)
print("동메달 리스트")
print(iron)
print(gold[0])
print(silver[1:2])
print(iron[:5])
gold[1] = "오혜리2"
print(gold)
medal = gold + silver + iron
print(medal)
medalcount = len(gold) + len(silver) + len(iron)
print(medalcount) | [
"yes950324@naver.com"
] | yes950324@naver.com |
a3da7aaf43b349eb9cfafdd0d2439095ae10b4c0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_patches.py | 25782f805aa8ebaefc49a490339be3c69495d80c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.verbs._patch import _PATCH
#calss header
class _PATCHES(_PATCH, ):
def __init__(self,):
_PATCH.__init__(self)
self.name = "PATCHES"
self.specie = 'verbs'
self.basic = "patch"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
40638c5b6177ae097333ebc52d7fc8c752ed6428 | 583fdb9f37dea28ada24e335f1e44ba6cf587770 | /多线程/1114 按序打印.py | 8f732076821bc5c73d9944b3679d5b9ff3256511 | [] | no_license | Ford-z/LeetCode | 8c4c30eeaa3d8f02b24c8d0058c60f09c3a6debe | 88eeca3780b4dc77efce4f14d317ed1c872cf650 | refs/heads/master | 2021-11-21T00:51:05.314084 | 2021-09-16T15:45:18 | 2021-09-16T15:45:18 | 194,425,542 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,403 | py | 我们提供了一个类:
public class Foo {
public void first() { print("first"); }
public void second() { print("second"); }
public void third() { print("third"); }
}
三个不同的线程 A、B、C 将会共用一个 Foo 实例。
一个将会调用 first() 方法
一个将会调用 second() 方法
还有一个将会调用 third() 方法
请设计修改程序,以确保 second() 方法在 first() 方法之后被执行,third() 方法在 second() 方法之后被执行。
示例 1:
输入: [1,2,3]
输出: "firstsecondthird"
解释:
有三个线程会被异步启动。
输入 [1,2,3] 表示线程 A 将会调用 first() 方法,线程 B 将会调用 second() 方法,线程 C 将会调用 third() 方法。
正确的输出是 "firstsecondthird"。
示例 2:
输入: [1,3,2]
输出: "firstsecondthird"
解释:
输入 [1,3,2] 表示线程 A 将会调用 first() 方法,线程 B 将会调用 third() 方法,线程 C 将会调用 second() 方法。
正确的输出是 "firstsecondthird"。
提示:
尽管输入中的数字似乎暗示了顺序,但是我们并不保证线程在操作系统中的调度顺序。
你看到的输入格式主要是为了确保测试的全面性。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/print-in-order
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
class Foo:
def __init__(self):
#在这题里面功能都是类似的,就是添加阻塞,然后释放线程,只是类初始化的时候不能包含有参数,所以要写一句acquire进行阻塞,调用其他函数的时候按顺序release释放。
self.l1 = threading.Lock()
self.l1.acquire()
self.l2 = threading.Lock()
self.l2.acquire()
def first(self, printFirst: 'Callable[[], None]') -> None:
# printFirst() outputs "first". Do not change or remove this line.
printFirst()
self.l1.release()
def second(self, printSecond: 'Callable[[], None]') -> None:
self.l1.acquire()
# printSecond() outputs "second". Do not change or remove this line.
printSecond()
self.l2.release()
def third(self, printThird: 'Callable[[], None]') -> None:
self.l2.acquire()
# printThird() outputs "third". Do not change or remove this line.
printThird()
| [
"noreply@github.com"
] | Ford-z.noreply@github.com |
a5b460ebd46a40b3cf4f710cd39b082a851aadd9 | 623c053e83ae0641523d777987a438abd7d67c09 | /infra02_blast_info_proc.py | 34dc557032a8782bf6c8bb7f8b3280a7ab1435ca | [] | no_license | friedpine/BioModule | 601a8d3064fea1e60311a6ce4a3a7114166a98e7 | 59086d9e484c45ac0406099c0f68be8e5e617802 | refs/heads/master | 2021-01-01T17:31:45.190621 | 2015-02-02T06:58:55 | 2015-02-02T06:58:55 | 21,689,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,194 | py | import re,os,sys
import subprocess
sys.path.append('/data/Analysis/fanxiaoying/project/project01_polyA-RNAseq/modules')
import infra01_pos2info as in01
def blast_fastas(fa_db,fa_target,dbfile,outfile,evalue,wordsize):
print "Begin Blasting!"
cmd1 = "/data/Analysis/fanxiaoying/software/ncbi-blast-2.2.28+/bin/makeblastdb -dbtype nucl -in %s -out %s " %(fa_db,dbfile)
cmd2 = "/data/Analysis/fanxiaoying/software/ncbi-blast-2.2.28+/bin/blastn -query %s -task blastn -db %s -outfmt 7 -gapopen 5 -gapextend 2 -penalty -3 -reward 2 -evalue %s -word_size %s -out %s" %(fa_target,dbfile,evalue,wordsize,outfile)
subprocess.call(cmd1,shell=True)
subprocess.call(cmd2,shell=True)
def blast_fmt7_reading(event,file,match_cuf,report):
file = open(file)
values = []
for line in file:
if re.match('^#',line):
continue
S1 = re.split('\s+',line)
if report == 'RC':
if int(S1[9])<int(S1[8]) and int(S1[3])>=match_cuf:
values.append([event]+S1[0:12])
return values
def blast_fmt7_out_read_db_miRNA(file,DB_NAME,tablename,report):
import MySQLdb as mb
file = open(file)
values = []
for line in file:
if re.match('^#',line):
continue
S1 = re.split('\s+',line)
if report == 'RC':
if int(S1[9])<int(S1[8]):
values.append((S1[0],S1[1],S1[6]))
conn=mb.connect(host="localhost",user="root",passwd="123456",db=DB_NAME)
cursor = conn.cursor()
cursor.executemany("insert into "+tablename+" values(%s,%s,%s) ",values);
conn.commit()
def blast_ref_positions(cursor,species,ref,pos_list1,pos_list2,list1_name,list2_name,evalue,wordsize,match_cuf,folder,record,report,server="TANG"):
seq1=in01.get_pos_seqs(cursor,species,ref,pos_list1,server)
seq2=in01.get_pos_seqs(cursor,species,ref,pos_list2,server)
r1_file = folder+'/'+record+'_r1.fa'
r2_file = folder+'/'+record+'_r2.fa'
db_file = folder+'/'+record+'_db.db'
result_file = folder+'/'+record+'_blast.txt'
if len(list1_name) != len(seq1) or len(list1_name) != len(seq1):
print "Names not the Same length with Sequences!"
return 0
f = open(r1_file,'w')
for x in range(len(seq1)):
f.write('>%s\n%s\n' %(list1_name[x],seq1[x]))
f.close()
f = open(r2_file,'w')
for x in range(len(seq2)):
f.write('>%s\n%s\n' %(list2_name[x],seq2[x]))
f.close()
blast_fastas(r1_file,r2_file,db_file,result_file,evalue,wordsize)
return blast_fmt7_reading(record,result_file,match_cuf,report)
def blast_genome_multi_positions(species,r1,r2,evalue,wordsize,report):
genome = ref[species]['fa']['genome']
r1_file = './r1.fa'
r2_file = './r2.fa'
in1.genome_ranges_2_fa_file('mm10',r1,r1_file,'r1')
in1.genome_ranges_2_fa_file('mm10',r2,r2_file,'r2')
blast_fastas(r1_file,r2_file,'./temp_db.db','./temp_blast_r1r2.txt',evalue,wordsize)
result = blast_fmt7_out_read('./temp_blast_r1r2.txt',report)
return result
def blast_two_sequences(seq1,seq2,evalue,wordsize,report):
r1_file = open('./r1.fa','w')
r2_file = open('./r2.fa','w')
print >>r1_file,'>'+'r1\n'+seq1+'\n'
print >>r2_file,'>'+'r2\n'+seq2+'\n'
r1_file.close()
r2_file.close()
blast_fastas('./r1.fa','./r2.fa','./temp_db.db','./temp_blast_r1r2.txt',evalue,wordsize)
result = blast_fmt7_out_read('./temp_blast_r1r2.txt',report)
return result
| [
"friedpine@gmail.com"
] | friedpine@gmail.com |
cdc871b20b9f4817b4d05b42131246bf18164e43 | 830a25c8a3e1ed050bc8211a7aa41456f1a3fb30 | /docs/conf.py | 07f9999f884b1aa1a81a56befaaf91d80868b628 | [
"Apache-2.0"
] | permissive | k0t3n/django-fluent-contents | 98ceb763157bea2aa0dd5e2df616a7590254aa06 | 964b94629462220ff5460dc870560b6d8dfd4384 | refs/heads/master | 2020-04-08T01:57:40.000009 | 2018-08-27T19:51:24 | 2018-08-27T19:51:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,557 | py | # -*- coding: utf-8 -*-
#
# django-fluent-contents documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 21 15:06:42 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('_ext'))
sys.path.insert(0, os.path.abspath('..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'djangodummy.settings'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx',
'djangoext.docstrings',
'djangoext.roles',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-fluent-contents'
copyright = u'2011-2017, Diederik van der Boor'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.4'
# The full version, including alpha/beta/rc tags.
release = '2.0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-fluent-contentsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-fluent-contents.tex', u'django-fluent-contents Documentation',
u'Diederik van der Boor', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-fluent-contents', u'django-fluent-contents Documentation',
[u'Diederik van der Boor'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-fluent-contents', u'django-fluent-contents Documentation',
u'Diederik van der Boor', 'django-fluent-contents', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'https://docs.djangoproject.com/en/dev': 'https://docs.djangoproject.com/en/dev/_objects',
'parler': ('http://django-parler.readthedocs.org/en/latest/', None),
'comments': ('http://django-contrib-comments.readthedocs.org/en/latest/', None),
}
| [
"vdboor@edoburu.nl"
] | vdboor@edoburu.nl |
381eea3f35bcda39830a030b2fa50af90ff2ab13 | daffa2518efcf00aec9d798c7e32cbb53bd66f01 | /fragmenstein/victor/_victor_mode.py | 9f0914654d2402a248066a05322bc48b87b4ff6a | [
"MIT"
] | permissive | matteoferla/Fragmenstein | 9023235594a002458f7d46eaa8344f3b5df54fb2 | c03945c089beec35b7aabb83dc1efd9cc57ac281 | refs/heads/master | 2023-09-01T09:40:52.695247 | 2023-08-30T06:30:22 | 2023-08-30T06:30:22 | 254,173,036 | 120 | 10 | MIT | 2023-07-06T23:08:02 | 2020-04-08T18:48:30 | Python | UTF-8 | Python | false | false | 338 | py | import enum
class VictorMinMode(enum.Enum):
"""Victor mode for minimisation
TODO this is not used yet
"""
NULL = -1 #: no minimisation at all
IGOR = 0 #: use Igor's minimisation (PyRosetta)
IGOR_NODISK = 1 #: use Igor's minimisation without writing to disk
FRITZ = 2 #: use Fritz's minimisation (openMM)
| [
"matteo.ferla@gmail.com"
] | matteo.ferla@gmail.com |
4d07e3354d036c43888b12b846be7bc48a1d0f97 | 527a8837c449d7fd0ccacf33cd6e6be0d6308038 | /postAnalysis/combineArffs.py | 046b297815849f6e17cea1a83ecc64030741dba0 | [] | no_license | nparslow/L2FrenchWritingAnalyser | c4539cf67f6885224d83890c31d95080c49fa175 | 6708e560d65120d1946f1d08a460ca8fe97a548f | refs/heads/master | 2020-12-07T03:50:12.090153 | 2015-07-18T18:28:12 | 2015-07-18T18:28:12 | 39,304,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,792 | py | import codecs
import re
__author__ = 'nparslow'
def readarff( filename ):
vars = []
rows = []
header = ""
with codecs.open( filename, mode="r", encoding="utf8") as f1:
indata = False
for line in f1:
if line.lower().startswith("@attribute"):
att, name, typ = re.split(ur'\s', line.strip(), flags=re.UNICODE)
vars.append( (att, name, typ) )
elif line.lower().startswith("@data"):
indata = True
elif indata:
row = line.strip().split(',')
rows.append(row)
else:
# add to header
header += line
return header, vars, rows
def main():
arff1 = "/home/nparslow/Documents/AutoCorrige/Corpora/figures/testclass.arff"
arff2 = "/home/nparslow/Documents/AutoCorrige/Corpora/figures/testtrees.arff"
outarff = "/home/nparslow/Documents/AutoCorrige/Corpora/figures/testcombined.arff"
header = ""
header1, vars1, rows1 = readarff(arff1)
header2, vars2, rows2 = readarff(arff2)
with codecs.open( outarff, mode="w", encoding="utf8") as of:
of.write(header1)
nvars = 0
for i_var in range(len(vars1)-1):
var = vars1[i_var]
#print var
of.write( u"\t".join(var) + "\n")
nvars += 1
for i_var in range(len(vars2)):
var = vars2[i_var]
of.write( "\t".join(var) + "\n")
nvars += 1
of.write("\n")
of.write("@DATA\n")
rowlen = 0
for row1, row2 in zip(rows1, rows2):
of.write(",".join(row1[:-1]+row2) + "\n")
rowlen = len(row1[:-1] + row2)
print "vars", nvars, rowlen
if __name__ == "__main__":
main()
| [
"nparslow@yahoo.com.au"
] | nparslow@yahoo.com.au |
0a34545753e9dae7e7f451a5885ac854f7f9310c | be5d853783fab4162981c460c35856bf44bf2148 | /mayan/apps/quotas/tests/test_hooks.py | b700c3a9e5d8e422d53b5c8f22e742b1ece03aba | [
"Apache-2.0"
] | permissive | Hita-K/Mayan-EDMS | 50900189cf504807aa6e41d72fd8e4fc50d5d5b3 | a5b691b28f765d9eea7bf6b2016657c943fbbe4a | refs/heads/master | 2023-08-29T08:20:58.859259 | 2021-11-08T04:40:39 | 2021-11-08T04:40:39 | 295,247,959 | 0 | 0 | NOASSERTION | 2021-11-08T04:40:40 | 2020-09-13T22:21:57 | Python | UTF-8 | Python | false | false | 4,315 | py | import logging
from mayan.apps.common.tests.base import GenericViewTestCase
from mayan.apps.documents.models.document_models import Document
from mayan.apps.documents.permissions import (
permission_document_create, permission_document_new_version
)
from mayan.apps.documents.tests.base import DocumentTestMixin
from mayan.apps.documents.tests.literals import TEST_SMALL_DOCUMENT_PATH
from mayan.apps.sources.tests.mixins import (
DocumentUploadWizardViewTestMixin, DocumentVersionUploadViewTestMixin,
SourceTestMixin
)
from ..classes import QuotaBackend
from ..exceptions import QuotaExceeded
from ..quota_backends import DocumentCountQuota, DocumentSizeQuota
class QuotaHooksTestCase(
DocumentTestMixin, DocumentUploadWizardViewTestMixin,
DocumentVersionUploadViewTestMixin, SourceTestMixin, GenericViewTestCase
):
auto_upload_test_document = False
def setUp(self):
super(QuotaHooksTestCase, self).setUp()
# Increase the initial usage count to 1 by uploading a document
# as the test case user.
self._upload_test_document(_user=self._test_case_user)
self.test_case_silenced_logger_new_level = logging.FATAL + 10
self._silence_logger(name='mayan.apps.sources.views')
self._silence_logger(name='mayan.apps.common.middleware.error_logging')
def tearDown(self):
QuotaBackend.connect_signals()
super(QuotaHooksTestCase, self).tearDown()
def test_document_quantity_quota_and_source_upload_wizard_view_with_permission(self):
self.test_quota_backend = DocumentCountQuota
self.test_quota = DocumentCountQuota.create(
documents_limit=1,
document_type_all=True,
document_type_ids=(),
group_ids=(),
user_all=True,
user_ids=(),
)
self.test_quota_backend.signal.disconnect(
dispatch_uid='quotas_handler_process_signal',
sender=self.test_quota_backend.sender
)
self.grant_permission(permission=permission_document_create)
document_count = Document.objects.count()
with self.assertRaises(expected_exception=QuotaExceeded):
self._request_upload_wizard_view()
self.assertEqual(Document.objects.count(), document_count)
def test_document_size_quota_and_source_upload_wizard_view_with_permission(self):
self.test_quota_backend = DocumentSizeQuota
self.test_quota = DocumentSizeQuota.create(
document_size_limit=0.01,
document_type_all=True,
document_type_ids=(),
group_ids=(),
user_all=True,
user_ids=(),
)
self.test_quota_backend.signal.disconnect(
dispatch_uid='quotas_handler_process_signal',
sender=self.test_quota_backend.sender
)
self.grant_permission(permission=permission_document_create)
document_count = Document.objects.count()
with self.assertRaises(expected_exception=QuotaExceeded):
self._request_upload_wizard_view()
self.assertEqual(Document.objects.count(), document_count)
def test_document_size_quota_and_document_version_upload_with_access(self):
self.test_quota_backend = DocumentSizeQuota
self.test_quota = DocumentSizeQuota.create(
document_size_limit=0.01,
document_type_all=True,
document_type_ids=(),
group_ids=(),
user_all=True,
user_ids=(),
)
self.test_quota_backend.signal.disconnect(
dispatch_uid='quotas_handler_process_signal',
sender=self.test_quota_backend.sender
)
self.grant_access(
obj=self.test_document,
permission=permission_document_new_version
)
version_count = self.test_document.versions.count()
with self.assertRaises(expected_exception=QuotaExceeded):
with open(TEST_SMALL_DOCUMENT_PATH, mode='rb') as file_object:
self._request_document_version_upload_view(
source_file=file_object
)
self.test_document.refresh_from_db()
self.assertEqual(
self.test_document.versions.count(), version_count
)
| [
"roberto.rosario@mayan-edms.com"
] | roberto.rosario@mayan-edms.com |
bf4d1ed9fc87262204919ea3353d134aad41ab79 | 30eb942d849dab9250bbd541a8d7128d15be8556 | /rimware_impl/peripheral_simulator/characteristics/DeviceInfo.py | f90cbb756673fe3a8f42f6ed4623ff64bea671bd | [] | no_license | cjhuo/Lab_projects-EcoBT-HOST-N-SERVER | 55e42b1e4d5f88bc978f5b6c07ab3798626a88fa | 396cb823ed74552985f4afa157fe3887afe48b65 | refs/heads/master | 2020-06-06T12:25:23.111232 | 2014-01-31T07:03:02 | 2014-01-31T07:03:02 | 26,940,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | '''
Created on Jan 25, 2014
@author: cjhuo
'''
from Foundation import *
#from PyObjCTools import AppHelper
from config_peripheral import *
from objc import *
import struct, binascii
from Characteristic import Characteristic
class DeviceInfo(Characteristic):
def initializeInstance(self):
print "Initializing Characteristic Instance"
self.instance = CBMutableCharacteristic.alloc().initWithType_properties_value_permissions_(CBUUID.UUIDWithString_(self.UUID),
CBCharacteristicPropertyRead,
nil, # ensures the value is treated dynamically
CBAttributePermissionsReadable)
def initializeDescriptors(self):
print "Initializing descriptors.."
self.instance._.descriptors = [CBMutableDescriptor.alloc().
initWithType_value_(CBUUID.UUIDWithString_(CBUUIDCharacteristicUserDescriptionString),
u'DeviceInformation')]
'''
return unencrypted return value,
but should pre-packed into string
if value is not a string
'''
def handleReadRequest(self):
message = 0xC8E0EBFFFE16B31A
data = struct.pack("@Q", message)
return NSData.alloc().initWithBytes_length_(data, len(data))
| [
"chengjia.huo@gmail.com"
] | chengjia.huo@gmail.com |
985bbe4a43dea75355373e07eedc193387f121d0 | 98cb310b3a8dea5e07dc2359a07ef623e9a153d1 | /web-env/bin/html2text | e2e734afa81cc52de74bedd9ed310e0c7fce4171 | [
"MIT"
] | permissive | Amirsorouri00/web-search-engine | 6c600fb924f3b2e883f746e8075e33954effcc79 | 00bf463b29490f5285ee44cd351c6de131f04f3a | refs/heads/master | 2020-06-01T12:46:37.612714 | 2019-07-24T14:25:54 | 2019-07-24T14:25:54 | 190,783,910 | 0 | 0 | MIT | 2019-06-07T17:30:51 | 2019-06-07T17:30:51 | null | UTF-8 | Python | false | false | 308 | #!/home/amirsorouri00/Desktop/search-engine/myproject/ui/search-engine/web-search-engine/web-env/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from html2text.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"amirsorouri26@gmail.com"
] | amirsorouri26@gmail.com | |
d1450be5d59eb3da24bfbe1d94a664b1aa7aeebe | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_leaved.py | bdb9559efe761b1e3540d4581ec7bf021b77e9ab | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.nouns._leave import _LEAVE
#calss header
class _LEAVED(_LEAVE, ):
def __init__(self,):
_LEAVE.__init__(self)
self.name = "LEAVED"
self.specie = 'nouns'
self.basic = "leave"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e29494b2115d7a7c7d0572aa10dc2e2accfe0c66 | ef875440cf82b6eed61bf6d9d0c6acfae5f90ef4 | /Assument/1.py | a444598961f3afee6b63ec45bdecaa2c7788745a | [] | no_license | Nitesh101/test | 5ab9b1e23167f8496d90d15484d57328b7f1430e | 4c413b3a056a633c5bcf93ae21c999ff67eeaa95 | refs/heads/master | 2020-03-29T09:04:32.723099 | 2018-09-21T09:33:41 | 2018-09-21T09:33:41 | 149,740,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | x = int(input("Enter a x value : "))
y = int(input("Enter a y value : "))
for i in range(1,x+1):
for j in range(1,y+1):
print i*j,
print "\n"
| [
"m.veeranitesh@gmail.com"
] | m.veeranitesh@gmail.com |
eef711bdb5d2e6ade393c56e5b4d273b3421f4be | 3f7d5999bb7e5a75454c8df2c5a8adcd1a8341ff | /tests/unit/modules/network/fortios/test_fortios_log_webtrends_setting.py | 769546de3cc54d9be79f8ace802727bdb7211b7d | [] | no_license | ansible-collection-migration/ansible.fortios | f7b1a7a0d4b69c832403bee9eb00d99f3be65e74 | edad6448f7ff4da05a6c856b0e7e3becd0460f31 | refs/heads/master | 2020-12-18T13:08:46.739473 | 2020-02-03T22:10:49 | 2020-02-03T22:10:49 | 235,393,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,106 | py | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible_collections.ansible.fortios.plugins.modules import fortios_log_webtrends_setting
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.modules.fortios_log_webtrends_setting.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_log_webtrends_setting_creation(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_webtrends_setting': {
'server': '192.168.100.3',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_webtrends_setting.fortios_log_webtrends(input_data, fos_instance)
expected_data = {
'server': '192.168.100.3',
'status': 'enable'
}
set_method_mock.assert_called_with('log.webtrends', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_log_webtrends_setting_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_webtrends_setting': {
'server': '192.168.100.3',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_webtrends_setting.fortios_log_webtrends(input_data, fos_instance)
expected_data = {
'server': '192.168.100.3',
'status': 'enable'
}
set_method_mock.assert_called_with('log.webtrends', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_log_webtrends_setting_idempotent(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_webtrends_setting': {
'server': '192.168.100.3',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_webtrends_setting.fortios_log_webtrends(input_data, fos_instance)
expected_data = {
'server': '192.168.100.3',
'status': 'enable'
}
set_method_mock.assert_called_with('log.webtrends', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_log_webtrends_setting_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_webtrends_setting': {
'random_attribute_not_valid': 'tag',
'server': '192.168.100.3',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_webtrends_setting.fortios_log_webtrends(input_data, fos_instance)
expected_data = {
'server': '192.168.100.3',
'status': 'enable'
}
set_method_mock.assert_called_with('log.webtrends', 'setting', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
7bba4954b0a42558a69b51b0de935e1b954ee6d7 | cc1ca9bd521e74905ce5c251c1c199772a85f457 | /c7n/filters/health.py | fd0197698f705dde5c5639ee66b96dbde0a7293f | [
"Apache-2.0"
] | permissive | anup19991/cloud-custodian | 17d2899a8f7875f4a309fd9189f152e8411a9fcb | b9d7083f8688d41950d264526d064b62d030ed9b | refs/heads/master | 2021-01-18T14:42:21.899375 | 2018-02-11T01:15:22 | 2018-02-11T01:15:22 | 89,010,520 | 0 | 0 | null | 2017-04-21T18:01:54 | 2017-04-21T18:01:54 | null | UTF-8 | Python | false | false | 3,523 | py | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from c7n.utils import local_session, chunks, type_schema
from .core import Filter
class HealthEventFilter(Filter):
    """Check if there are health events related to the resources

    Health events are stored as annotation on a resource under the
    'c7n:HealthEvent' key.  Only resources that have at least one matching
    health event are returned by this filter.
    """
    # Filter schema: optional 'types' (list of AWS Health event type codes)
    # and 'statuses' (subset of open/upcoming/closed, default open+upcoming).
    schema = type_schema(
        'health-event',
        types={'type': 'array', 'items': {'type': 'string'}},
        statuses={'type': 'array', 'items': {
            'type': 'string',
            'enum': ['open', 'upcoming', 'closed']
        }})
    permissions = ('health:DescribeEvents', 'health:DescribeAffectedEntities',
                   'health:DescribeEventDetails')
    def process(self, resources, event=None):
        # Short-circuit: nothing to look up for an empty resource set.
        if not resources:
            return resources
        client = local_session(self.manager.session_factory).client('health')
        f = self.get_filter()
        # Index resources by their model id so affected-entity values coming
        # back from the Health API can be mapped to the original resources.
        resource_map = {r[self.manager.get_model().id]: r for r in resources}
        found = set()
        seen = set()
        # Query the Health API in batches of 100 entity values per call.
        for resource_set in chunks(resource_map.keys(), 100):
            f['entityValues'] = resource_set
            events = client.describe_events(filter=f)['events']
            # Skip events already processed in a previous batch.
            events = [e for e in events if e['arn'] not in seen]
            entities = []
            # Populates event descriptions and fills `entities` in place.
            self.process_event(events, entities)
            event_map = {e['arn']: e for e in events}
            for e in entities:
                rid = e['entityValue']
                if rid not in resource_map:
                    continue
                # Annotate the resource with the full event record.
                resource_map[rid].setdefault(
                    'c7n:HealthEvent', []).append(event_map[e['eventArn']])
                found.add(rid)
            seen.update(event_map.keys())
        # Only resources with at least one matching health event pass.
        return [resource_map[rid] for rid in found]
    def get_filter(self):
        """Build the DescribeEvents filter dict from policy data.

        EBS is special-cased because its Health service code ('EBS') differs
        from the resource model's service name.
        """
        m = self.manager
        if m.data['resource'] == 'ebs':
            service = 'EBS'
        else:
            service = m.get_model().service.upper()
        f = {'services': [service],
             'eventStatusCodes': self.data.get(
                 'statuses', ['open', 'upcoming'])}
        if self.data.get('types'):
            f['eventTypeCodes'] = self.data.get('types')
        return f
    def process_event(self, health_events, entities):
        """Attach descriptions to `health_events` and extend `entities`.

        Both arguments are mutated in place: each event dict gains a
        'Description' key, and `entities` receives every affected entity
        reported for the given events (fetched in batches of 10 event ARNs).
        """
        client = local_session(self.manager.session_factory).client('health')
        for event_set in chunks(health_events, 10):
            event_map = {e['arn']: e for e in event_set}
            for d in client.describe_event_details(
                    eventArns=event_map.keys()).get('successfulSet', ()):
                event_map[d['event']['arn']]['Description'] = d[
                    'eventDescription']['latestDescription']
            paginator = client.get_paginator('describe_affected_entities')
            entities.extend(list(itertools.chain(
                *[p['entities']for p in paginator.paginate(
                    filter={'eventArns': event_map.keys()})])))
| [
"kapilt@gmail.com"
] | kapilt@gmail.com |
3d2c196a911f8a01f1c1859e684b2e8d2cb128c1 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_get_label_data_request_wrapper.py | 15931040218f882420fd9f73bfbedbf4b353fd59 | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 984 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.common.model.api_request_header import ApiRequestHeader
from baiduads.videodata.model.label_data_request import LabelDataRequest
globals()['ApiRequestHeader'] = ApiRequestHeader
globals()['LabelDataRequest'] = LabelDataRequest
from baiduads.videodata.model.get_label_data_request_wrapper import GetLabelDataRequestWrapper
class TestGetLabelDataRequestWrapper(unittest.TestCase):
    """Unit-test stubs for the ``GetLabelDataRequestWrapper`` model.

    The generated test is still a placeholder; it performs no assertions
    until a real fixture with mandatory attributes is written.
    """

    def setUp(self):
        """No fixtures are required yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testGetLabelDataRequestWrapper(self):
        """Placeholder test for ``GetLabelDataRequestWrapper``.

        FIXME: construct the object with mandatory attributes using
        example values, e.g. ``model = GetLabelDataRequestWrapper()``.  # noqa: E501
        """
if __name__ == '__main__':
    # Allow running this test module stand-alone.
    unittest.main()
| [
"jiangyuan04@baidu.com"
] | jiangyuan04@baidu.com |
3e3c67f3870672215404026175c69d71e83aa1f4 | 2a75bc90e08104b1f8321603e4871959a4d6bf6c | /tests/service_atcoder.py | 7237cbf15dfb1578fd8936f43344e2cc67328832 | [
"MIT"
] | permissive | kyuridenamida/online-judge-tools | 27497a4fd57330dcaf5fb75ff8c33b1e8848345e | 59b37dcd8121af28413d72cbce74777b1f966b0e | refs/heads/master | 2020-04-27T20:38:59.722769 | 2019-03-09T06:41:36 | 2019-03-09T06:41:36 | 174,665,572 | 3 | 0 | null | 2019-03-09T08:02:44 | 2019-03-09T08:02:44 | null | UTF-8 | Python | false | false | 10,690 | py | # -*- coding: utf-8 -*-
import unittest
from onlinejudge.service.atcoder import AtCoderContest, AtCoderProblem, AtCoderService, AtCoderSubmission
class AtCoderSerivceTest(unittest.TestCase):
    """Integration tests for AtCoderService.

    NOTE(review): these tests hit the live atcoder.jp site, so they depend
    on network access and on the site's current content.  The class name
    carries a long-standing typo ("Serivce") which is kept to avoid breaking
    any external references to it.
    """
    def test_from_url(self):
        self.assertIsInstance(AtCoderService.from_url('https://atcoder.jp/'), AtCoderService)
        self.assertIsInstance(AtCoderService.from_url('https://beta.atcoder.jp/'), AtCoderService)
        self.assertIsInstance(AtCoderService.from_url('https://abc001.contest.atcoder.jp/'), AtCoderService)
        self.assertIsInstance(AtCoderService.from_url('https://atcoder.jp/contests/agc001/submissions/806160'), AtCoderService)
        # A non-AtCoder URL must not be recognized.
        self.assertIsNone(AtCoderService.from_url('https://codeforces.com/'))
    def test_iterate_contests(self):
        contests = list(AtCoderService().iterate_contests())
        contest_ids = [contest.contest_id for contest in contests]
        self.assertIn('arc001', contest_ids)
        self.assertIn('abc100', contest_ids)
        self.assertIn('kupc2012', contest_ids)
        # Spot-check the metadata of one finished contest (utpc2013).
        contest, = [contest for contest in contests if contest.contest_id == 'utpc2013']
        self.assertEqual(contest.get_start_time().year, 2014)
        self.assertEqual(contest.get_start_time().month, 3)
        self.assertEqual(contest.get_start_time().day, 2)
        self.assertEqual(contest.get_contest_name(), '東京大学プログラミングコンテスト2013')
        self.assertEqual(contest.get_duration().total_seconds(), 5 * 60 * 60)
        self.assertEqual(contest.get_rated_range(), 'All')
class AtCoderContestTest(unittest.TestCase):
    """Integration tests for AtCoderContest.

    NOTE(review): these tests scrape the live atcoder.jp contest pages and
    assert against the content published there at the time of writing.
    """
    def test_from_url(self):
        self.assertEqual(AtCoderContest.from_url('https://kupc2014.contest.atcoder.jp/tasks/kupc2014_d').contest_id, 'kupc2014')
        self.assertEqual(AtCoderContest.from_url('https://atcoder.jp/contests/agc030').contest_id, 'agc030')
        # A contests listing URL without a contest id is not a contest.
        self.assertIsNone(AtCoderContest.from_url('https://atcoder.jp/contests/'))
    def test_load_details(self):
        # Rated contest with a bounded rating range.
        contest = AtCoderContest.from_url('https://atcoder.jp/contests/keyence2019')
        self.assertEqual(contest.get_contest_name(lang='en'), 'KEYENCE Programming Contest 2019')
        self.assertEqual(contest.get_contest_name(lang='ja'), 'キーエンス プログラミング コンテスト 2019')
        self.assertEqual(contest.get_start_time().year, 2019)
        self.assertEqual(contest.get_start_time().month, 1)
        self.assertEqual(contest.get_start_time().day, 13)
        self.assertEqual(contest.get_duration().total_seconds(), 2 * 60 * 60)
        self.assertEqual(contest.get_can_participate(), 'All')
        self.assertEqual(contest.get_rated_range(), ' ~ 2799')
        self.assertEqual(contest.get_penalty().total_seconds(), 5 * 60)
        # Unrated educational contest (rated range shown as '-').
        contest = AtCoderContest.from_url('https://atcoder.jp/contests/dp')
        self.assertEqual(contest.get_contest_name(lang='ja'), 'Educational DP Contest / DP まとめコンテスト')
        self.assertEqual(contest.get_contest_name(lang='en'), 'Educational DP Contest')
        self.assertEqual(contest.get_start_time().year, 2019)
        self.assertEqual(contest.get_start_time().month, 1)
        self.assertEqual(contest.get_start_time().day, 6)
        self.assertEqual(contest.get_duration().total_seconds(), 5 * 60 * 60)
        self.assertEqual(contest.get_can_participate(), 'All')
        self.assertEqual(contest.get_rated_range(), '-')
        self.assertEqual(contest.get_penalty().total_seconds(), 5 * 60)
    def test_list_problems(self):
        # agc028 has two variants of problem F (F and F2).
        contest = AtCoderContest.from_url('https://atcoder.jp/contests/agc028')
        problems = contest.list_problems()
        self.assertEqual(len(problems), 7)
        self.assertEqual(problems[0].get_alphabet(), 'A')
        self.assertEqual(problems[0].get_task_name(), 'Two Abbreviations')
        self.assertEqual(problems[0].get_time_limit_msec(), 2000)
        self.assertEqual(problems[0].get_memory_limit_byte(), 1024 * 1000 * 1000)
        self.assertEqual(problems[5].get_alphabet(), 'F')
        self.assertEqual(problems[5].problem_id, 'agc028_f')
        self.assertEqual(problems[6].get_alphabet(), 'F2')
        self.assertEqual(problems[6].problem_id, 'agc028_f2')
    def test_iterate_submissions(self):
        contest = AtCoderContest.from_url('https://atcoder.jp/contests/code-festival-2014-exhibition-open')
        submissions = list(contest.iterate_submissions())
        self.assertGreater(len(submissions), 300)
        self.assertEqual(submissions[0].get_code_size(), 276)
        self.assertEqual(submissions[0].get_status(), 'WA')
        self.assertEqual(submissions[1].get_user_id(), 'snuke')
        self.assertEqual(submissions[1].get_status(), 'WA')
class AtCoderProblemTest(unittest.TestCase):
    """Integration tests for AtCoderProblem.

    NOTE(review): these tests fetch live problem pages from atcoder.jp.
    """
    def test_from_url(self):
        # Both the legacy per-contest subdomain and the new URL scheme parse.
        self.assertEqual(AtCoderProblem.from_url('https://kupc2014.contest.atcoder.jp/tasks/kupc2014_d').contest_id, 'kupc2014')
        self.assertEqual(AtCoderProblem.from_url('https://kupc2014.contest.atcoder.jp/tasks/kupc2014_d').problem_id, 'kupc2014_d')
        self.assertEqual(AtCoderProblem.from_url('https://atcoder.jp/contests/agc030/tasks/agc030_c').contest_id, 'agc030')
        self.assertEqual(AtCoderProblem.from_url('https://atcoder.jp/contests/agc030/tasks/agc030_c').problem_id, 'agc030_c')
    def test_load_details(self):
        problem = AtCoderProblem.from_url('https://atcoder.jp/contests/abc118/tasks/abc118_a')
        self.assertEqual(problem.get_alphabet(), 'A')
        self.assertEqual(problem.get_task_name(), 'B +/- A')
        self.assertEqual(problem.get_time_limit_msec(), 2000)
        self.assertEqual(problem.get_memory_limit_byte(), 1024 * 1000 * 1000)
        self.assertEqual(problem.get_score(), 100)
    def test_get_alphabet(self):
        self.assertEqual(AtCoderProblem.from_url('https://atcoder.jp/contests/agc028/tasks/agc028_f').get_alphabet(), 'F')
        self.assertEqual(AtCoderProblem.from_url('https://atcoder.jp/contests/agc028/tasks/agc028_f2').get_alphabet(), 'F2')
    def test_get_score(self):
        self.assertEqual(AtCoderProblem.from_url('https://atcoder.jp/contests/future-contest-2018-final/tasks/future_contest_2018_final_a').get_score(), 50000000)
        # Old contests without a score table report None.
        self.assertEqual(AtCoderProblem.from_url('https://atcoder.jp/contests/abc001/tasks/abc001_4').get_score(), None)
    def test_iterate_submissions(self):
        problem = AtCoderProblem.from_url('https://atcoder.jp/contests/abc119/tasks/abc119_c')
        submissions = problem.iterate_submissions()
        self.assertEqual(next(submissions).get_score(), 300)
        self.assertEqual(next(submissions).get_code_size(), 1208)
        self.assertEqual(next(submissions).get_exec_time_msec(), 2)
        self.assertEqual(next(submissions).get_memory_byte(), 256 * 1000)
class AtCoderSubmissionTest(unittest.TestCase):
    """Integration tests for AtCoderSubmission.

    NOTE(review): these tests download live submission pages (and raw source
    code) from atcoder.jp.
    """
    def test_from_url(self):
        self.assertEqual(AtCoderSubmission.from_url('https://atcoder.jp/contests/kupc2012/submissions/2097011').contest_id, 'kupc2012')
        self.assertEqual(AtCoderSubmission.from_url('https://atcoder.jp/contests/kupc2012/submissions/2097011').submission_id, 2097011)
        self.assertEqual(AtCoderSubmission.from_url('https://qupc2014.contest.atcoder.jp/submissions/1444440').contest_id, 'qupc2014')
        self.assertEqual(AtCoderSubmission.from_url('https://qupc2014.contest.atcoder.jp/submissions/1444440').submission_id, 1444440)
    def test_submission_info(self):
        submission = AtCoderSubmission.from_url('https://atcoder.jp/contests/agc030/submissions/3904911')
        self.assertEqual(submission.get_submission_time().year, 2018)
        self.assertEqual(submission.get_submission_time().month, 12)
        self.assertEqual(submission.get_submission_time().day, 31)
        self.assertEqual(submission.get_user_id(), 'kimiyuki')
        self.assertEqual(submission.get_problem().problem_id, 'agc030_b')
        self.assertEqual(submission.get_language_name(), 'C++14 (GCC 5.4.1)')
        self.assertEqual(submission.get_score(), 800)
        self.assertEqual(submission.get_code_size(), 1457)
        self.assertEqual(submission.get_exec_time_msec(), 85)
        self.assertEqual(submission.get_memory_byte(), 3328 * 1000)
    def test_get_test_sets(self):
        # Old-style subtask scoring: Sample + two scored subtasks.
        submission = AtCoderSubmission.from_url('https://atcoder.jp/contests/arc028/submissions/223928')
        test_cases = submission.get_test_sets()
        self.assertEqual(len(test_cases), 3)
        self.assertEqual(test_cases[0].set_name, 'Sample')
        self.assertEqual(test_cases[0].score, 0)
        self.assertEqual(test_cases[0].max_score, 0)
        self.assertEqual(test_cases[0].test_case_names, ['sample_01.txt', 'sample_02.txt'])
        self.assertEqual(test_cases[1].set_name, 'Subtask1')
        self.assertEqual(test_cases[1].score, 40)
        self.assertEqual(test_cases[1].max_score, 40)
        self.assertEqual(len(test_cases[1].test_case_names), 13)
        self.assertEqual(test_cases[2].set_name, 'Subtask2')
        self.assertEqual(test_cases[2].score, 0)
        self.assertEqual(test_cases[2].max_score, 60)
        self.assertEqual(len(test_cases[2].test_case_names), 20)
    def test_get_test_cases(self):
        # A TLE'd case reports no time/memory figures (None).
        submission = AtCoderSubmission.from_url('https://atcoder.jp/contests/tricky/submissions/119944')
        test_cases = submission.get_test_cases()
        self.assertEqual(len(test_cases), 2)
        self.assertEqual(test_cases[0].case_name, 'input_01.txt')
        self.assertEqual(test_cases[0].status, 'TLE')
        self.assertEqual(test_cases[0].exec_time_msec, None)
        self.assertEqual(test_cases[0].memory_byte, None)
        self.assertEqual(test_cases[1].case_name, 'input_02.txt')
        self.assertEqual(test_cases[1].status, 'AC')
        self.assertEqual(test_cases[1].exec_time_msec, 131)
        self.assertEqual(test_cases[1].memory_byte, 7400 * 1000)
    def test_get_source_code(self):
        # Three submissions of the same code with different line endings;
        # the reported code size must match the raw byte count exactly.
        submission = AtCoderSubmission.from_url('https://atcoder.jp/contests/abc100/submissions/3082514')
        self.assertEqual(submission.get_source_code(), b'/9\\|\\B/c:(\ncYay!')
        self.assertEqual(submission.get_code_size(), 16)
        submission = AtCoderSubmission.from_url('https://atcoder.jp/contests/abc100/submissions/4069980')
        self.assertEqual(submission.get_source_code(), b'/9\\|\\B/c:(\r\ncYay!')
        self.assertEqual(submission.get_code_size(), 17)
        submission = AtCoderSubmission.from_url('https://atcoder.jp/contests/abc100/submissions/4317534')
        self.assertEqual(submission.get_source_code(), b'/9\\|\\B/c:(\r\ncYay!\r\n')
        self.assertEqual(submission.get_code_size(), 19)
if __name__ == '__main__':
    # Allow running this test module stand-alone.
    unittest.main()
| [
"kimiyuki95@gmail.com"
] | kimiyuki95@gmail.com |
6f3406da83fa8c0692c04a30a556eac010749755 | 95495baeb47fd40b9a7ecb372b79d3847aa7a139 | /test/test_prefer_life_time.py | e4414159a58df5761302ecb4da13ca46c2fb3b58 | [] | no_license | pt1988/fmc-api | b1d8ff110e12c13aa94d737f3fae9174578b019c | 075f229585fcf9bd9486600200ff9efea5371912 | refs/heads/main | 2023-01-07T09:22:07.685524 | 2020-10-30T03:21:24 | 2020-10-30T03:21:24 | 308,226,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | # coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: tac@cisco.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.prefer_life_time import PreferLifeTime # noqa: E501
from swagger_client.rest import ApiException
class TestPreferLifeTime(unittest.TestCase):
    """Unit-test stubs for the ``PreferLifeTime`` model.

    The generated test is still a placeholder; it performs no assertions
    until a real fixture with mandatory attributes is written.
    """

    def setUp(self):
        """No fixtures are required yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testPreferLifeTime(self):
        """Placeholder test for ``PreferLifeTime``.

        FIXME: construct the object with mandatory attributes using example
        values, e.g. ``swagger_client.models.prefer_life_time.PreferLifeTime()``.  # noqa: E501
        """
if __name__ == '__main__':
    # Allow running this test module stand-alone.
    unittest.main()
| [
"pt1988@gmail.com"
] | pt1988@gmail.com |
acc28517932c94119c7bdabb55a776e1622cd13c | 3637fe729395dac153f7abc3024dcc69e17f4e81 | /reference/ucmdb/discovery/plugins_weblogic_server_domain.py | 8f752e1bf03c9a7c22e458a8f667ce33641411a7 | [] | no_license | madmonkyang/cda-record | daced6846c2456f20dddce7f9720602d1583a02a | c431e809e8d0f82e1bca7e3429dd0245560b5680 | refs/heads/master | 2023-06-15T08:16:46.230569 | 2021-07-15T16:27:36 | 2021-07-15T16:27:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,831 | py | #coding=utf-8
from plugins import Plugin
from appilog.common.system.types.vectors import ObjectStateHolderVector
from appilog.common.system.types import ObjectStateHolder
import ip_addr
import netutils
import weblogic
import modeling
import weblogic_by_shell
import jee
import file_system
from java.lang import Exception as JException
import weblogic_discoverer
import logger
class WeblogicPlugin:
    """Mixin with the shared WebLogic discovery logic.

    NOTE(review): this class does not itself inherit from Plugin; the
    concrete subclasses below mix in Plugin alongside it, so the
    ``Plugin.__init__`` call here resolves at runtime through those
    subclasses.  This is Jython/Python 2 code (``except ValueError, ex``).
    """
    def __init__(self):
        Plugin.__init__(self)
    def getProcessName(self):
        # Subclasses must return the platform-specific JVM process name.
        raise NotImplementedError()
    def isApplicable(self, context):
        # Applicable only when the expected JVM process was discovered.
        return context.application.getProcess(self.getProcessName()) is not None
    def process(self, context):
        self.enrichAppServerOsh(context, self.getProcessName())
    def enrichAppServerOsh(self, context, processName):
        r'''Goal of this is to set for reported Weblogic AS
        - administrative domain name
        - application type as Application Server (AS)
        @types: applications.ApplicationSignatureContext, str
        '''
        # @types: ProcessObject
        process = context.application.getProcess(processName)
        # compose function to get process by PID required to get
        # domain root directory path
        appComponent = context.application.getApplicationComponent()
        applicationSignature = appComponent.getApplicationSignature()
        processInfoManager = applicationSignature.getProcessesManager()
        # here it is - function accept PID and returns process or None
        # (old-style `x and y or z` conditional; falls back to a no-op)
        getProcessByPid = (processInfoManager
                           and processInfoManager.getProcessByPid
                           or (lambda *args: None)
                           )
        # first of all set application type as AS for the server OSH
        serverOsh = context.application.getOsh()
        modeling.setAppServerType(serverOsh)
        # initialize required data
        loadExternalDtd = 0
        shell = context.client # for shell jobs we have shellutils.Shell instance
        fs = file_system.createFileSystem(shell)
        servers = None
        try:
            # find out path of domain root directory
            domainRootPath = weblogic_by_shell.getDomainRootDirPath(shell, fs, process,
                                                getProcessByPid)
        except:
            # NOTE(review): bare except deliberately makes this best-effort;
            # without a domain root path no further enrichment is possible.
            logger.debug("Domain root directory path cannot be found from the runtime information.")
            return
        try:
            # Parse config.xml from the discovered domain layout.
            domainLayout = weblogic_discoverer.createDomainLayout(fs, domainRootPath)
            parser = weblogic_discoverer.createDomainConfigParserByLayout(domainLayout, loadExternalDtd)
            domainDescriptorFile = domainLayout.getFileContent(
                domainLayout.getDomainConfigFilePath()
            )
            domainDescriptor = parser.parseConfiguration(domainDescriptorFile.content)
        except ValueError, ex:
            logger.reportWarning("Not supported DomainLayout and so weblogic discovery will be partial")
            logger.debugException("Not supported DomainLayout and so weblogic discovery will be partial")
        except (Exception, JException):
            logger.warnException("Failed to process config.xml")
        else:
            # get version of the platform
            versionInfo = domainDescriptor.versionInfo
            logger.info("Platform version is %s" % versionInfo)
            domainName = domainDescriptor.getName()
            # update server administrative domain attribute
            modeling.setJ2eeServerAdminDomain(serverOsh, domainName)
            servers = domainDescriptor.getServers()
            # Set the full server name for the server matching this OSH.
            for server in servers:
                if server.getName() == serverOsh.getAttributeValue('name'):
                    serverFullName = jee.ServerTopologyBuilder()._composeFullName(server)
                    serverOsh.setAttribute('j2eeserver_fullname', serverFullName)
                    break
            ##reportEndpointByConfigFile
            self.reportEndpointByConfigFile(context,shell,servers)
    def reportEndpointByConfigFile(self,context,shell,servers):
        """Report TCP endpoints for every server listed in config.xml.

        For each server with a known listen port, creates an endpoint OSH on
        the resolved IP, and links the current application server to its own
        endpoint with a 'usage' link.
        """
        logger.debug("reporting endpoints for weblogic using configfile")
        endpointOSHV = ObjectStateHolderVector()
        for server in servers:
            serverRole = server.getRole(weblogic.ServerRole)
            port = None
            if serverRole:
                port = serverRole.getPort()
            host = server.address
            ip = None
            if port:
                # Wildcard / loopback listen addresses fall back to the
                # application's discovered IP; otherwise resolve the host.
                if not host or host == '*' or host == '127.0.0.1':
                    if context.application.getApplicationIp():
                        ip = context.application.getApplicationIp()
                elif netutils.isValidIp(host):
                    ip = host
                else:
                    ip = netutils.resolveIP(shell,host)
                endpoint = netutils.Endpoint(port, netutils.ProtocolType.TCP_PROTOCOL, ip)
                endpointOSH = modeling.createIpServerOSH(endpoint)
                hostosh = modeling.createHostOSH(ip)
                endpointOSH.setContainer(hostosh)
                # Link only the server this plugin is currently enriching.
                if server.getName() == context.application.getOsh().getAttributeValue('name'):
                    linkOsh = modeling.createLinkOSH("usage", context.application.getOsh(), endpointOSH)
                    endpointOSHV.add(linkOsh)
                endpointOSHV.add(endpointOSH)
                logger.debug('Get ip using configfile config.xml:',ip)
                logger.debug('Get port using configfile config.xml:', port)
        if endpointOSHV:
            context.resultsVector.addAll(endpointOSHV)
class WeblogicServerDomainPluginWindows(WeblogicPlugin, Plugin):
    # Windows variant: the JVM process is named 'java.exe'.
    def getProcessName(self):
        return 'java.exe'
class WeblogicServerDomainPluginUnix(WeblogicPlugin, Plugin):
    # Unix variant: the JVM process is named 'java'.
    def getProcessName(self):
        return 'java'
| [
"silentbalanceyh@126.com"
] | silentbalanceyh@126.com |
834f4414c63dd5c819065c0268b730a14f7cb13b | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/influxdb-client/influxdb_client/domain/import_declaration.pyi | 559312f7d5ec4379c951710d415c7ad89c794866 | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 585 | pyi | from _typeshed import Incomplete
# Type stub for the generated influxdb_client ImportDeclaration model
# (a Flux AST node).  Attribute types are `Incomplete` pending refinement.
class ImportDeclaration:
    # OpenAPI-generated class metadata (attribute-name/type maps).
    openapi_types: Incomplete
    attribute_map: Incomplete
    discriminator: Incomplete
    def __init__(self, type: Incomplete | None = None, _as: Incomplete | None = None, path: Incomplete | None = None) -> None: ...
    @property
    def type(self): ...
    @type.setter
    def type(self, type) -> None: ...
    @property
    def path(self): ...
    @path.setter
    def path(self, path) -> None: ...
    # Generated serialization / debugging helpers.
    def to_dict(self): ...
    def to_str(self): ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
"noreply@github.com"
] | sobolevn.noreply@github.com |
f8d0456a377fac7fe3c6207cc8e28e80690af499 | 0ca435ec92edb5c580c994c29dcab552bfdbc817 | /setup.py | 4987fdd5cddad4d35abe5c3b3eeac2821ff7d15f | [
"BSD-3-Clause"
] | permissive | gabrielhurley/django-wymeditor | 4a28027ce4e5a41ae42cb9c2923c14846fd51dcf | 1f45ce696688ee6e849e8dfafd7d55a1bcd4b045 | refs/heads/master | 2020-06-05T00:11:58.352380 | 2013-10-23T21:05:03 | 2013-10-23T21:05:03 | 1,621,672 | 0 | 2 | BSD-3-Clause | 2018-08-13T22:44:22 | 2011-04-16T01:48:23 | JavaScript | UTF-8 | Python | false | false | 983 | py | from setuptools import setup, find_packages
# Read the long description up front with a context manager so the file
# handle is closed deterministically (the previous bare
# ``open('README.rst').read()`` relied on garbage collection to close it,
# which emits a ResourceWarning on modern Pythons).
with open('README.rst') as readme:
    long_description = readme.read()

setup(name='django-wymeditor',
      version='1.0',
      description='A Django application that contains a widget to render a form field with a WYMEditor interface.',
      long_description=long_description,
      author='Gabriel Hurley',
      author_email='gabriel@strikeawe.com',
      license='BSD',
      url='https://github.com/gabrielhurley/django-wymeditor',
      download_url='git://github.com/gabrielhurley/django-wymeditor.git',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Web Environment',
          'Framework :: Django',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: Utilities'
      ],
      )
"gabriel@strikeawe.com"
] | gabriel@strikeawe.com |
2f62aa7e8a4bdb96e448450594d045f0ccf6d831 | 907b3bbd44c95be1542a36feaadb6a71b724579f | /files/usr/google-cloud-sdk/.install/.backup/lib/surface/compute/scp.py | cd2981521d71cab0a31aa56c33a8dabdd0d6aa96 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | vo0doO/com.termux | 2d8f536c1a5dbd7a091be0baf181e51f235fb941 | c97dd7b906e5ef3ec157581fd0bcadd3e3fc220e | refs/heads/master | 2020-12-24T09:40:30.612130 | 2016-11-21T07:47:25 | 2016-11-21T07:47:25 | 73,282,539 | 2 | 2 | null | 2020-07-24T21:33:03 | 2016-11-09T12:33:01 | Python | UTF-8 | Python | false | false | 6,781 | py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the command for copying files from and to virtual machines."""
import collections
from googlecloudsdk.api_lib.compute import ssh_utils
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
# Parsed forms of the positional SCP arguments: a path on a remote VM
# (``[USER@]INSTANCE:PATH``) or a plain local path.
RemoteFile = collections.namedtuple(
    'RemoteFile', 'user instance_name file_path')
LocalFile = collections.namedtuple('LocalFile', 'file_path')
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class Scp(ssh_utils.BaseSSHCLICommand):
  """Copy files to and from Google Compute Engine virtual machines."""
  @staticmethod
  def Args(parser):
    # Register the shared SSH flags first, then the scp-specific ones.
    ssh_utils.BaseSSHCLICommand.Args(parser)
    parser.add_argument(
        '--port',
        help='The port to connect to.')
    parser.add_argument(
        '--recurse',
        action='store_true',
        help='Upload directories recursively.')
    parser.add_argument(
        '--compress',
        action='store_true',
        help='Enable compression.')
    parser.add_argument(
        '--scp-flag',
        action='append',
        help='Extra flag to be sent to scp. This flag may be repeated.')
    parser.add_argument(
        'sources',
        help='Specifies the files to copy.',
        metavar='[[USER@]INSTANCE:]SRC',
        nargs='+')
    parser.add_argument(
        'destination',
        help='Specifies a destination for the source files.',
        metavar='[[USER@]INSTANCE:]DEST')
    # TODO(b/21515936): Use flags.AddZoneFlag when copy_files supports URIs
    zone = parser.add_argument(
        '--zone',
        help='The zone of the instance to copy files to/from.',
        action=actions.StoreProperty(properties.VALUES.compute.zone))
    zone.detailed_help = (
        'The zone of the instance to copy files to/from.\n\n' +
        flags.ZONE_PROPERTY_EXPLANATION)
  def Run(self, args):
    """Parse, validate, and execute the scp transfer.

    Raises:
      exceptions.ToolException: if local/remote sources are mixed
        inconsistently with the destination, or if more than one VM
        instance is referenced.
    """
    super(Scp, self).Run(args)
    file_specs = []
    # Parses the positional arguments.
    for arg in args.sources + [args.destination]:
      if ssh_utils.IsScpLocalPath(arg):
        file_specs.append(LocalFile(arg))
      else:
        # Remote spec: split off '[USER@]INSTANCE' from the path.
        user_host, file_path = arg.split(':', 1)
        user_host_parts = user_host.split('@', 1)
        if len(user_host_parts) == 1:
          user = ssh_utils.GetDefaultSshUsername(warn_on_account_user=True)
          instance = user_host_parts[0]
        else:
          user, instance = user_host_parts
        file_specs.append(RemoteFile(user, instance, file_path))
    log.debug('Normalized arguments: %s', file_specs)
    # Validates the positional arguments.
    # TODO(b/21515495): Look into relaxing these conditions.
    sources = file_specs[:-1]
    destination = file_specs[-1]
    if isinstance(destination, LocalFile):
      for source in sources:
        if isinstance(source, LocalFile):
          raise exceptions.ToolException(
              'All sources must be remote files when the destination '
              'is local.')
    else:  # RemoteFile
      for source in sources:
        if isinstance(source, RemoteFile):
          raise exceptions.ToolException(
              'All sources must be local files when the destination '
              'is remote.')
    # Exactly one VM instance may be involved in a copy.
    instances = set()
    for file_spec in file_specs:
      if isinstance(file_spec, RemoteFile):
        instances.add(file_spec.instance_name)
    if len(instances) > 1:
      raise exceptions.ToolException(
          'Copies must involve exactly one virtual machine instance; '
          'your invocation refers to [{0}] instances: [{1}].'.format(
              len(instances), ', '.join(sorted(instances))))
    instance_ref = self.CreateZonalReference(instances.pop(), args.zone)
    instance = self.GetInstance(instance_ref)
    external_ip_address = ssh_utils.GetExternalIPAddress(instance)
    # Builds the scp command.
    scp_args = [self.scp_executable]
    if not args.plain:
      scp_args.extend(self.GetDefaultFlags())
      scp_args.extend(self.GetHostKeyArgs(args, instance))
    # apply args
    if args.quiet:
      scp_args.append('-q')
    if args.port:
      scp_args.extend(['-P', args.port])
    if args.recurse:
      scp_args.append('-r')
    if args.compress:
      scp_args.append('-C')
    if args.scp_flag:
      scp_args.extend(args.scp_flag)
    for file_spec in file_specs:
      if isinstance(file_spec, LocalFile):
        scp_args.append(file_spec.file_path)
      else:
        scp_args.append('{0}:{1}'.format(
            ssh_utils.UserHost(file_spec.user, external_ip_address),
            file_spec.file_path))
    # NOTE(review): `user` is whatever the *last* remote argument parsed
    # bound in the loop above; if remote sources named different users the
    # last one silently wins. Confirm this is the intended behavior.
    self.ActuallyRun(args, scp_args, user, instance)
Scp.detailed_help = {
'brief': 'Copy files to and from Google Compute Engine virtual machines '
'via scp',
'DESCRIPTION': """\
*{command}* copies files between a virtual machine instance
and your local machine using the scp command.
To denote a remote file, prefix the file name with the virtual
machine instance name (e.g., _example-instance_:~/_FILE_). To
denote a local file, do not add a prefix to the file name
(e.g., ~/_FILE_). For example, to copy a remote directory
to your local host, run:
$ {command} example-instance:~/REMOTE-DIR ~/LOCAL-DIR --zone us-central1-a
In the above example, ``~/REMOTE-DIR'' from ``example-instance'' is
copied into the ~/_LOCAL-DIR_ directory.
Conversely, files from your local computer can be copied to a
virtual machine:
$ {command} ~/LOCAL-FILE-1 ~/LOCAL-FILE-2 example-instance:~/REMOTE-DIR --zone us-central1-a
If a file contains a colon (``:''), you must specify it by
either using an absolute path or a path that begins with
``./''.
Under the covers, *scp(1)* or pscp (on Windows) is used to facilitate the transfer.
When the destination is local, all sources must be the same
virtual machine instance. When the destination is remote, all
source must be local.
""",
}
| [
"kirsanov.bvt@gmail.com"
] | kirsanov.bvt@gmail.com |
79bf5943ed0355ff765fec1898ec3a9f95176f7d | b3c47795e8b6d95ae5521dcbbb920ab71851a92f | /Nowcoder/剑指Offer/重建二叉树.py | c113259f8d4086abb6711c6d00a80c6348dd2027 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Wizmann/ACM-ICPC | 6afecd0fd09918c53a2a84c4d22c244de0065710 | 7c30454c49485a794dcc4d1c09daf2f755f9ecc1 | refs/heads/master | 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # -*- coding:utf-8 -*-
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # Returns the root TreeNode of the reconstructed tree.
    def reConstructBinaryTree(self, pre, tin):
        """Rebuild a binary tree from its preorder (`pre`) and inorder
        (`tin`) traversals and return the root node.

        The first preorder element is the root; its index in the inorder
        sequence splits both traversals into left/right subtrees.
        NOTE(review): `pre.pop(0)` mutates the caller's list (recursive
        calls receive fresh slices, so only the top-level list loses its
        head).  Runs in O(n^2) due to `list.index` and slicing.
        """
        if not pre:
            return None
        rv = pre.pop(0)
        ri = tin.index(rv)
        root = TreeNode(rv)
        root.left = self.reConstructBinaryTree(pre[:ri], tin[:ri])
        root.right = self.reConstructBinaryTree(pre[ri:], tin[ri + 1:])
        return root
| [
"noreply@github.com"
] | Wizmann.noreply@github.com |
940fbf84801f94dd628850923b61ce9e97a7efe4 | 61c109344f8bcb822168152cc16c11d05cf6aa70 | /django_coverage_plugin/plugin.py | 50800f39f54eebb095f6dc3e480f973be846851b | [] | no_license | rfleschenberg/django_coverage_plugin | 61a291b0321cb2c3b78ec8bb6719fb550ca35d44 | ef46934b55043dbbb8f6a21f60e26477ce7fc8ec | refs/heads/master | 2021-01-18T20:27:34.806564 | 2015-01-16T00:09:28 | 2015-01-16T00:09:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,464 | py | """The Django coverage plugin."""
from __future__ import print_function, unicode_literals
import os.path
from six.moves import range
import coverage.plugin
import django
from django.template import Lexer, TextNode
from django.template.base import TOKEN_MAPPING
from django.template import TOKEN_BLOCK, TOKEN_TEXT, TOKEN_VAR
# Debug switches: flip to True to dump template parsing / tracing details.
SHOW_PARSING = False
SHOW_TRACING = False
# Dead debug scaffolding: change `if 0` to `if 1` to get a blessed Terminal
# for colored debug output (requires the third-party `blessed` package).
if 0:
    from blessed import Terminal
    t = Terminal()
# TODO: Add a check for TEMPLATE_DEBUG, and make noise if it is false.
class Plugin(coverage.plugin.CoveragePlugin, coverage.plugin.FileTracer):
    """Coverage.py plugin that traces Django template rendering.

    Acts as both the CoveragePlugin (file selection/reporting) and the
    FileTracer (mapping `render` frames back to template source lines).
    """
    def __init__(self, options):
        super(Plugin, self).__init__(options)
        # Locations of the installed Django package and its template engine.
        self.django_dir = os.path.dirname(django.__file__)
        self.django_template_dir = os.path.join(self.django_dir, "template")
        # Cache: template filename -> line map (see get_line_map).
        self.source_map = {}
    # --- CoveragePlugin methods
    def file_tracer(self, filename):
        # Trace only Django's template engine itself, not template tags.
        if filename.startswith(self.django_template_dir):
            if "templatetags" not in filename:
                return self
        return None
    def file_reporter(self, filename):
        return FileReporter(filename)
    # --- FileTracer methods
    def has_dynamic_source_filename(self):
        return True
    def dynamic_source_filename(self, filename, frame):
        # Only Node.render frames carry template source information.
        if frame.f_code.co_name != 'render':
            return None
        # NOTE(review): `locals` shadows the builtin here.
        locals = frame.f_locals
        render_self = locals['self']
        if 0:
            dump_frame(frame)
        try:
            # With TEMPLATE_DEBUG on, nodes carry (origin, (start, end)).
            source = render_self.source
            origin = source[0]
            filename = origin.name
            return filename
        except (AttributeError, IndexError):
            pass
        return None
    def line_number_range(self, frame):
        """Map a render frame's character span to (first, last) line numbers."""
        assert frame.f_code.co_name == 'render'
        render_self = frame.f_locals['self']
        source = render_self.source
        if SHOW_TRACING:
            print("{!r}: {}".format(render_self, source))
        s_start, s_end = source[1]
        if isinstance(render_self, TextNode):
            # Skip a leading whitespace-only line of a text node so the
            # range starts at the first meaningful line.
            text = render_self.s
            first_line = text.splitlines(True)[0]
            if first_line.isspace():
                s_start += len(first_line)
        line_map = self.get_line_map(source[0].name)
        start = get_line_number(line_map, s_start)
        end = get_line_number(line_map, s_end-1)
        if start < 0 or end < 0:
            return -1, -1
        return start, end
    # --- FileTracer helpers
    def get_line_map(self, filename):
        """The line map for `filename`, computed once and cached.

        A line map is a list of character offsets, indicating where each line
        in the text begins. For example, a line map like this::

            [13, 19, 30]

        means that line 2 starts at character 13, line 3 starts at 19, etc.
        Line 1 always starts at character 0.
        """
        if filename not in self.source_map:
            with open(filename) as template_file:
                template_source = template_file.read()
            if 0: # change to see the template text
                for i in range(0, len(template_source), 10):
                    print("%3d: %r" % (i, template_source[i:i+10]))
            self.source_map[filename] = make_line_map(template_source)
        return self.source_map[filename]
class FileReporter(coverage.plugin.FileReporter):
    """Reports which lines of a Django template are executable statements."""
    def __init__(self, filename):
        # TODO: do we want the .filename attribute to be part of the public
        # API of the coverage plugin?
        self.filename = filename
        # TODO: is self.name required? Can the base class provide it somehow?
        self.name = os.path.basename(filename)
        # TODO: html filenames are absolute.
    def statements(self):
        """Return the set of template line numbers considered executable.

        Re-lexes the template and collects the line numbers of block tags,
        variable tags, and rendered text, while skipping comments and —
        for extending templates — any content outside {% block %} sections.
        """
        source_lines = set()
        if SHOW_PARSING:
            print("-------------- {}".format(self.filename))
        with open(self.filename) as f:
            text = f.read()
        tokens = Lexer(text, self.filename).tokenize()
        # Are we inside a comment?
        comment = False
        # Is this a template that extends another template?
        extends = False
        # Are we inside a block?
        inblock = False
        for token in tokens:
            if SHOW_PARSING:
                print(
                    "%10s %2d: %r" % (
                        TOKEN_MAPPING[token.token_type],
                        token.lineno,
                        token.contents,
                    )
                )
            if token.token_type == TOKEN_BLOCK:
                # {% endcomment %} must be handled even while `comment` is
                # set, so it is checked before the skip below.
                if token.contents == 'endcomment':
                    comment = False
                    continue
            if comment:
                continue
            if token.token_type == TOKEN_BLOCK:
                if token.contents.startswith("endblock"):
                    inblock = False
                elif token.contents.startswith("block"):
                    inblock = True
                    if extends:
                        continue
                if token.contents == 'comment':
                    comment = True
                # end* / else / empty / elif tags are structural; they never
                # count as executable statements themselves.
                if token.contents.startswith("end"):
                    continue
                elif token.contents in ("else", "empty"):
                    continue
                elif token.contents.startswith("elif"):
                    # NOTE: I don't like this, I want to be able to trace elif
                    # nodes, but the Django template engine doesn't track them
                    # in a way that we can get useful information from them.
                    continue
                elif token.contents.startswith("extends"):
                    extends = True
                source_lines.add(token.lineno)
            elif token.token_type == TOKEN_VAR:
                source_lines.add(token.lineno)
            elif token.token_type == TOKEN_TEXT:
                if extends and not inblock:
                    continue
                # Text nodes often start with newlines, but we don't want to
                # consider that first line to be part of the text.
                lineno = token.lineno
                lines = token.contents.splitlines(True)
                num_lines = len(lines)
                if lines[0].isspace():
                    lineno += 1
                    num_lines -= 1
                source_lines.update(range(lineno, lineno+num_lines))
            if SHOW_PARSING:
                print("\t\t\tNow source_lines is: {!r}".format(source_lines))
        return source_lines
def running_sum(seq):
    """Yield the cumulative sums of the numbers in `seq`."""
    acc = 0
    for value in seq:
        acc += value
        yield acc
def make_line_map(text):
    """Build a line map for `text`: cumulative end offsets of each line."""
    offsets = []
    total = 0
    for line in text.splitlines(True):
        total += len(line)
        offsets.append(total)
    return offsets
def get_line_number(line_map, offset):
    """Find a 1-based line number for a character `offset`, or -1 if past the end."""
    lineno = 1
    for boundary in line_map:
        if boundary > offset:
            return lineno
        lineno += 1
    return -1
def dump_frame(frame):
    """Print debugging details for `frame`: location, locals, and `self`."""
    frame_locals = frame.f_locals
    self_obj = frame_locals.get('self', None)
    # Drop the huge __builtins__ mapping before printing the locals.
    if "__builtins__" in frame_locals:
        del frame_locals["__builtins__"]
    print("-- frame -----------------------")
    print("{}:{}:{}".format(
        os.path.basename(frame.f_code.co_filename),
        frame.f_lineno,
        type(self_obj),
    ))
    print(frame_locals)
    if self_obj:
        print("self:", self_obj.__dict__)
| [
"ned@nedbatchelder.com"
] | ned@nedbatchelder.com |
be12fe914a5b9c2f4e1eab08f952edf9dd0ae710 | 6929a33a7259dad9b45192ca088a492085ed2953 | /solutions/0475-heaters/heaters.py | 2c9655d2847642ea56cf9df3c164610f4b6ae8c5 | [] | no_license | moqi112358/leetcode | 70366d29c474d19c43180fd4c282cc02c890af03 | fab9433ff7f66d00023e3af271cf309b2d481722 | refs/heads/master | 2022-12-10T01:46:14.799231 | 2021-01-14T05:00:09 | 2021-01-14T05:00:09 | 218,163,960 | 3 | 0 | null | 2022-07-06T20:26:38 | 2019-10-28T23:26:47 | Python | UTF-8 | Python | false | false | 3,273 | py | # Winter is coming! During the contest, your first job is to design a standard heater with a fixed warm radius to warm all the houses.
#
# Every house can be warmed, as long as the house is within the heater's warm radius range.
#
# Given the positions of houses and heaters on a horizontal line, return the minimum radius standard of heaters so that those heaters could cover all houses.
#
# Notice that all the heaters follow your radius standard, and the warm radius will the same.
#
#
# Example 1:
#
#
# Input: houses = [1,2,3], heaters = [2]
# Output: 1
# Explanation: The only heater was placed in the position 2, and if we use the radius 1 standard, then all the houses can be warmed.
#
#
# Example 2:
#
#
# Input: houses = [1,2,3,4], heaters = [1,4]
# Output: 1
# Explanation: The two heater was placed in the position 1 and 4. We need to use radius 1 standard, then all the houses can be warmed.
#
#
# Example 3:
#
#
# Input: houses = [1,5], heaters = [2]
# Output: 3
#
#
#
# Constraints:
#
#
# 1 <= houses.length, heaters.length <= 3 * 104
# 1 <= houses[i], heaters[i] <= 109
#
#
#
# @lc app=leetcode id=475 lang=python3
#
# [475] Heaters
#
# https://leetcode.com/problems/heaters/description/
#
# algorithms
# Easy (32.21%)
# Total Accepted: 52K
# Total Submissions: 161.3K
# Testcase Example: '[1,2,3]\n[2]'
#
# Winter is coming! Your first job during the contest is to design a standard
# heater with fixed warm radius to warm all the houses.
#
# Now, you are given positions of houses and heaters on a horizontal line, find
# out minimum radius of heaters so that all houses could be covered by those
# heaters.
#
# So, your input will be the positions of houses and heaters seperately, and
# your expected output will be the minimum radius standard of heaters.
#
# Note:
#
#
# Numbers of houses and heaters you are given are non-negative and will not
# exceed 25000.
# Positions of houses and heaters you are given are non-negative and will not
# exceed 10^9.
# As long as a house is in the heaters' warm radius range, it can be
# warmed.
# All the heaters follow your radius standard and the warm radius will the
# same.
#
#
#
#
# Example 1:
#
#
# Input: [1,2,3],[2]
# Output: 1
# Explanation: The only heater was placed in the position 2, and if we use the
# radius 1 standard, then all the houses can be warmed.
#
#
#
#
# Example 2:
#
#
# Input: [1,2,3,4],[1,4]
# Output: 1
# Explanation: The two heater was placed in the position 1 and 4. We need to
# use radius 1 standard, then all the houses can be warmed.
#
#
#
#
#
class Solution:
    def findRadius(self, houses, heaters):
        """Return the minimum heater radius that covers every house.

        Sorts both lists, then sweeps a single heater pointer across the
        sorted houses: for each house, advance to its nearest heater and
        track the largest such nearest-heater distance.

        Args:
            houses: list[int] of house positions (sorted in place).
            heaters: list[int] of heater positions (sorted in place).

        Returns:
            int: the smallest radius covering all houses (0 if none).
        """
        # BUG FIX: the original initialized with `-sys.maxsize - 1` but the
        # module never imports sys, raising NameError. Distances are
        # non-negative, so 0 is a safe floor.
        houses.sort()
        heaters.sort()
        curr = 0
        last = len(heaters) - 1
        radius = 0
        for house in houses:
            dist = abs(heaters[curr] - house)
            # Advance while the next heater is at least as close.
            while curr < last and abs(heaters[curr + 1] - house) <= dist:
                curr += 1
                dist = abs(heaters[curr] - house)
            radius = max(radius, dist)
        return radius
| [
"983028670@qq.com"
] | 983028670@qq.com |
72ab34403f47a895dc8cf4d64e46acb9915bb8c6 | e9462eeebcac761b34722578f9b297af858710bc | /python/python-vba-word/Win32COM - Word - Table Formulas.py | cb6efd70d8d489073bce97292edec2b13ebb0a24 | [
"MIT"
] | permissive | nsethtyler/sigma_coding_youtube | 63e72c8f3f144731e6c49e049ed786f2b3219e73 | c09f131816bc2d1c1eb45ab9da6de75ee7b855db | refs/heads/master | 2022-12-09T12:03:34.757133 | 2020-08-27T20:50:04 | 2020-08-27T20:50:04 | 290,873,388 | 0 | 0 | MIT | 2020-08-27T20:20:10 | 2020-08-27T20:20:09 | null | UTF-8 | Python | false | false | 2,321 | py | import win32com.client as win32
# Grab the Active Instance of Word and the document being edited.
WrdApp = win32.GetActiveObject("Word.Application")
WrdDoc = WrdApp.ActiveDocument

# First table in the document: column 1 = sales, 2 = costs, 3 = profit.
WrdTable = WrdDoc.Tables.Item(1)
SaleColumn = WrdTable.Columns(1)
CostColumn = WrdTable.Columns(2)
ProfitColumn = WrdTable.Columns(3)

# Rewrite each sales cell (skipping the header row) as a Word field that
# formats the original number as currency.
for SaleCell in list(SaleColumn.Cells)[1:]:
    SaleCellText = SaleCell.Range.Text
    SaleCell.Range.Text = ""
    formula_string = "={my_number}\#""$#,##0.00;($#,##0.00)""".format(my_number = SaleCellText)
    SaleCell.Range.Select()
    # Collapse to the start of the cell so the field is inserted in place.
    WrdApp.Selection.Collapse(Direction=1)
    SelecRng = WrdApp.Selection.Range
    SelecRng.Fields.Add(Range=SelecRng, Type=-1, Text=formula_string, PreserveFormatting=True)

# Same treatment for the cost column.
for CostCell in list(CostColumn.Cells)[1:]:
    CostCellText = CostCell.Range.Text
    CostCell.Range.Text = ""
    # BUG FIX: the original formatted SaleCellText (leftover from the loop
    # above), so every cost cell showed the last sales value.
    formula_string = "={my_number}\#""$#,##0.00;($#,##0.00)""".format(my_number = CostCellText)
    CostCell.Range.Select()
    WrdApp.Selection.Collapse(Direction=1)
    SelecRng = WrdApp.Selection.Range
    SelecRng.Fields.Add(Range=SelecRng, Type=-1, Text=formula_string, PreserveFormatting=True)

# Profit column: a field computing sales minus cost for the same row.
for ProfitCell in list(ProfitColumn.Cells)[1:]:
    ProfitCell.Range.Text = ""
    formula_string = "=R{row_number}C1 - R{row_number}C2 \#""$#,##0.00;($#,##0.00)""".format(row_number = ProfitCell.Row.Index)
    ProfitCell.Range.Select()
    WrdApp.Selection.Collapse(Direction=1)
    SelecRng = WrdApp.Selection.Range
    SelecRng.Fields.Add(Range=SelecRng, Type=-1, Text=formula_string, PreserveFormatting=True)
| [
"alexreed1192@gmail.com"
] | alexreed1192@gmail.com |
c8665f57e8b014bb4a0bada9bd7400187cc6c773 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /kxrhqiE5so3AMXWS7_1.py | a567e54546179cbab5298d2f0a11665f16f9e8cf | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | """
A man has `n` number of apples. If he eats a percentage `p` of the apples (if
apples are available), his children will share the remainder of the apples.
Create a function to determine the number of 'whole' apples his children got.
If his children did not get any apples, return `"The children didn't get any
apples"`.
### Examples
get_number_of_apples(10, "90%") ➞ 1
get_number_of_apples(25, "10%") ➞ 22
get_number_of_apples(0, "10%") ➞ "The children didn't get any apples"
### Notes
`p` will always be given.
"""
def get_number_of_apples(n, p):
    """Return the whole apples left for the children after eating `p` of `n`.

    `p` is a percentage string like "90%". Returns a message string when
    no whole apple remains.
    """
    eaten_pct = int(p[:-1])
    remaining = n * (100 - eaten_pct) // 100
    if remaining:
        return remaining
    return "The children didn't get any apples"
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
0523861ca98c57a5e70824cf61ce73b08eb0213c | 048c4c7a0a7956e976a0cd0512ca9536c8aeb82d | /tefla/core/special_fn.py | 055fc21d756d455ee899cfcc494de399f599af5f | [
"MIT"
] | permissive | mkulariya1/tefla | 40d41242f08b4431a08f7dc6680088a234da5191 | 8de25c1b67dcf025535f5e8c40539de59acd7fb8 | refs/heads/master | 2020-04-24T15:46:51.866942 | 2019-02-04T18:33:49 | 2019-02-04T18:33:49 | 172,082,029 | 0 | 0 | NOASSERTION | 2019-02-22T14:41:53 | 2019-02-22T14:41:53 | null | UTF-8 | Python | false | false | 54,477 | py | import re
import functools
from collections import defaultdict
import contextlib
import random

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import function

from .layers import dilated_conv2d, conv1d, layer_norm, _layer_norm_compute_python, \
    _collect_named_outputs
from .optimizer import VariableClippingOptimizer
from . import initializers as initz
def fn_with_custom_grad(grad_fn, use_global_vars=False):
  """Decorator giving the wrapped function a custom gradient.

  The decorated function's subgraph is built inline (no Defun), so it is
  free of Defun's limitations (single-device subgraphs, no summaries).

  Args:
    grad_fn: callable (inputs, variables, outputs, output_grads) ->
      (grad_inputs, grad_vars), each a list of Tensors; or None to leave
      the gradient untouched.
    use_global_vars: if True, `variables` are the global variables created
      by the function; otherwise only the trainable ones.

  Returns:
    A decorator applying `grad_fn` as the gradient of its target function.
  """
  def decorator(fn):
    def wrapped(*args):
      return _fn_with_custom_grad(fn, args, grad_fn, use_global_vars=use_global_vars)
    return wrapped
  return decorator
def _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False):
  """Create a subgraph with a custom gradient.
  Args:
    fn: function that takes inputs as arguments and produces 1 or more Tensors.
    inputs: list<Tensor>, will be passed as fn(*inputs).
    grad_fn: function with signature
      (inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars),
      all of which are lists of Tensors.
    use_global_vars: if True, variables will be the global variables created.
      If False, will be the trainable variables.
  Returns:
    fn(*inputs)
  """
  # Build fn inline inside a fresh scope so that any variables it creates
  # can be collected from that scope below.
  with tf.variable_scope(None, default_name="fn_with_custom_grad") as vs:
    inputs = list(inputs)
    outputs = fn(*inputs)
    if use_global_vars:
      train_vars = list(vs.global_variables())
    else:
      train_vars = list(vs.trainable_variables())
  if grad_fn is None:
    return outputs
  else:
    if not (isinstance(outputs, tuple) or isinstance(outputs, list)):
      outputs = [outputs]
    outputs = list(outputs)
    in_types = [t.dtype for t in inputs]
    out_types = [t.dtype for t in outputs]
    var_types = [t.dtype for t in train_vars]
    def custom_grad_fn(op, *dys):
      """Custom grad fn applying grad_fn for identity Defun."""
      dys = list(dys)
      # op.inputs is the concatenation [inputs, train_vars, outputs]; slice
      # it back apart in the same order it was passed to `identity` below.
      fn_inputs = op.inputs[:len(inputs)]
      fn_vars = op.inputs[len(inputs):len(inputs) + len(train_vars)]
      fn_outputs = op.inputs[len(inputs) + len(train_vars):]
      assert len(fn_outputs) == len(outputs)
      assert len(fn_outputs) == len(dys)
      grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys)
      # No gradient flows to the passed-through outputs themselves.
      grad_outputs = [None] * len(fn_outputs)
      return tuple(grad_inputs + grad_vars + grad_outputs)
    # The Defun takes as input the original inputs, the trainable variables
    # created in fn, and the outputs. In the forward it passes through the
    # outputs. In the backwards, it produces gradients for the original inputs
    # and the trainable variables.
    @function.Defun(
        *(in_types + var_types + out_types),
        func_name="identity_custom_grad%d" % random.randint(1, 10**9),
        python_grad_func=custom_grad_fn,
        shape_func=lambda _: [t.get_shape() for t in outputs])
    def identity(*args):
      outs = args[len(inputs) + len(train_vars):]
      return tuple([tf.identity(t) for t in outs])
    id_out = identity(*(inputs + train_vars + outputs))
    return id_out
def format_input_left_padding(inputs, **kwargs):
  """Left-pad `inputs` so a subsequent VALID convolution acts LEFT-padded.

  Pads only the leading side of the height/width dimensions by
  (filter_size - 1) * dilation and rewrites kwargs["padding"] to "VALID".

  Args:
    inputs: a 4-D `Tensor` [batch, height, width, channels] with statically
      known rank 4.
    **kwargs: must contain `filter_size` (int or pair of odd ints); may
      contain `dilation` (int or pair of ints, default 1).

  Returns:
    (padded_inputs, kwargs), with kwargs["padding"] set to "VALID".

  Raises:
    ValueError: if `inputs` does not have static rank 4.
  """
  static_shape = inputs.get_shape()
  if not static_shape or len(static_shape) != 4:
    raise ValueError("Inputs to conv must have statically known rank 4. Shape: " + str(static_shape))
  assert kwargs['filter_size'] is not None
  filter_size = kwargs['filter_size']
  if isinstance(filter_size, int):
    filter_size = [filter_size, filter_size]
  # BUG FIX: the original read kwargs["dilation"] into an unused variable
  # (`dilation_rate`), so the padding always assumed a dilation of 1.
  dilation = kwargs.get("dilation", 1)
  if isinstance(dilation, int):
    dilation = (dilation, dilation)
  assert filter_size[0] % 2 == 1 and filter_size[1] % 2 == 1
  height_padding = 2 * (filter_size[0] // 2) * dilation[0]
  # Width may be 1 only at graph-run time; decide the padding dynamically.
  cond_padding = tf.cond(
      tf.equal(tf.shape(inputs)[2], 1), lambda: tf.constant(0),
      lambda: tf.constant(2 * (filter_size[1] // 2) * dilation[1]))
  width_padding = 0 if static_shape[2] == 1 else cond_padding
  padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
  inputs = tf.pad(inputs, padding)
  # Set middle two dimensions to None to prevent convolution from complaining
  inputs.set_shape([static_shape[0], None, None, static_shape[3]])
  kwargs["padding"] = "VALID"
  return inputs, kwargs
def saturating_sigmoid(x):
  """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1, clipped to [0, 1]."""
  with tf.name_scope("saturating_sigmoid", [x]):
    scaled = 1.2 * tf.sigmoid(x) - 0.1
    return tf.minimum(1.0, tf.maximum(0.0, scaled))
def hard_sigmoid(x, saturation_limit=0.9):
  """Hard sigmoid 0.5*x + 0.5 clipped to [0, 1], plus a saturation penalty.

  The penalty is the mean amount by which |x| exceeds `saturation_limit`.
  """
  cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  clipped = tf.minimum(1.0, tf.nn.relu(0.5 * x + 0.5))
  return clipped, cost
def hard_tanh(x, saturation_limit=0.9):
  """Clip x to [-1, 1]; also return the mean saturation beyond the limit."""
  cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  clipped = tf.maximum(tf.minimum(x, 1.0), -1.0)
  return clipped, cost
def shift_right(x, pad_value=None):
  """Shift dimension 1 of a 4-D tensor right by one position.

  The vacated first slot is zero unless `pad_value` is given, in which
  case it is prepended instead.
  """
  if pad_value is not None:
    padded = tf.concat([pad_value, x], axis=1)
  else:
    padded = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])
  return padded[:, :-1, :, :]
def shift_right_3d(x, pad_value=None):
  """Shift dimension 1 of a 3-D tensor right by one position.

  The vacated first slot is zero unless `pad_value` is given.
  """
  if pad_value is not None:
    padded = tf.concat([pad_value, x], axis=1)
  else:
    padded = tf.pad(x, [[0, 0], [1, 0], [0, 0]])
  return padded[:, :-1, :]
def shift_right_2d(x, pad_value=None):
  """Shift dimension 1 of a 2-D tensor right by one position.

  The vacated first slot is zero unless `pad_value` is given.
  """
  if pad_value is not None:
    padded = tf.concat([pad_value, x], axis=1)
  else:
    padded = tf.pad(x, [[0, 0], [1, 0]])
  return padded[:, :-1]
@function.Defun(
    python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),
    shape_func=lambda op: [op.inputs[0].get_shape()])
def convert_gradient_to_tensor(x):
  """Identity operation whose gradient is converted to a `Tensor`.
  Currently, the gradient to `tf.concat` is particularly expensive to
  compute if dy is an `IndexedSlices` (a lack of GPU implementation
  forces the gradient operation onto CPU). This situation occurs when
  the output of the `tf.concat` is eventually passed to `tf.gather`.
  It is sometimes faster to convert the gradient to a `Tensor`, so as
  to get the cheaper gradient for `tf.concat`. To do this, replace
  `tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`.
  Args:
    x: A `Tensor`.
  Returns:
    The input `Tensor`.
  """
  # Forward pass is the identity; the Defun's python_grad_func densifies
  # the incoming gradient (possibly IndexedSlices) via convert_to_tensor.
  return x
def top_k_gpu(x, k):
  """GPU-friendly top-k for very small constant k, via repeated argmax.

  tf.nn.top_k has a GPU kernel but its gradient path (sparse_to_dense)
  does not, which can force both onto CPU; for k <= 10 this repeated
  argmax formulation stays on GPU. For k > 10 it falls back to
  tf.nn.top_k.

  Args:
    x: a 2-D Tensor [batch_size, depth].
    k: a small integer.

  Returns:
    (values, indices): Tensors of shape [batch_size, k]; indices is int32.
  """
  if k > 10:
    return tf.nn.top_k(x, k)
  depth = tf.shape(x)[1]
  vals, idxs = [], []
  for step in range(k):
    vals.append(tf.reduce_max(x, 1))
    best = tf.argmax(x, 1)
    idxs.append(best)
    # Mask out the chosen entries before the next argmax pass.
    if step + 1 < k:
      x += tf.one_hot(best, depth, -1e9)
  return tf.stack(vals, axis=1), tf.to_int32(tf.stack(idxs, axis=1))
def conv2d_v2(inputs, n_output_channels, is_training, reuse, **kwargs):
  """Adds a 2-D (optionally dilated) convolutional layer.

  Thin wrapper over `dilated_conv2d` that additionally understands
  padding="LEFT": in that case the input is explicitly left-padded first
  and the convolution runs with VALID padding.

  Args:
    inputs: a 4-D `Tensor` [batch_size, in_height, in_width, depth].
    n_output_channels: int, number of output channels.
    is_training: bool, training or inference mode.
    reuse: whether the layer's variable scope should be reused.
    **kwargs: forwarded to `dilated_conv2d` (filter_size, dilation,
      padding, activation, batch_norm, w_init, b_init, name, ...).

  Returns:
    A 4-D `Tensor` [batch, new_height, new_width, n_output_channels].

  Raises:
    ValueError: if `inputs` does not have static rank 4 (for LEFT padding).
  """
  if kwargs.get('padding') == 'LEFT':
    inputs, kwargs = format_input_left_padding(inputs, **kwargs)
  return dilated_conv2d(inputs, n_output_channels, is_training, reuse, **kwargs)
def conv2d_gru(inputs,
               n_output_channels,
               is_training,
               reuse,
               filter_size=3,
               padding="SAME",
               dilation=1,
               name='conv2d_gru',
               outputs_collections=None,
               **kwargs):
  """Adds a convolutional GRU gating layer.

  Computes reset and update gates with saturating sigmoids, a tanh
  candidate from the reset-scaled input, and blends input and candidate
  by the update gate. Output has the same shape as `inputs` (assuming
  `n_output_channels` matches the input channel count).

  Args:
    inputs: a 4-D `Tensor` [batch, height, width, channels].
    n_output_channels: int, number of channels of each gate convolution.
    is_training: bool, training or inference mode.
    reuse: whether the variable scope should be reused.
    filter_size: int or pair of ints, spatial size of the gate filters.
    padding: one of "SAME", "VALID" or "LEFT".
    dilation: positive int, dilation rate of the gate convolutions.
    name: scope name for the layer.
    outputs_collections: collections to add the outputs to.
    **kwargs: forwarded to `conv2d_v2`.

  Returns:
    A 4-D `Tensor`, the gated combination of input and candidate.
  """
  def gate_conv(tensor, scope, bias_init, pad):
    # One shared-shape convolution per gate, differing only in scope/bias.
    return conv2d_v2(
        tensor,
        n_output_channels,
        is_training,
        reuse,
        filter_size=filter_size,
        padding=pad,
        b_init=bias_init,
        dilation=dilation,
        name=scope,
        **kwargs)
  with tf.variable_scope(name, reuse=reuse):
    reset_gate = saturating_sigmoid(gate_conv(inputs, "reset", 1.0, padding))
    update_gate = saturating_sigmoid(gate_conv(inputs, "gate", 1.0, padding))
    candidate = tf.tanh(gate_conv(reset_gate * inputs, "candidate", 0.0, padding))
    gated = update_gate * inputs + (1 - update_gate) * candidate
    return _collect_named_outputs(outputs_collections, name, gated)
def conv2d_lstm(inputs,
                n_output_channels,
                is_training,
                reuse,
                filter_size=3,
                padding="SAME",
                dilation=1,
                name='conv2d_gru',
                outputs_collections=None,
                **kwargs):
  """Adds a convolutional LSTM gating layer.

  A single convolution computes all four LSTM gates at once; after layer
  normalization they gate `inputs`, which doubles as the previous cell
  state.

  Args:
    inputs: a 4-D `Tensor` [batch, height, width, channels].
    n_output_channels: int, channels per gate (the gate convolution
      produces 4x this many channels).
    is_training: bool, training or inference mode.
    reuse: whether the variable scope should be reused.
    filter_size: int or pair of ints, spatial size of the gate filters.
    padding: one of "SAME", "VALID" or "LEFT".
    dilation: positive int, dilation rate of the gate convolution.
    name: scope name. NOTE: default kept as 'conv2d_gru' (historical
      copy-paste) so existing callers/checkpoints resolve unchanged; pass
      name='conv2d_lstm' explicitly for new models.
    outputs_collections: collections to add the outputs to.
    **kwargs: forwarded to `conv2d_v2`.

  Returns:
    A 4-D `Tensor`, the gated output, same shape as `inputs`.
  """
  with tf.variable_scope(name, reuse=reuse):
    # One convolution computes all four gates.
    gates = conv2d_v2(
        inputs,
        4 * n_output_channels,
        is_training,
        reuse,
        filter_size=filter_size,
        padding=padding,
        dilation=dilation,
        name=name,
        **kwargs)
    # BUG FIX: the original referenced the misspelled `n_ouput_channels`
    # and an undefined `x` — both NameErrors at call time.
    g = tf.split(layer_norm(gates, 4 * n_output_channels), 4, axis=3)
    new_cell = tf.sigmoid(g[0]) * inputs + tf.sigmoid(g[1]) * tf.tanh(g[3])
    outputs = tf.sigmoid(g[2]) * tf.tanh(new_cell)
    return _collect_named_outputs(outputs_collections, name, outputs)
def conv2d_diagonal_gru(inputs,
                        n_output_channels,
                        is_training,
                        reuse,
                        filter_size=3,
                        padding="SAME",
                        dilation=1,
                        dropout=0.0,
                        name='conv2d_gru',
                        outputs_collections=None,
                        **kwargs):
  """Adds a convolutional diagonal GRU gating layer.

  Like `conv2d_gru`, but uses hard sigmoids for the gates (returning a
  saturation penalty) and blends the candidate with a diagonally shifted
  copy of the input (one third of the channels shift left, one third
  right, the rest stay put).

  Args:
    inputs: a 4-D `Tensor` [batch, height, width, channels].
    n_output_channels: int, channels of each gate convolution.
    is_training: bool, training or inference mode.
    reuse: whether the variable scope should be reused.
    filter_size: int or pair of ints, spatial size of the gate filters.
    padding: one of "SAME", "VALID" or "LEFT".
    dilation: positive int, dilation rate of the gate convolutions.
    dropout: float, dropout rate applied to the candidate when > 0.
    name: scope name for the layer.
    outputs_collections: collections to add the outputs to.
    **kwargs: forwarded to `conv2d_v2`.

  Returns:
    The collected (gated_output, saturation_cost_avg) tuple, where
    saturation_cost_avg is the mean of the two gates' saturation costs.
  """
  # BUG FIX: the original referenced an undefined `x` (the parameter is
  # `inputs`) and used `np` without importing numpy (added at module top).
  def conv2d_fn(tensor_in, scope, bias_start):
    return conv2d_v2(
        tensor_in,
        n_output_channels,
        is_training,
        reuse,
        filter_size=filter_size,
        padding=padding,
        b_init=bias_start,
        dilation=dilation,
        name=scope,
        **kwargs)
  with tf.variable_scope(name, reuse=reuse):
    reset, reset_cost = hard_sigmoid(conv2d_fn(inputs, "reset", 0.5))
    gate, gate_cost = hard_sigmoid(conv2d_fn(inputs, "gate", 0.7))
    candidate = tf.tanh(conv2d_fn(reset * inputs, "candidate", 0.0))
    if dropout > 0.0:
      candidate = tf.layers.dropout(candidate, dropout, training=is_training)
    # Diagonal shift: a fixed depthwise filter that moves a third of the
    # channels one pixel left, a third one pixel right, rest unchanged.
    shift_filters = n_output_channels // 3
    base_filter = ([[0, 1, 0]] * (n_output_channels - 2 * shift_filters) +
                   [[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters)
    shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32)
    shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3)
    x_shifted = tf.nn.depthwise_conv2d(inputs, shift_filter, [1, 1, 1, 1], padding="SAME")
    # Return the gated result and cost.
    total_cost_avg = 0.5 * (reset_cost + gate_cost)
    outputs = gate * x_shifted + (1 - gate) * candidate, total_cost_avg
    return _collect_named_outputs(outputs_collections, name, outputs)
def multiscale_conv2d_sum(inputs,
                          n_output_channels,
                          is_training,
                          reuse,
                          dilation_rates_and_filter_sizes,
                          pooling_type,
                          name='multiscale_conv2d_sum',
                          outputs_collections=None,
                          **kwargs):
    """Sum of several dilated convolutions.

    For all convolutions with dilation_rate > 1, we first pool the input with
    width dilation_rate.

    Args:
        inputs: a 4-D `Tensor` `[batch_size, height, width, channels]`.
        n_output_channels: integer, number of output channels for each branch.
        is_training: Bool, training or testing.
        reuse: whether or not the layer and its variables should be reused.
        dilation_rates_and_filter_sizes: a list of pairs
            (dilation_rate, filter_size), one per convolution branch.
        pooling_type: "AVG" or "MAX"; used to pre-pool the input of any
            branch whose dilation rate is greater than one.
        name: Optional name or scope for variable_scope/name_scope.
        outputs_collections: The collections to which the outputs are added.
        **kwargs: forwarded to `conv2d_v2`; must contain a "padding" entry.

    Returns:
        The 4-D `Tensor` sum of all branches, scaled by 1/sqrt(num_branches).
    """
    with tf.variable_scope(name, reuse=reuse):
        padding = kwargs["padding"]
        results, counter = [], -1
        for dilation_rate, filter_size in dilation_rates_and_filter_sizes:
            counter += 1
            if dilation_rate[0] > 1:
                # Smooth the input before applying a dilated convolution.
                pooled = pool2d(inputs, filter_size, pooling_type, padding)
            else:
                pooled = inputs
            results.append(
                conv2d_v2(
                    pooled,
                    n_output_channels,
                    is_training,
                    reuse,
                    filter_size=filter_size,
                    dilation=dilation_rate,
                    name="conv_layer%d" % counter,
                    **kwargs))
        # Scale by num_branches**-0.5 to keep output variance comparable to a
        # single branch.
        outputs = tf.add_n(results) * (len(results)**-0.5)
        return _collect_named_outputs(outputs_collections, name, outputs)
def conv1d_memory_efficient(x,
                            n_output,
                            is_training,
                            reuse,
                            trainable=True,
                            w_init=initz.he_normal(),
                            w_regularizer=tf.nn.l2_loss,
                            epsilon=1e-6,
                            forget=True,
                            test_vars=None,
                            name='conv1d_memory_efficient'):
    """LayerNorm, Conv, ReLU, Conv.

    All convolutions have kernel size 1.
    returns conv(relu(conv(layer_norm(x))))

    Args:
        x: input Tensor with shape [batch, length, io_size]
        n_output: an integer - size of the hidden layer.
        is_training: Bool, training or testing
        reuse: whether or not the layer and its variables should be reused. To be
            able to reuse the layer scope must be given.
        epsilon: a float (for layer norm)
        forget: a boolean - forget forwards activations and recompute on backprop
        test_vars: optional tuple of variables for testing purposes
        name: an optional string

    Returns:
        a Tensor with shape [batch, length, io_size]
    """
    io_size = x.get_shape().as_list()[-1]

    def forward_internal(x, f1, f2, scale, bias):
        """Forward function."""
        # Process the batch in num_splits sequential chunks, chaining them
        # with control dependencies so only one chunk of activations is
        # live at a time (memory for compute trade-off).
        num_splits = 4
        x_flat = tf.reshape(x, [-1, 1, tf.shape(x)[2]])
        xs = approximate_split(x_flat, num_splits)
        ys = []
        for i in range(num_splits):
            with tf.control_dependencies(ys[-1:]):
                n = _layer_norm_compute_python(xs[i], epsilon, scale, bias)
                y = tf.nn.conv1d(n, f1, 1, "SAME")
                y = tf.nn.relu(y)
                y = tf.nn.conv1d(y, f2, 1, "SAME")
                ys.append(y)
        y = tf.concat(ys, 0)
        y = tf.reshape(y, tf.shape(x))
        return y

    # Defun-compiled forward/backward pairs are cached per epsilon so the
    # graph function is only built once.
    key = ("conv1d_memory_efficient %s" % epsilon)
    if not forget:
        forward_fn = forward_internal
    elif key in _function_cache:
        forward_fn = _function_cache[key]
    else:

        @function.Defun(compiled=True)
        def grad_fn(x, f1, f2, scale, bias, dy):
            # Recompute the forward chunk-by-chunk and backprop each chunk,
            # accumulating weight gradients, again serialized via deps.
            with tf.control_dependencies([dy]):
                num_splits = 4
                x_shape = tf.shape(x)
                flat_shape = [-1, 1, x_shape[2]]
                x = tf.reshape(x, flat_shape)
                dy = tf.reshape(dy, flat_shape)
                xs = approximate_split(x, num_splits)
                dys = approximate_split(dy, num_splits)
                dxs = []
                df1 = 0
                df2 = 0
                dscale = 0
                dbias = 0
                deps = []
                for i in range(num_splits):
                    with tf.control_dependencies(deps):
                        n = _layer_norm_compute_python(xs[i], epsilon, scale, bias)
                        y = tf.nn.conv1d(n, f1, 1, "SAME")
                        y = tf.nn.relu(y)
                        y = tf.nn.conv1d(y, f2, 1, "SAME")
                        dxi, pdf1, pdf2, pdscale, pdbias = tf.gradients(
                            ys=[y], xs=[xs[i], f1, f2, scale, bias], grad_ys=[dys[i]])
                        df1 += pdf1
                        df2 += pdf2
                        dscale += pdscale
                        dbias += pdbias
                        dxs.append(dxi)
                        deps = [dxi, df1, df2, dscale, dbias]
                with tf.control_dependencies(deps):
                    dx = tf.concat(dxs, 0)
                    dx = tf.reshape(dx, x_shape)
                    return dx, df1, df2, dscale, dbias

        @function.Defun(grad_func=grad_fn, compiled=True, separate_compiled_gradients=True)
        def forward_fn(x, f1, f2, scale, bias):
            return forward_internal(x, f1, f2, scale, bias)

    # NOTE(review): the freshly-built forward_fn does not appear to be stored
    # back into _function_cache here — confirm whether caching is done by the
    # caller or is simply missing.
    with tf.variable_scope(name, reuse=reuse, default_name="ffn2", values=[x]):
        if test_vars is not None:
            f1, f2, scale, bias = list(test_vars)
        else:
            f1 = tf.get_variable(
                "f1", [1, io_size, n_output],
                trainable=trainable,
                initializer=w_init,
                regularizer=w_regularizer)
            f2 = tf.get_variable(
                "f2", [1, n_output, io_size],
                trainable=trainable,
                initializer=w_init,
                regularizer=w_regularizer)
            scale = tf.get_variable(
                "layer_norm_scale", [io_size], initializer=tf.ones_initializer(), trainable=trainable)
            bias = tf.get_variable(
                "layer_norm_bias", [io_size], initializer=tf.zeros_initializer(), trainable=trainable)
        if forget:
            y = forward_fn(x, f1, f2, scale, bias)
        else:
            y = forward_internal(x, f1, f2, scale, bias)
        # Defun loses static shape info; restore it from the input.
        y.set_shape(x.get_shape())
        return y
def approximate_split(x, num_splits, axis=0):
    """Split `x` along `axis` into `num_splits` nearly equal parts.

    The i-th part receives floor((size + i) / num_splits) elements, so the
    part sizes always sum to the original dimension size.

    Args:
        x: a Tensor
        num_splits: an integer
        axis: an integer.

    Returns:
        a list of num_splits Tensors.
    """
    dim = shape_list(x)[axis]
    part_sizes = []
    for offset in range(num_splits):
        part_sizes.append(tf.div(dim + offset, num_splits))
    return tf.split(x, part_sizes, axis=axis)
def pool2d(inputs,
           filter_size=(3, 3),
           pooling_type='AVG',
           padding='SAME',
           strides=(1, 1),
           outputs_collections=None,
           name='general_pool',
           **kwargs):
    """General pooling layer; Supports LEFT padding.

    Args:
        inputs: A 4-D `Tensor` of shape `[batch_size, height, width, channels]`.
        filter_size: An int or list/tuple of length 2: [kernel_height,
            kernel_width] of the pooling kernel over which the op is computed.
        pooling_type: "AVG" or "MAX".
        padding: The padding method: 'VALID', 'SAME' or 'LEFT'.
        strides: An int or list/tuple of length 2: [stride_height, stride_width].
        outputs_collections: The collections to which the outputs are added.
        name: Optional scope/name for name_scope.
        **kwargs: additional (unused).

    Returns:
        A 4-D `Tensor` [batch, new_height, new_width, channels] with the
        pooling result.

    Raises:
        ValueError: If `inputs` does not have a statically known rank of 4.
    """
    with tf.name_scope("pool", [inputs]):
        static_shape = inputs.get_shape()
        if not static_shape or len(static_shape) != 4:
            raise ValueError("Inputs to conv must have statically known rank 4.")
        # Add support for left padding: pad manually, then pool with VALID.
        # (The former rank-3 branch was unreachable — rank is guaranteed to be
        # exactly 4 by the check above — and has been removed.)
        if padding == "LEFT":
            assert filter_size[0] % 2 == 1 and filter_size[1] % 2 == 1
            height_padding = 2 * (filter_size[0] // 2)
            # Skip width padding when the (dynamic) width is 1.
            cond_padding = tf.cond(
                tf.equal(tf.shape(inputs)[2], 1), lambda: tf.constant(0),
                lambda: tf.constant(2 * (filter_size[1] // 2)))
            width_padding = 0 if static_shape[2] == 1 else cond_padding
            padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
            inputs = tf.pad(inputs, padding_)
            inputs.set_shape([static_shape[0], None, None, static_shape[3]])
            padding = "VALID"
        outputs = tf.nn.pool(inputs, filter_size, pooling_type, padding, strides=strides)
        return _collect_named_outputs(outputs_collections, name, outputs)
def variable_ref(t):
    """Walk through Identity ops to find the variable ref feeding `t`.

    Args:
        t: a Tensor

    Returns:
        the variable-ref Tensor, or None if `t` is not (transitively)
        produced by a Variable op.
    """
    tensor = t
    while tensor.op.type == "Identity":
        tensor = tensor.op.inputs[0]
    return tensor if "Variable" in tensor.op.type else None
def _acc_grads(*lists_of_grads):
"""Accumulates lists of gradients."""
acc_grads = []
for grads in zip(*lists_of_grads):
grads = [g for g in grads if g is not None]
if grads:
acc_grads.append(tf.add_n(grads))
else:
acc_grads.append(None)
return acc_grads
def _rev_layer_forward(xs, f, g, f_side_input, g_side_input, gate_outputs=False):
    """Forward pass of one reversible layer: y1 = x1 + f(x2), y2 = x2 + g(y1)."""
    x1, x2 = xs
    with tf.variable_scope("f"):
        f_out = f(x2, f_side_input) if f_side_input else f(x2)
        y1 = x1 + f_out
    with tf.variable_scope("g"):
        g_out = g(y1, g_side_input) if g_side_input else g(y1)
        y2 = x2 + g_out
    # Optionally gate the outputs together so downstream ops cannot start
    # until both are ready.
    return tf.tuple([y1, y2]) if gate_outputs else (y1, y2)
def _rev_layer_backward(ys, grad_ys, f, g, f_vars, f_side_input, g_vars, g_side_input):
    """Backprop for 1 layer."""
    y1, y2 = ys
    grad_y1, grad_y2 = grad_ys

    # Reconstruct intermediates and inputs (x1, x2)
    # stop_gradients required on fn inputs to prevent infinite recursion into this
    # grad function on the calls to tf.gradients.
    y1_stop = tf.stop_gradient(y1)
    g_side_input = [tf.stop_gradient(t) for t in g_side_input]
    with tf.variable_scope("g"):
        gy1 = g(y1_stop, g_side_input) if g_side_input else g(y1_stop)

    # Invert the forward equations: x2 = y2 - g(y1), x1 = y1 - f(x2).
    x2 = y2 - gy1
    x2_stop = tf.stop_gradient(x2)
    f_side_input = [tf.stop_gradient(t) for t in f_side_input]
    with tf.variable_scope("f"):
        fx2 = f(x2_stop, f_side_input) if f_side_input else f(x2_stop)

    x1 = y1 - fx2

    # Compute gradients wrt to inputs
    # dL/dy2 * dG(y1)/y1
    grad_gy1_y2 = tf.gradients(gy1, y1_stop, grad_y2)[0]
    grad_x1 = grad_y1 + grad_gy1_y2
    grad_x2 = (
        tf.gradients(fx2, x2_stop, grad_y1)[0] + grad_y2 + tf.gradients(fx2, x2_stop, grad_gy1_y2)[0])

    # Compute gradients wrt to vars and side inputs in f and g
    grads1 = tf.gradients(gy1, g_vars + g_side_input, grad_y2)
    grad_g_vars, grad_g_side = grads1[:len(g_vars)], grads1[len(g_vars):]
    grads2 = tf.gradients(fx2, f_vars + f_side_input, grad_y1)
    grad_f_y1, grad_f_side1 = grads2[:len(f_vars)], grads2[len(f_vars):]
    grads3 = tf.gradients(fx2, f_vars + f_side_input, grad_gy1_y2)
    grad_f_y2, grad_f_side2 = grads3[:len(f_vars)], grads3[len(f_vars):]
    # f contributes through both grad_y1 and the g-path; accumulate the two.
    grad_f_vars = _acc_grads(grad_f_y1, grad_f_y2)
    grad_f_side = _acc_grads(grad_f_side1, grad_f_side2)

    # Put returns in a tuple to ensure a constant memory budget (i.e. don't want
    # the subsequent layer to start computing and consuming memory based on a
    # subset of these values).
    outs = tf.tuple([x1, x2, grad_x1, grad_x2] + grad_f_vars + grad_g_vars + grad_f_side + grad_g_side)
    # Unflatten the tuple back into the individual gradient groups.
    x1, x2, grad_x1, grad_x2 = outs[:4]
    grad_f_vars_end = 4 + len(grad_f_vars)
    grad_g_vars_end = grad_f_vars_end + len(grad_g_vars)
    grad_f_side_end = grad_g_vars_end + len(grad_f_side)
    grad_f_vars = outs[4:grad_f_vars_end]
    grad_g_vars = outs[grad_f_vars_end:grad_g_vars_end]
    grad_f_side = outs[grad_g_vars_end:grad_f_side_end]
    grad_g_side = outs[grad_f_side_end:]

    return ((x1, x2), (grad_x1, grad_x2), (grad_f_vars, grad_f_side), (grad_g_vars, grad_g_side))
def _rev_block_forward(x1,
                       x2,
                       f,
                       g,
                       num_layers=1,
                       f_side_input=None,
                       g_side_input=None,
                       layer_scopes=None,
                       gate_outputs=False,
                       name=None):
    """Forward for a series of reversible layers.

    Records each layer's variable scope into `layer_scopes` (if given) so the
    custom backward pass can re-enter them with reuse=True.
    """
    out = (x1, x2)
    with tf.variable_scope(name, default_name="revblock"):
        for layer_idx in range(num_layers):
            with tf.variable_scope("revlayer_%d" % layer_idx) as layer_vs:
                if layer_scopes is not None:
                    layer_scopes.append(layer_vs)
                out = _rev_layer_forward(
                    out, f[layer_idx], g[layer_idx], f_side_input, g_side_input,
                    gate_outputs=gate_outputs)
    y1, y2 = out
    return y1, y2
# Matches variable names created by _rev_block_forward, capturing the layer
# index and whether the variable belongs to the "f" or "g" function.
LAYER_RE = re.compile(".*revlayer_([0-9]*)/([fg])/.*")
def rev_block(x1, x2, f, g, num_layers=1, f_side_input=None, g_side_input=None, is_training=True):
    """A block of reversible residual layers.

    A reversible residual layer is defined as:

    ```
    y1 = x1 + f(x2, f_side_input)
    y2 = x2 + g(y1, g_side_input)
    ```

    A reversible residual block, defined here, is a series of reversible residual
    layers.

    Limitations:
    * f and g must not close over any Tensors; all side inputs to f and g should
      be passed in with f_side_input and g_side_input which will be forwarded to
      f and g.
    * f and g must not change the dimensionality of their inputs in order for the
      addition in the equations above to work.

    Args:
        x1: a float Tensor.
        x2: a float Tensor.
        f: a function, (Tensor) -> (Tensor) (or list of such of length num_layers).
            Should not change the shape of the Tensor. Expected to create variables.
            See f_side_input if there are side inputs.
        g: a function, (Tensor) -> (Tensor) (or list of such of length num_layers).
            Should not change the shape of the Tensor. Expected to create variables.
            See g_side_input if there are side inputs.
        num_layers: int, number of reversible residual layers. Each layer will
            apply f and g according to the equations above, with new variables in each
            layer.
        f_side_input: list of Tensors, side input to f. If not None, signature of f
            should be (Tensor, list<Tensor>) -> (Tensor).
        g_side_input: list of Tensors, side input to g. If not None, signature of g
            should be (Tensor, list<Tensor>) -> (Tensor).
        is_training: bool, whether to actually use the efficient backprop codepath.

    Returns:
        y1, y2: tuple of float Tensors.
    """
    if f_side_input is None:
        f_side_input = []
    if g_side_input is None:
        g_side_input = []
    # Normalize f/g to per-layer lists.
    if isinstance(f, list):
        assert len(f) == num_layers
    else:
        f = [f] * num_layers
    if isinstance(g, list):
        assert len(g) == num_layers
    else:
        g = [g] * num_layers

    # Filled by the forward function below
    layer_scopes = []

    def custom_grad_fn(inputs, variables, ys, grad_ys):
        """Custom gradient fn for a block of reversible residual layers."""
        side_inputs = inputs[2:]
        # Map each side input back to its position in `inputs` so gradients
        # can be returned in the passed-in order.
        f_side_idxs = [None] * len(f_side_input)
        g_side_idxs = [None] * len(g_side_input)
        assert len(side_inputs) == len(f_side_input) + len(g_side_input)

        for i, t in enumerate(side_inputs):
            if t in f_side_input:
                f_side_idxs[f_side_input.index(t)] = i
            elif t in g_side_input:
                g_side_idxs[g_side_input.index(t)] = i
            else:
                assert False

        f_vars = [[] for _ in range(num_layers)]
        g_vars = [[] for _ in range(num_layers)]
        f_vars_idxs = [[] for _ in range(num_layers)]
        g_vars_idxs = [[] for _ in range(num_layers)]

        for i, t in enumerate(variables):
            ref = variable_ref(t)

            # Use the name to identify the layer number and function (f or g)
            regex = LAYER_RE.match(ref.name)
            layer_no = int(regex.group(1))
            fn_name = regex.group(2)
            if fn_name == "f":
                f_vars[layer_no].append(ref)
                f_vars_idxs[layer_no].append(i)
            else:
                assert fn_name == "g"
                g_vars[layer_no].append(ref)
                g_vars_idxs[layer_no].append(i)

        f_var_grads = []
        g_var_grads = []
        f_side_grads = []
        g_side_grads = []

        # Reverse variable containers to go backward
        # (note: mutates the outer f/g lists in place).
        layer_scopes.reverse()
        f_vars.reverse()
        g_vars.reverse()
        f.reverse()
        g.reverse()

        for i in range(num_layers):
            with tf.variable_scope(layer_scopes[i], reuse=True):
                ys, grad_ys, f_ret, g_ret = _rev_layer_backward(ys, grad_ys, f[i], g[i], f_vars[i],
                                                                f_side_input, g_vars[i], g_side_input)

                grad_f_vars, grad_f_side = f_ret
                grad_g_vars, grad_g_side = g_ret
                f_var_grads.append(grad_f_vars)
                g_var_grads.append(grad_g_vars)
                f_side_grads.append(grad_f_side)
                g_side_grads.append(grad_g_side)

        # Accumulate layer gradients for f_side_input and g_side_input
        acc_f_side_grads = _acc_grads(*f_side_grads)
        acc_g_side_grads = _acc_grads(*g_side_grads)

        # Use the stored idxs to put gradients in the passed-in order.
        side_input_grads = [None] * len(side_inputs)
        variable_grads = [None] * len(variables)

        # Variable gradients were collected in reverse layer order. Reverse to match
        # idxs.
        f_var_grads.reverse()
        g_var_grads.reverse()
        for idxs, grads in list(zip(f_vars_idxs, f_var_grads)) + list(zip(g_vars_idxs, g_var_grads)):
            for i, grad in zip(idxs, grads):
                variable_grads[i] = grad

        for i, grad in zip(f_side_idxs, acc_f_side_grads):
            side_input_grads[i] = grad
        for i, grad in zip(g_side_idxs, acc_g_side_grads):
            side_input_grads[i] = grad

        grad_x1, grad_x2 = grad_ys
        return [grad_x1, grad_x2] + side_input_grads, variable_grads

    # Need a forward function with positional arguments
    @fn_with_custom_grad(custom_grad_fn if is_training else None)
    def forward(x1, x2, *side_inputs):
        f_side = side_inputs[:len(f_side_input)]
        g_side = side_inputs[len(f_side_input):]
        return _rev_block_forward(
            x1,
            x2,
            f,
            g,
            num_layers=num_layers,
            f_side_input=f_side,
            g_side_input=g_side,
            layer_scopes=layer_scopes,
            gate_outputs=is_training)

    return forward(x1, x2, *(f_side_input + g_side_input))
def recompute_grad(fn):
    """Decorator that recomputes the function on the backwards pass.

    Args:
        fn: a function that takes Tensors (all as positional arguments) and
            returns a tuple of Tensors.

    Returns:
        A wrapped fn that behaves identically when called, but whose
        activations are discarded and recomputed on the backwards pass
        (i.e. on a call to tf.gradients).
    """

    @functools.wraps(fn)
    def inner(*args):
        return _recompute_grad(fn, args)

    return inner
def _recompute_grad(fn, args):
    """See recompute_grad."""

    def grad_fn(inputs, variables, outputs, output_grads):
        # Discard the forward activations and rebuild them here, then
        # differentiate the recomputed graph.
        del outputs
        recomputed = list(fn(*inputs))
        all_grads = tf.gradients(recomputed, inputs + variables, output_grads)
        input_grads = all_grads[:len(inputs)]
        var_grads = all_grads[len(inputs):]
        return input_grads, var_grads

    @fn_with_custom_grad(grad_fn)
    def fn_with_recompute(*args):
        return fn(*args)

    return fn_with_recompute(*args)
def ffn_self_attention_layer(x,
                             filter_depth,
                             output_depth,
                             is_training,
                             reuse,
                             num_parts,
                             dropout_rate,
                             share_kv=False,
                             name=None):
    """Self-attention feedforward layer.

    We use self-attention to do feedforward computations. We apply this function
    positionwise where for each position, we linearly transform the output to have
    depth filter_depth, and break up the result depth-wise into num_parts
    contiguous parts. The parts self-attend, we concatenate the results
    depth-wise, and we linearly transform to a depth of output_depth. The
    goal is to get multiplicative interactions between components of a
    representation.

    Args:
        x: a Tensor with shape [batch, length, channels]
        filter_depth: an integer
        output_depth: an integer
        is_training: Bool, training or testing
        reuse: whether or not the layer and its variables should be reused.
        num_parts: an integer dividing filter depth
        dropout_rate: a floating point number
        share_kv: Share the key value transform
        name: an optional string

    Returns:
        A Tensor with shape [batch, length, output_depth].
    """
    with tf.variable_scope(name, default_name="feedforward_self_attention", values=[x]):
        x_shape = tf.shape(x)
        part_depth = filter_depth // num_parts
        if not share_kv:
            # Single projection producing q, k and v, split depth-wise.
            combined = conv1d(x, filter_depth * 3, is_training, reuse, filter_size=1, name="qkv_transform")
            combined = tf.expand_dims(combined, axis=2)
            q, k, v = tf.split(combined, 3, axis=3)
        else:
            # Separate q projection; k and v share one projection.
            q = tf.expand_dims(
                conv1d(x, filter_depth, is_training, reuse, filter_size=1, name="q_transform"), axis=2)
            kv_combined = tf.expand_dims(
                conv1d(
                    tf.concat([x, x], axis=1),
                    filter_depth,
                    is_training,
                    reuse,
                    filter_size=1,
                    name="kv_transform"),
                axis=2)
            k, v = tf.split(kv_combined, [x_shape[1], x_shape[1]], axis=1)

        # Fold (batch, length) together and attend across the num_parts axis.
        batch_q = tf.reshape(q, [-1, 1, num_parts, part_depth])
        batch_k = tf.reshape(k, [-1, 1, num_parts, part_depth])
        batch_v = tf.reshape(v, [-1, 1, num_parts, part_depth])
        batch_q *= part_depth**-0.5
        # non-masked bias
        bias = None
        x = dot_product_attention(batch_q, batch_k, batch_v, bias, dropout_rate)
        x = tf.reshape(x, [x_shape[0], x_shape[1], filter_depth])
        x = conv1d(x, output_depth, is_training, reuse, filter_size=1, name="output_transform")
        return x
def dot_product_attention(q,
                          k,
                          v,
                          bias,
                          dropout_rate=0.0,
                          image_shapes=None,
                          name=None,
                          make_image_summary=True):
    """Dot-product attention: softmax(q·kᵀ + bias) · v.

    Args:
        q: a Tensor with shape [batch, heads, length_q, depth_k]
        k: a Tensor with shape [batch, heads, length_kv, depth_k]
        v: a Tensor with shape [batch, heads, length_kv, depth_v]
        bias: bias Tensor (see attention_bias())
        dropout_rate: a floating point number
        image_shapes: optional tuple of integer scalars.
            see comments for attention_image_summary()
        name: an optional string
        make_image_summary: True if you want an image summary.

    Returns:
        A Tensor with shape [batch, heads, length_q, depth_v].
    """
    with tf.variable_scope(name, default_name="dot_product_attention", values=[q, k, v]):
        scores = tf.matmul(q, k, transpose_b=True)
        if bias is not None:
            scores = scores + bias
        attn = tf.nn.softmax(scores, name="attention_weights")
        # dropout_rate is a drop probability; tf.nn.dropout takes keep_prob.
        attn = tf.nn.dropout(attn, 1.0 - dropout_rate)
        return tf.matmul(attn, v)
def fn_device_dependency_dict():
    """State container for fn_device_dependency, stored on the default graph."""
    graph = tf.get_default_graph()
    if not hasattr(graph, "dependency_dict"):
        graph.dependency_dict = defaultdict(list)
    return graph.dependency_dict
@contextlib.contextmanager
def fn_device_dependency(name, device=""):
    """Add control deps for name and device.

    Usage: the caller appends its output tensors to the yielded list; those
    tensors become the control dependencies for the next use of the same
    (name, device) key.
    """
    key = name + "_" + device
    outs = []

    # body() is a generator; contextlib.contextmanager drives whatever
    # generator this function returns, so returning body() works: __enter__
    # runs up to the yield, __exit__ runs the bookkeeping after it.
    def body():
        with tf.control_dependencies(fn_device_dependency_dict()[key]):
            yield outs
            assert outs

            deps = outs
            if isinstance(outs[0], list) or isinstance(outs[0], tuple):
                assert len(outs) == 1
                deps = outs[0]
            fn_device_dependency_dict()[key] = deps

    if device:
        # NOTE(review): body() executes lazily, so it is not obvious the
        # tf.device scope is still active when the generator body actually
        # runs — confirm the device placement takes effect as intended.
        with tf.device(device):
            return body()
    else:
        return body()
def underlying_variable(t):
    """Find the underlying tf.Variable object for tensor `t`.

    Args:
        t: a Tensor

    Returns:
        the tf.Variable whose ref feeds `t`.
    """
    ref = variable_ref(t)
    assert ref is not None
    # Maintain a name -> Variable index on the graph; extend it with any
    # variables created since the last call so lookups stay O(1).
    graph = tf.get_default_graph()
    if not hasattr(graph, "var_index"):
        graph.var_index = {}
    index = graph.var_index
    for v in tf.global_variables()[len(index):]:
        index[v.name] = v
    return index[ref.name]
def clip_gradient(net, clip_value_min, clip_value_max, name='clip_gradient'):
    """Clips respective gradients of a given tensor. Acts as identity for the
    forward pass, but clips gradient tensor element-wise by value during the
    backward pass. Any gradient values less than `clip_value_min` or greater than
    `clip_values_max` are set to the respective limit values.

    Args:
        net: A `tf.Tensor`.
        clip_value_min: A 0-D Tensor or scalar. The minimum value to clip by.
        clip_value_max: A 0-D Tensor or scalar. The maximum value to clip by.
        name: A name for the operation (optional, default 'clip_gradient').

    Returns:
        A `tf.Tensor` with the same type as the input tensor.
    """

    def _clip_gradient_backward(unused_op, grad):
        # Backward pass: clip the incoming gradient element-wise.
        return tf.clip_by_value(grad, clip_value_min, clip_value_max)

    @function.Defun(net.dtype, python_grad_func=_clip_gradient_backward, func_name="ClipGradient")
    def _clip_gradient_forward(x):
        # Forward pass is the identity.
        return x

    with tf.name_scope(name, values=[net]):
        output = _clip_gradient_forward(net)
        # Defun loses static shape information; restore it from the input.
        output.set_shape(net.shape)
        return output
def scale_gradient(net, scale, name="scale_gradient"):
    """Scales gradients for the backwards pass.

    This might be used to, for example, allow one part of a model to learn at a
    lower rate than the rest.

    WARNING: Think carefully about how your optimizer works. If, for example, you
    use rmsprop, the gradient is always rescaled (with some additional epsilon)
    towards unity. This means `scale_gradient` won't have the effect of
    lowering the learning rate.

    If `scale` is `0.0`, this op reduces to `tf.stop_gradient`. If `scale`
    is `1.0`, this op reduces to `tf.identity`.

    Args:
        net: A `tf.Tensor`.
        scale: The scale factor for the gradient on the backwards pass.
        name: A name for the operation (optional).

    Returns:
        A `tf.Tensor` with the same type as the input tensor.
    """
    # Cheap special cases that avoid building a Defun.
    if scale == 0.0:
        return tf.stop_gradient(net, name=name)
    elif scale == 1.0:
        return tf.identity(net, name=name)
    else:

        def _scale_gradient_backward(unused, grad):
            # Backward pass: multiply the incoming gradient by `scale`.
            return tf.multiply(tf.convert_to_tensor(scale), grad)

        @function.Defun(tf.float32, python_grad_func=_scale_gradient_backward, func_name="ScaleGradient")
        def _scale_gradient_forward(x):
            # Forward pass is the identity.
            return x

        with tf.name_scope(name, values=[net]):
            output = _scale_gradient_forward(net)
            # Defun loses static shape information; restore it from the input.
            output.set_shape(net.shape)
            return output
def normalize_gradient(net, grad_scales=None, name='normalize_gradient'):
    """Acts as identity for the forward pass; normalizes the gradient by its
    per-example L2 norm on the backward pass, optionally rescaling it first.

    Bug fix: the original signature omitted `net` even though the body used
    it, so every call raised NameError. `net` is now the first parameter,
    matching the sibling `clip_gradient` / `scale_gradient` helpers.

    Args:
        net: A float32 `tf.Tensor` whose gradient is reduced over axes
            [1, 2, 3] (i.e. a 4-D [batch, height, width, channels] tensor).
        grad_scales: optional per-example scale factors applied to the
            gradient before normalization.
        name: A name for the operation (optional).

    Returns:
        A `tf.Tensor` identical to `net` in the forward pass.
    """
    if grad_scales is not None:
        grad_scales = np.float32(grad_scales)

    def _normalize_grad_backward(op, grad):
        # Per-example L2 norm over the non-batch axes, kept for broadcasting.
        grad_norm = tf.sqrt(tf.reduce_sum(grad**2, [1, 2, 3], keep_dims=True))
        if grad_scales is not None:
            grad *= grad_scales[:, None, None, None]
        return grad / grad_norm

    @function.Defun(
        tf.float32, python_grad_func=_normalize_grad_backward, func_name="NormalizeGradient")
    def _normalize_grad_forward(x):
        # Forward pass is the identity.
        return x

    with tf.name_scope(name):
        output = _normalize_grad_forward(net)
        # Defun loses static shape information; restore it from the input.
        output.set_shape(net.shape)
        return output
def relu_density_logit(x, reduce_dims):
    """logit(density(x)): log-odds of the fraction of positive entries.

    Useful for histograms of relu activations.

    Args:
        x: a Tensor, typically the output of tf.relu
        reduce_dims: a list of dimensions

    Returns:
        a Tensor
    """
    # Small additive constant keeps the logs finite when frac is 0 or 1.
    eps = math.exp(-10)
    frac = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)
    return tf.log(frac + eps) - tf.log((1.0 - frac) + eps)
def clip_variables(optimizer, variables, weight_clip):
    """Modifies an optimizer so it clips weights to a certain value.

    Args:
        optimizer: An optimizer to perform variable weight clipping.
        variables: A list of TensorFlow variables.
        weight_clip: Positive python float to clip discriminator weights. Used to
            enforce a K-lipschitz condition, which is useful for some GAN training
            schemes (ex WGAN: https://arxiv.org/pdf/1701.07875).

    Returns:
        An optimizer to perform weight clipping after updates.

    Raises:
        ValueError: If `weight_clip` is negative.
    """
    if weight_clip < 0:
        # Bug fix: the original passed the format string and the value as two
        # separate ValueError args, so the message was never interpolated.
        raise ValueError(
            '`discriminator_weight_clip` must be positive. Instead, was %s' % weight_clip)
    return VariableClippingOptimizer(
        opt=optimizer,
        # Do no reduction, so clipping happens per-value.
        vars_to_clip_dims={var: []
                           for var in variables},
        max_norm=weight_clip,
        use_locking=True,
        colocate_clip_ops_with_vars=True)
def shape_list(x):
    """Return the shape of `x` as a list: static dims where known,
    dynamic `tf.shape` slices otherwise."""
    x = tf.convert_to_tensor(x)
    # If unknown rank, fall back to the fully dynamic shape tensor.
    if x.get_shape().dims is None:
        return tf.shape(x)
    static = x.get_shape().as_list()
    dynamic = tf.shape(x)
    return [dynamic[i] if dim is None else dim for i, dim in enumerate(static)]
class FactoredTensor(object):
    """A concise factored representation of a Tensor as two tensors.

    Represents tf.matmul(a, b, transpose_b=True) by storing only `a` and `b`,
    because the full product may be too big to realize at once — it can then
    be realized a part at a time. `a` may carry extra leading dimensions;
    they are flattened before the matrix product and re-expanded afterwards.
    """

    def __init__(self, a, b):
        self._a = a
        self._b = b

    @property
    def a(self):
        return self._a

    @property
    def b(self):
        return self._b

    def to_tensor(self):
        """Materialize the full product tf.matmul(a, b, transpose_b=True)."""
        a_shape = shape_list(self.a)
        b_shape = shape_list(self.b)
        inner_dim = b_shape[1]
        result_dim = b_shape[0]
        # Flatten a's leading dims, multiply, then restore them.
        flat_a = tf.reshape(self.a, [-1, inner_dim])
        product = tf.matmul(flat_a, self.b, transpose_b=True)
        product = tf.reshape(product, a_shape[:-1] + [result_dim])
        product.set_shape(self.a.get_shape().as_list()[:-1] + [self.b.get_shape()[0]])
        return product
def _convert_factored_tensor_to_tensor(value, *args, **kwargs):
    """Tensor-conversion hook so a FactoredTensor can be used anywhere a
    regular Tensor is expected (it is densified on demand)."""
    # call ops.convert_to_tensor to handle optional arguments appropriately
    return ops.internal_convert_to_tensor(value.to_tensor(), *args, **kwargs)


# Register the hook so TensorFlow transparently densifies FactoredTensor.
tf.register_tensor_conversion_function(FactoredTensor, _convert_factored_tensor_to_tensor)
def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):
    """If necessary, zero out inputs to a conv for padding positions.

    Args:
        inputs: a Tensor with shape [batch, length, ...]
        kernel_size: an integer or pair of integers
        nonpadding_mask: a Tensor with shape [batch, length]

    Returns:
        a Tensor with the same shape as inputs
    """
    # Size-1 kernels cannot leak information across positions, and with no
    # mask there is nothing to zero — pass the input through unchanged.
    if kernel_size == 1 or kernel_size == (1, 1) or nonpadding_mask is None:
        return inputs
    mask = nonpadding_mask
    while mask.get_shape().ndims < inputs.get_shape().ndims:
        mask = tf.expand_dims(mask, -1)
    return inputs * mask
| [
"mrinal.haloi11@gmail.com"
] | mrinal.haloi11@gmail.com |
476399e7c973c8a2cb245b7971607aff9e124174 | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/ConstrainsAnalysisPD_20210714161036.py | 2e293c1a0392ad85dfd6a951f18ece8d6fd5fc67 | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,018 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
from scipy.optimize import curve_fit
"""
The unit use is IS standard
"""
class ConstrainsAnalysis_Mattingly_Method_with_DP:
    """This is a power-based master constraints analysis"""

    def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, C_DR=0):
        """
        :param altitude: geometric altitude [m]
        :param velocity: true airspeed [m/s]
        :param beta: weight fraction
        :param wing_load: wing loading W/S [N/m^2]
        :param Hp: P_motor/P_total
        :param number_of_motor: number of motors (n)
        :param K1: drag polar coefficient for 2nd order term
        :param K2: drag polar coefficient for 1st order term
        :param C_D0: the drag coefficient at zero lift
        :param C_DR: additional drag caused, for example, by external stores,
        braking parachutes or flaps, or temporary external hardware

        :return:
        power load: P_WTO
        """
        self.h = altitude
        self.v = velocity
        self.rho = atm.atmosphere(geometric_altitude=self.h).density()
        self.beta = beta
        self.hp = Hp
        self.n = number_of_motor

        # power lapse ratio
        self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
                                                           velocity=self.v).high_bypass_ratio_turbofan()
        # Drag-polar coefficients without distributed propulsion.
        self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
        self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
        self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
        self.cdr = C_DR

        self.w_s = wing_load
        self.g0 = 9.80665  # standard gravity [m/s^2]

        # Converts thrust-to-weight terms into power loading P/W_TO.
        self.coefficient = (1 - self.hp) * self.beta * self.v / self.alpha

        # Estimation of ΔCL and ΔCD
        pd = ad.aerodynamics_with_pd(self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
        self.q = 0.5 * self.rho * self.v ** 2  # dynamic pressure
        self.cl = self.beta * self.w_s / self.q
        # print(self.cl)
        self.delta_cl = pd.delta_lift_coefficient(self.cl)
        self.delta_cd0 = pd.delta_CD_0()

    def master_equation(self, n, dh_dt, dV_dt):
        # Mattingly master equation: required power loading for load factor n,
        # climb rate dh_dt and acceleration dV_dt.
        cl = self.cl * n + self.delta_cl
        cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
        p_w = self.coefficient * (self.q / (self.beta * self.w_s) * cd + dh_dt / self.v + dV_dt / self.g0)
        return p_w

    def cruise(self):
        # Steady level flight: n=1, no climb, no acceleration.
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=0, dV_dt=0)
        return p_w

    def climb(self, roc):
        # Constant-speed climb at rate of climb `roc` [m/s].
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=roc, dV_dt=0)
        return p_w

    def level_turn(self, turn_rate=3, v=100):
        """
        assume 2 min for 360 degree turn, which is 3 degree/seconds
        assume turn at 300 knots, which is about 150 m/s
        """
        load_factor = (1 + ((turn_rate * np.pi / 180) * v / self.g0) ** 2) ** 0.5
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=load_factor, dh_dt=0, dV_dt=0)
        return p_w

    def take_off(self):
        """
        A320neo take-off speed is about 150 knots, which is about 75 m/s
        required runway length is about 2000 m
        K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
        """
        Cl_max_to = 2.3  # 2.3
        K_TO = 1.2  # V_TO / V_stall
        s_G = 1266  # ground-roll distance [m]
        p_w = 2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2 / (
            s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (
            3 / 2)
        return p_w

    def stall_speed(self, V_stall_to=65, Cl_max_to=2.32):
        # Landing configuration assumptions.
        V_stall_ld = 62
        Cl_max_ld = 2.87

        # Fixed-point iteration: the DP lift increment depends on W/S, which
        # itself depends on the lift increment; iterate until the wing-load
        # change drops below 1 N/m^2.
        a = 10
        w_s = 6000
        while a >= 1:
            cl = self.beta * w_s / self.q
            delta_cl = ad.aerodynamics_with_pd(self.h, self.v, Hp=self.hp, n=self.n, W_S=w_s).delta_lift_coefficient(cl)

            W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + delta_cl)
            W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + delta_cl)
            # The more restrictive (smaller) wing loading governs.
            W_S = min(W_S_1, W_S_2)
            a = abs(w_s-W_S)
            w_s = W_S
        return W_S

    def service_ceiling(self, roc=0.5):
        # Service ceiling defined by a residual climb capability of 0.5 m/s.
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=roc, dV_dt=0)
        return p_w

    # Constraint functions evaluated by the plotting/driver code.
    allFuncs = [take_off, stall_speed, cruise, service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP:
    """Power-based master constraints analysis (Gudmundsson method) with
    distributed-propulsion (DP) increments applied to CL and CD0.

    Each constraint method returns a required power loading P/W_TO for the
    configured wing loading; ``stall_speed`` instead returns a limiting
    wing loading.
    """
    def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, e=0.75, AR=10.3):
        """
        :param altitude: geometric altitude (passed to the atmosphere model)
        :param velocity: airspeed used for dynamic pressure
        :param beta: weight fraction
        :param wing_load: wing loading W/S
        :param Hp: DP power fraction, forwarded to aerodynamics_with_pd
            -- TODO confirm exact semantics against that module
        :param number_of_motor: number of distributed-propulsion motors
        :param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
        :param AR: wing aspect ratio, normally between 7 and 10
        :return:
            power load: P_WTO
        """
        self.h = altitude
        self.v = velocity
        self.beta = beta
        self.w_s = wing_load
        self.g0 = 9.80665  # standard gravity
        self.hp = Hp
        self.n = number_of_motor
        self.rho = atm.atmosphere(geometric_altitude=self.h).density()
        # power lapse ratio
        self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
                                                           velocity=self.v).high_bypass_ratio_turbofan()
        h = 2.43  # height of winglets
        b = 35.8  # wing span -- presumably meters, TODO confirm
        ar_corr = AR * (1 + 1.9 * h / b)  # equation 9-88, If the wing has winglets the aspect ratio should be corrected
        self.k = 1 / (np.pi * ar_corr * e)  # induced-drag factor
        self.coefficient = (1-self.hp) * self.beta * self.v / self.alpha
        # Estimation of ΔCL and ΔCD
        pd = ad.aerodynamics_with_pd(self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
        self.q = 0.5 * self.rho * self.v ** 2  # dynamic pressure
        cl = self.beta * self.w_s / self.q
        self.delta_cl = pd.delta_lift_coefficient(cl)
        self.delta_cd0 = pd.delta_CD_0()
        # TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
        cd_min = 0.02
        cd_to = 0.03
        cl_to = 0.8
        self.v_to = 68    # take-off speed
        self.s_g = 1480   # take-off ground run
        self.mu = 0.04    # presumably ground-friction coefficient -- TODO confirm
        # Fold the DP increments into the baseline aerodynamic coefficients
        self.cd_min = cd_min + self.delta_cd0
        self.cl = cl + self.delta_cl
        self.cd_to = cd_to + self.delta_cd0
        self.cl_to = cl_to + self.delta_cl
    def cruise(self):
        """Required power loading for steady level cruise."""
        p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
        return p_w * self.coefficient
    def climb(self, roc):
        """Required power loading to climb at rate-of-climb ``roc``."""
        p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
        return p_w * self.coefficient
    def level_turn(self, turn_rate=3, v=100):
        """
        assume 2 min for 360 degree turn, which is 3 degree/seconds
        assume turn at 100 m/s
        """
        load_factor = (1 + ((turn_rate * np.pi / 180) * v / self.g0) ** 2) ** 0.5
        q = 0.5 * self.rho * v ** 2  # dynamic pressure at the turn speed, not self.v
        p_w = q / self.w_s * (self.cd_min + self.k * (load_factor / q * self.w_s + self.delta_cl) ** 2)
        return p_w * self.coefficient
    def take_off(self):
        """Required power loading for the take-off ground run."""
        q = self.q / 2  # halved dynamic pressure over the ground roll -- TODO confirm vs. Gudmundsson
        p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
                1 - q * self.cl_to / self.w_s)
        return p_w * self.coefficient
    def service_ceiling(self, roc=0.5):
        """Required power loading at the ceiling (residual climb rate ``roc``)."""
        vy = (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
        q = 0.5 * self.rho * vy ** 2  # dynamic pressure at vy
        p_w = roc / vy + q / self.w_s * (self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
        # p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
        #             self.k * self.cd_min / 3) ** 0.5
        return p_w * self.coefficient
    def stall_speed(self, V_stall_to=65, Cl_max_to=2.32):
        """Limiting wing loading from stall-speed constraints.

        Iterates because the DP lift increment depends on W/S; stops once
        successive estimates differ by < 1. Returns W/S, not a power loading.
        """
        V_stall_ld = 62   # landing stall speed
        Cl_max_ld = 2.87  # maximum landing lift coefficient
        a = 10            # convergence residual, seeded above the threshold
        w_s = 6000        # initial wing-loading guess
        while a >= 1:
            cl = self.beta * w_s / self.q
            delta_cl = ad.aerodynamics_with_pd(
                self.h, self.v, Hp=self.hp, n=self.n, W_S=w_s).delta_lift_coefficient(cl)
            W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + delta_cl)
            W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + delta_cl)
            W_S = min(W_S_1, W_S_2)
            a = abs(w_s-W_S)
            w_s = W_S
        return W_S
    # Ordering matters to the __main__ driver: climb stays last (called with roc=...)
    allFuncs = [take_off, stall_speed, cruise, service_ceiling, level_turn, climb]
if __name__ == "__main__":
n = 200
w_s = np.linspace(100, 9000, n)
constrains_name = ['take off', 'stall speed', 'cruise', 'service ceiling', 'level turn @3000m',
'climb @S-L', 'climb @3000m', 'climb @7000m']
constrains = np.array([[0, 68, 0.988], [0, 80, 1], [11300, 230, 0.948],
[11900, 230, 0.8], [3000, 100, 0.984], [0, 100, 0.984],
[3000, 200, 0.975], [7000, 230, 0.96]])
color = ['c', 'k', 'b', 'g', 'y', 'plum', 'violet', 'm']
label = ['feasible region with PD', 'feasible region with PD', 'feasible region Gudmundsson',
'feasible region without PD', 'feasible region without PD', 'feasible region Mattingly']
m = constrains.shape[0]
p_w = np.zeros([2 * m, n])
for k in range(3):
plt.figure(figsize=(12, 8))
for i in range(m):
for j in range(n):
h = constrains[i, 0]
v = constrains[i, 1]
beta = constrains[i, 2]
if k == 0:
problem1 = ConstrainsAnalysis_Gudmundsson_Method_with_DP(h, v, beta, w_s[j])
problem2 = ca.ConstrainsAnalysis_Gudmundsson_Method(h, v, beta, w_s[j])
plt.title(r'Constraint Analysis: $\bf{Gudmundsson-Method}$ - Normalized to Sea Level')
elif k == 1:
problem1 = ConstrainsAnalysis_Mattingly_Method_with_DP(h, v, beta, w_s[j])
problem2 = ca.ConstrainsAnalysis_Mattingly_Method(h, v, beta, w_s[j])
plt.title(r'Constraint Analysis: $\bf{Mattingly-Method}$ - Normalized to Sea Level')
else:
problem1 = ConstrainsAnalysis_Gudmundsson_Method_with_DP(h, v, beta, w_s[j])
problem2 = ConstrainsAnalysis_Mattingly_Method_with_DP(h, v, beta, w_s[j])
plt.title(r'Constraint Analysis: $\bf{with}$ $\bf{DP}$ - Normalized to Sea Level')
if i >= 5:
p_w[i, j] = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
p_w[i + m, j] = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
else:
p_w[i, j] = problem1.allFuncs[i](problem1)
p_w[i + m, j] = problem2.allFuncs[i](problem2)
if i == 1:
l1a, = plt.plot(p_w[i, :], np.linspace(0, 250, n), color=color[i], label=constrains_name[i])
l1b, = plt.plot(p_w[i + m, :], np.linspace(0, 250, n), color=color[i], linestyle='--')
if k != 2:
l1 = plt.legend([l1a, l1b], ['with DP', 'without DP'], loc="upper right")
else:
l1 = plt.legend([l1a, l1b], ['Gudmundsson method', 'Mattingly method'], loc="upper right")
else:
plt.plot(w_s, p_w[i, :], color=color[i], label=constrains_name[i])
plt.plot(w_s, p_w[i + m, :], color=color[i], linestyle='--')
p_w[[0, 1]], p_w[[m, m+1]] = p_w[[1, 0]], p_w[[m+1, m]]
w_s = np.linspace(100, p_w[0, 1], n)
plt.fill_between(w_s, np.amax(p_w[0:m, :], axis=0), 200, color='b', alpha=0.25,
label=label[k])
w_s = np.linspace(100, p_w[m, 1], n)
plt.fill_between(w_s, np.amax(p_w[m+1:2 * m, :], axis=0), 200, color='r', alpha=0.25,
label=label[k + 3])
plt.plot(6012, 72, 'r*', markersize=10, label='True Conventional')
plt.xlabel('Wing Load: $W_{TO}$/S (N/${m^2}$)')
plt.ylabel('Power-to-Load: $P_{SL}$/$W_{TO}$ (W/N)')
plt.legend(bbox_to_anchor=(1.002, 1), loc="upper left")
plt.gca().add_artist(l1)
plt.xlim(100, 9000)
plt.ylim(0, 200)
plt.tight_layout()
plt.grid()
plt.show()
| [
"libao@gatech.edu"
] | libao@gatech.edu |
049fba86a17445a1cd47636ce635d7512068a5df | c80ec1805a7e6cb1bd3f4b3e383ef4f4cf164765 | /gen/filters/rules/person/_hasnoteregexp.py | fbb7fa8c1925049716fcab430b77a4c9100507fb | [] | no_license | balrok/gramps_addon | 57c8e976c47ea3c1d1298d3fd4406c13909ac933 | 0c79561bed7ff42c88714edbc85197fa9235e188 | refs/heads/master | 2020-04-16T03:58:27.818732 | 2015-02-01T14:17:44 | 2015-02-01T14:17:44 | 30,111,898 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,706 | py | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hasnoteregexbase import HasNoteRegexBase
#-------------------------------------------------------------------------
# "People having notes that contain a substring"
#-------------------------------------------------------------------------
class HasNoteRegexp(HasNoteRegexBase):
    """Rule matching people whose notes contain text that matches a
    regular expression; matching logic is inherited from HasNoteRegexBase."""

    name = _('People having notes containing <text>')
    description = _("Matches people whose notes contain text "
                    "matching a regular expression")
| [
"carl.schoenbach@gmail.com"
] | carl.schoenbach@gmail.com |
12dcdf99e66883812b53494f15fcd2ba332a7379 | 8d598140ac081e17ce6a549c85feb9684ef79ea3 | /config/default.py | 0ecdb196401e9db03328e20af185a2d91087b888 | [] | no_license | thuunivercity/xichuangzhu | fae1f68fb0a6de8bf875fd21427210a3170f74ca | c0622bed831b9d4cdf776138a6fc66a2da67bf0d | refs/heads/master | 2021-12-30T17:41:55.652212 | 2015-10-21T03:14:48 | 2015-10-21T03:14:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | # coding: utf-8
import os
class Config(object):
    """Base configuration class (shared defaults; environment-specific
    configs are expected to override these values)."""
    # Flask app config
    DEBUG = False
    TESTING = False
    SECRET_KEY = "\xb5\xb3}#\xb7A\xcac\x9d0\xb6\x0f\x80z\x97\x00\x1e\xc0\xb8+\xe9)\xf0}"
    PERMANENT_SESSION_LIFETIME = 3600 * 24 * 7  # one week, in seconds
    SESSION_COOKIE_NAME = 'xcz_session'
    # Root path of project
    PROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    # Site domain
    SITE_DOMAIN = "http://localhost:5000"
    # SQLAlchemy config
    # See:
    # https://pythonhosted.org/Flask-SQLAlchemy/config.html#connection-uri-format
    # http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html#database-urls
    SQLALCHEMY_DATABASE_URI = "mysql://root:@localhost/xcz"
    # SMTP config
    MAIL_SERVER = ''
    MAIL_PORT = 25
    MAIL_USE_TLS = False
    MAIL_USE_SSL = False
    MAIL_DEBUG = DEBUG
    MAIL_USERNAME = ''
    MAIL_PASSWORD = ''
    MAIL_DEFAULT_SENDER = ''
    MAIL_MAX_EMAILS = None
    MAIL_ADMIN_ADDR = '' # administrator email address
    # UploadSets config
    UPLOADS_DEFAULT_DEST = "/var/www/xcz_uploads" # storage path for uploaded files
    UPLOADS_DEFAULT_URL = "http://localhost/xcz_uploads/" # public URL for uploaded files
    # Flask-DebugToolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    # Sentry config
    SENTRY_DSN = ''
    # Host string, used by fabric
    HOST_STRING = ""
    # Douban OAuth2 config
    DOUBAN_CLIENT_ID = '0cf909cba46ce67526eb1d62ed46b35f'
    DOUBAN_SECRET = '4c87a8ef33e6c6be'
    DOUBAN_REDIRECT_URI = '%s/account/signin' % SITE_DOMAIN
    DOUBAN_LOGIN_URL = "https://www.douban.com/service/auth2/auth?client_id=%s&redirect_uri=%s" \
                       "&response_type=code" % (DOUBAN_CLIENT_ID, DOUBAN_REDIRECT_URI)
    # Aliyun OSS config
    OSS_HOST = 'oss.aliyuncs.com'
    OSS_KEY = ''
    OSS_SECRET = ''
    OSS_URL = ''
| [
"hustlzp@qq.com"
] | hustlzp@qq.com |
f15f150843fc6169ae571fb2b93a85e4245758ef | 1773166a92931b6e962d09d14fb0d734df161233 | /src/utilsmee/__init__.py | 55f9876804e07aad215404acb62a259f32abc7ae | [] | no_license | fran-jo/ScriptMEE | f46ccc5dfb4b103b959052df3fc8fd8b1ffa6dcf | eeb87cb7d3e6920216ed34083922353df0d24879 | refs/heads/master | 2021-01-10T11:46:22.674441 | 2017-11-27T16:57:14 | 2017-11-27T16:57:14 | 35,911,673 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | from viewdata import ViewData
from commandomc import CommandOMC | [
"fran_jo@hotmail.com"
] | fran_jo@hotmail.com |
4af81ae61793f4ff3cc60727dc25cc131f15c58d | 55ad581b6783ac1c4580586de8b303ef7f96c717 | /Python_scripts/SVM_train_hotsitesPlot.py | dadb93b6c2ce7ec93c1fc590c509d5314379329e | [] | no_license | chhetribsurya/PartridgeChhetri_etal | 35a65039d8079ff10af9dbf1af919e8a8a250f26 | dc7982621f4e0bd16a34b65a28f55a16bd104b0c | refs/heads/master | 2020-09-21T14:47:16.339955 | 2020-08-04T21:12:40 | 2020-08-04T21:12:40 | 224,821,392 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 34,641 | py |
#######################################
# HOT motif Sites SVM train: ##
#######################################
## Generate 5000 random sample for each fimo-bed file
import re, os, pickle
from os import makedirs
from os.path import exists, join, basename, expanduser
from glob import glob
import pandas as pd, numpy as np
output_dir = "/gpfs/gpfs1/home/schhetri/for_chris/batch_I/motifs_compinfo_analysis"
suboutput_dir = "kmer_svm/unique_TFs_motif"
if not os.path.exists(join(output_dir, suboutput_dir)):
os.makedirs(join(output_dir, suboutput_dir))
random_train_dir = "kmer_svm/random5000_samples"
if not os.path.exists(join(output_dir, random_train_dir)):
os.makedirs(join(output_dir, random_train_dir))
# Generate dict for fimo file list :
file_pat = "/gpfs/gpfs1/home/schhetri/for_chris/batch_I/fimo_motifs_total/idr_passed_motifs_total/unique_TFs/SL*fimo_motif*"
fimo_filelist = glob(file_pat)
# Generate dict for peak file list:
file_pat ="/gpfs/gpfs1/home/schhetri/for_chris/batch_I/idr_passed_peaks_total/unique_TFs/SL*narrowPeak*"
bed_filelist = glob(file_pat)
bed_regex_pat = re.compile(r'unique_TFs/.*narrowPeak_(.*)')
bed_filedict = {bed_regex_pat.findall(file)[0]:file for file in bed_filelist}
###### Random sampling at motif level:
def svm_fimo_motifs_model(fimo_file, tf_name, **kwargs):
    """Build a 30bp motif-centered BED model from a FIMO scan for SVM training.

    :param fimo_file: path to a tab-separated FIMO output file (must contain
        the columns '#pattern name', 'sequence name', 'start', 'stop', 'strand')
    :param tf_name: transcription-factor name stamped on every row
    :param kwargs: motif ids to keep; only the values are used
        (e.g. motif1=1, motif2=2)
    :return: sorted, de-duplicated DataFrame with columns
        [chrom, chromStart, chromEnd, motif_id, tf_name, strand]
    """
    # dict.iteritems() was removed in Python 3; values() works on both.
    motif_list = list(kwargs.values())
    df = pd.read_csv(fimo_file, sep="\t")
    df.rename(columns={"sequence name": "chrom", "#pattern name": "motif_name"}, inplace=True)
    df = df.loc[df["motif_name"].isin(motif_list)]
    df["chromStart"] = df["start"] + 1  # confirmed with pybed fasta to map right sequence
    df["chromEnd"] = df["stop"] + 2  # as bed intersection is exclusive of end coord
    df["tf_name"] = tf_name
    df["motif_id"] = "MOTIF" + df["motif_name"].astype(str)
    # Recenter every motif on its midpoint and take a fixed +/-15bp window
    df["midpos"] = ((df["chromStart"] + df["chromEnd"]) / 2).astype(int)
    df["chromStart"] = df["midpos"] - 15
    df["chromEnd"] = df["midpos"] + 15
    select_cols = ["chrom", "chromStart", "chromEnd", "motif_id", "tf_name", "strand"]
    motif_select_df = df.loc[:, select_cols]
    print("Current dimension of motif model : {}".format(motif_select_df.shape))
    motif_select_df = motif_select_df.drop_duplicates()
    print("Dropping duplicates if any, current dimension of motif model : {}\n".format(motif_select_df.shape))
    motif_sorted_df = motif_select_df.sort_values(["chrom", "chromStart", "chromEnd"]).reset_index(drop=True)
    return motif_sorted_df
# Per-TF driver: parse the TF name from each FIMO filename, write the full
# motif BED, then downsample to at most 5000 sites for gkm-SVM training
# (fixed seed so the sample is reproducible).
regex_pat = re.compile(r'.*fimo_motif_(.*).txt$')
#fimo_file = "/gpfs/gpfs1/home/schhetri/for_chris/batch_I/fimo_motifs_total/idr_passed_motifs_total/unique_TFs/SL1060_SE_VS_SL1167_fimo_motif_TCF12.txt"
for fimo_file in fimo_filelist: #
    TF_name = regex_pat.findall(basename(fimo_file))[0] #[('SL151597', 'SL151598', 'KLF6_v2[FLAG]')]
    bed_file = bed_filedict[TF_name]
    print("Currently processing : {} TF\n".format(TF_name))
    fimo_coord_df = svm_fimo_motifs_model(fimo_file, TF_name, motif1=1, motif2=2, motif3=3, motif4=4, motif5=5)
    fimo_coord_df.to_csv(join(output_dir, suboutput_dir, TF_name + "_motifs.bed"), sep="\t", header=False, index=False)
    # Random sampling of 5000 motif-sites for SVM train:
    if len(fimo_coord_df) > 5000:
        np.random.seed(10)
        sample_range = np.arange(0, len(fimo_coord_df))
        rindex = np.random.choice(sample_range, 5000, replace=False) # random permutation
        fimo_randn_df = fimo_coord_df.loc[rindex]
        # Make sure header = False; as nullgenerate seq would be upset with header:
        fimo_randn_df.to_csv(join(output_dir, random_train_dir, TF_name + "_motifs_sample.bed"), sep="\t", header=False, index=False)
    else:
        fimo_coord_df.to_csv(join(output_dir, random_train_dir, TF_name + "_motifs_sample.bed"), sep="\t", header=False, index=False)
# Set output dirs:
suboutput_dir = "kmer_svm_peaklevel/unique_TFs_motif"
if not os.path.exists(join(output_dir, suboutput_dir)):
os.makedirs(join(output_dir, suboutput_dir))
random_train_dir = "kmer_svm_peaklevel/random5000_samples"
if not os.path.exists(join(output_dir, random_train_dir)):
os.makedirs(join(output_dir, random_train_dir))
###### Random sampling at peak level:
def svm_fullpeak_model(peak_file):
    """Load a narrowPeak file into a sorted [chr, start, end, tf_name] frame.

    The TF name is parsed from the file's basename, which must contain
    'narrowPeak_<TF>'.

    :param peak_file: path to a tab-separated, headerless narrowPeak file
    :return: DataFrame sorted by chr/start/end/tf_name
    """
    tf_name = re.compile(r".*narrowPeak_(.*)$").findall(basename(peak_file))[0]
    df_read = pd.read_csv(peak_file, header=None, sep="\t")
    df_read = df_read.iloc[:, [0, 1, 2]]  # keep only the coordinate columns
    df_read.columns = ["chr", "start", "end"]
    df_read["tf_name"] = tf_name
    select_cols = ["chr", "start", "end", "tf_name"]
    df_read = df_read.loc[:, select_cols]
    # Fixed: was a Python 2 print statement (SyntaxError on Python 3)
    print(df_read.shape)
    df_read = df_read.sort_values(select_cols).reset_index(drop=True)
    return df_read
regex_pat = re.compile(r".*narrowPeak_(.*)$")
for peak_file in bed_filelist: #
TF_name = regex_pat.findall(basename(peak_file))[0] #[('SL151597', 'SL151598', 'KLF6_v2[FLAG]')]
bed_file = bed_filedict[TF_name]
print("Currently processing : {} TF\n".format(TF_name))
peak_coord_df = svm_fullpeak_model(peak_file)
peak_coord_df.to_csv(join(output_dir, suboutput_dir, TF_name + "_peaks.bed"), sep="\t", header=False, index=False)
# Random sampling of 5000 motif-sites for SVM train:
if len(peak_coord_df) > 5000:
np.random.seed(10)
sample_range = np.arange(0, len(peak_coord_df))
rindex = np.random.choice(sample_range, 5000, replace=False) # random permutation
fimo_randn_df = peak_coord_df.loc[rindex]
# Make sure header = False; as nullgenerate seq would be upset with header:
fimo_randn_df.to_csv(join(output_dir, random_train_dir, TF_name + "_peaks_sample.bed"), sep="\t", header=False, index=False)
else:
peak_coord_df.to_csv(join(output_dir, random_train_dir, TF_name + "_peaks_sample.bed"), sep="\t", header=False, index=False)
# Set output dirs:
suboutput_dir = "kmer_svm_centpeaklevel/unique_TFs_motif"
if not os.path.exists(join(output_dir, suboutput_dir)):
os.makedirs(join(output_dir, suboutput_dir))
random_train_dir = "kmer_svm_centpeaklevel/random5000_samples"
if not os.path.exists(join(output_dir, random_train_dir)):
os.makedirs(join(output_dir, random_train_dir))
###### Random sampling at peak level:
def svm_centpeak_model(peak_file):
    """Load a narrowPeak file and recenter each peak to a fixed 100bp window.

    Each peak is replaced by [midpoint-50, midpoint+50]; the TF name is
    parsed from the basename, which must contain 'narrowPeak_<TF>'.

    :param peak_file: path to a tab-separated, headerless narrowPeak file
    :return: DataFrame sorted by chr/start/end/tf_name
    """
    tf_name = re.compile(r".*narrowPeak_(.*)$").findall(basename(peak_file))[0]
    df_read = pd.read_csv(peak_file, header=None, sep="\t")
    df_read = df_read.iloc[:, [0, 1, 2]]  # keep only the coordinate columns
    df_read.columns = ["chr", "start", "end"]
    # Fixed-width window (+/-50bp) around the peak midpoint
    df_read["midpos"] = ((df_read["start"] + df_read["end"]) / 2).astype(int)
    df_read["start"] = df_read["midpos"] - 50
    df_read["end"] = df_read["midpos"] + 50
    df_read["tf_name"] = tf_name
    select_cols = ["chr", "start", "end", "tf_name"]
    df_read = df_read.loc[:, select_cols]
    # Fixed: was a Python 2 print statement (SyntaxError on Python 3)
    print(df_read.shape)
    df_read = df_read.sort_values(select_cols).reset_index(drop=True)
    return df_read
regex_pat = re.compile(r".*narrowPeak_(.*)$")
for peak_file in bed_filelist: #
TF_name = regex_pat.findall(basename(peak_file))[0] #[('SL151597', 'SL151598', 'KLF6_v2[FLAG]')]
bed_file = bed_filedict[TF_name]
print("Currently processing : {} TF\n".format(TF_name))
peak_coord_df = svm_centpeak_model(peak_file)
peak_coord_df.to_csv(join(output_dir, suboutput_dir, TF_name + "_centpeaks.bed"), sep="\t", header=False, index=False)
# Random sampling of 5000 motif-sites for SVM train:
if len(peak_coord_df) > 5000:
np.random.seed(10)
sample_range = np.arange(0, len(peak_coord_df))
rindex = np.random.choice(sample_range, 5000, replace=False) # random permutation
fimo_randn_df = peak_coord_df.loc[rindex]
# Make sure header = False; as nullgenerate seq would be upset with header:
fimo_randn_df.to_csv(join(output_dir, random_train_dir, TF_name + "_centpeaks_sample.bed"), sep="\t", header=False, index=False)
else:
peak_coord_df.to_csv(join(output_dir, random_train_dir, TF_name + "_centpeaks_sample.bed"), sep="\t", header=False, index=False)
#################################################################
## Train SVM on random5000 samples and find PR-AUC for each TFs:
#################################################################
import numpy as np
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
# from sklearn.metrics import average_precision_score
# from sklearn.metrics import roc_auc_score
# Per-TF cross-validated PR-AUC from the gkm-SVM cvpred files; labels come
# from the 'class' column (+1 positives / -1 nulls) and scores from 'score'.
tf_namelist = []
PR_AUClist = []
output_dir= "/gpfs/gpfs1/home/schhetri/for_chris/batch_I/motifs_compinfo_analysis"
file_pat = "/gpfs/gpfs1/home/schhetri/for_chris/batch_I/motifs_compinfo_analysis/kmer_svm/random5000_samples/gkm_train_output/*.cvpred.txt"
crossval_filelist = glob(file_pat)
regex_pat = re.compile(r'^(.*)_motifs_sample.cvpred.txt')
for each in crossval_filelist:
    tf_name = regex_pat.findall(basename(each))[0]
    cv_svmscore_df = pd.read_csv(each, sep="\t", header=None)
    cv_svmscore_df.columns = ["score_idx","score", "class", "crossfold_valset"]
    cv_svmscore_df["score"] = cv_svmscore_df["score"].astype(float).round(4)
    select_cols = ["score", "class"]
    cv_svmscore_df = cv_svmscore_df.loc[:,select_cols]
    # Assign labels and scores predicted by clf to compute PR_AUC:
    y_test = cv_svmscore_df["class"]
    y_scores = cv_svmscore_df["score"]
    precision, recall, thresholds = precision_recall_curve(y_test, y_scores)
    pr_areacurve = auc(recall, precision) # pr_areacurve = average_precision_score(y_test, y_scores)
    tf_namelist.append(tf_name)
    PR_AUClist.append(round(pr_areacurve,4))
    print('Area Under PR Curve(AP): {0:0.4f}'.format(pr_areacurve))
    # Alt method: The Area under Precision-Recall curve
    # Alt method: The Area under an ROC(Receiver Operateing Characteristic) curve
    # fpr, tpr, thresholds = roc_curve(y_test, y_scores)
    # roc_areacurve = auc(fpr, tpr)
    # Shortcut method if output is continuous probabitly; else use yscores = RandomForestClassifier.predict_proba(Xtest)[:,1]
    # pr_areacurve = average_precision_score(y_test, y_scores)
    # roc_areacurve = roc_auc_score(y_test, y_scores)
pr_auc_df = pd.DataFrame({"tf_name":tf_namelist, "pr_auc":PR_AUClist})
pr_auclist_mean = round(pr_auc_df["pr_auc"].mean(),2)
print('Mean Area Under PR Curve(AP) for TF_list: {}'.format(pr_auclist_mean))
pr_auc_df.to_csv(join(output_dir, "PRAUC_all_TF.txt"), sep="\t", header=True, index=False)
# Give annotation to all TFs:
anno_df = pd.read_csv(join(output_dir, "TFs_Annotation_file.txt"), sep="\t")
prauc_anno_df = pd.merge(pr_auc_df, anno_df, left_on="tf_name",right_on="Target", how="left")
prauc_anno_df.to_csv(join(output_dir, "PRAUC_all_TF_annotated.txt"), sep="\t", header=True, index=False)
from plotnine import *
import pandas as pd
from os.path import join
""" Local machine plotting """
plot_df = pd.read_csv("/Users/suryachhetri/Dropbox/for_genemodels/motifs_compinfo_analysis/kmer_svm/PRAUC_all_TF_annotated.txt", sep="\t")
# Boxplot:
out_dir = "/Users/suryachhetri/Dropbox/for_genemodels/motifs_compinfo_analysis/kmer_svm"
plot = (ggplot(plot_df) +
aes(y="pr_auc", x="annotation", fill="annotation")+
geom_boxplot(stat='boxplot') +
ggtitle("PRAUC distribution") +
# theme_bw() +
# scale_x_continuous(name="Principal Component 1") + scale_y_continuous(name="Principal Component 2") +
ylab("Precision Recall AUC") + xlab("TF Category") +
scale_fill_manual(name="IDEAS Anno",values=["blueviolet","orange","green","red", "burlywood"]) +
# guides(fill=guide_legend(title="IDEAS Anno"))
theme(
axis_title_y = element_text(size=12), #theme(axis_text_x=element_text(angle=45))
axis_title_x = element_text(size=12), #theme(axis_text_x=element_text(angle=45))
plot_title = element_text(size=14, face="bold"),
legend_title = element_text(size=8, face="bold")
# axis_text_y = element_text(size=1.3),
# axis_text_y = element_text(size=1.3)
)
)
plot
ggsave(plot,join(out_dir, "PRAUC_ideas_anno.pdf"))
plot = (ggplot(plot_df) +
aes(y="pr_auc", x="Category", fill="Category")+
geom_boxplot(stat='boxplot') +
ggtitle("Mean PR-AUC : 0.74") +
# theme_bw() +
# scale_x_continuous(name="Principal Component 1") + scale_y_continuous(name="Principal Component 2") +
ylab("Precision Recall AUC") + xlab("TF Category") +
scale_fill_manual(name="IDEAS Anno",values=["blueviolet","orange","green","red", "burlywood"]) +
# guides(fill=guide_legend(title="IDEAS Anno"))
theme(
axis_title_y = element_text(size=12), #theme(axis_text_x=element_text(angle=45))
axis_title_x = element_text(size=12), #theme(axis_text_x=element_text(angle=45))
plot_title = element_text(size=14, face="bold"),
legend_title = element_text(size=8, face="bold")
# axis_text_y = element_text(size=1.3),
# axis_text_y = element_text(size=1.3)
)
)
plot
ggsave(plot,join(out_dir, "PRAUC_dbf_crf.pdf"))
##################################################################
## Generate HOT motif-sites for downstream and svm-score analysis:
## using svm_fimo_motifs_model() func above, for *motifs.bed files
##################################################################
import pandas as pd, numpy as np
import pybedtools, pickle
from glob import glob
# Generate dict for fimo file list :
file_pat = "/gpfs/gpfs1/home/schhetri/for_chris/batch_I/fimo_motifs_total/idr_passed_motifs_total/unique_TFs/SL*fimo_motif*"
fimo_filelist = glob(file_pat)
suboutput_dir = "kmer_svm/fimo_motifs_for_hotanalysis"
if not os.path.exists(join(output_dir, suboutput_dir)):
os.makedirs(join(output_dir, suboutput_dir))
def hotsite_fimo_motifs_model(fimo_file, tf_name, **kwargs):
    """Build exact motif-coordinate BED intervals from a FIMO scan.

    Unlike svm_fimo_motifs_model, the original motif span is kept (no
    midpoint recentering) so the intervals can be merged into HOT sites
    downstream.

    :param fimo_file: path to a tab-separated FIMO output file
    :param tf_name: transcription-factor name stamped on every row
    :param kwargs: motif ids to keep; only the values are used
    :return: sorted, de-duplicated DataFrame with columns
        [chrom, chromStart, chromEnd, motif_id, tf_name, strand]
    """
    # dict.iteritems() was removed in Python 3; values() works on both.
    motif_list = list(kwargs.values())
    df = pd.read_csv(fimo_file, sep="\t")
    df.rename(columns={"sequence name": "chrom", "#pattern name": "motif_name"}, inplace=True)
    df = df.loc[df["motif_name"].isin(motif_list)]
    df["chromStart"] = df["start"] + 1  # confirmed with pybed fasta to map right sequence
    df["chromEnd"] = df["stop"] + 2  # as bed intersection is exclusive of end coord
    df["tf_name"] = tf_name
    df["motif_id"] = "MOTIF" + df["motif_name"].astype(str)
    select_cols = ["chrom", "chromStart", "chromEnd", "motif_id", "tf_name", "strand"]
    motif_select_df = df.loc[:, select_cols]
    print("Current dimension of motif model : {}".format(motif_select_df.shape))
    motif_select_df = motif_select_df.drop_duplicates()
    print("Dropping duplicates if any, current dimension of motif model : {}\n".format(motif_select_df.shape))
    motif_sorted_df = motif_select_df.sort_values(["chrom", "chromStart", "chromEnd"]).reset_index(drop=True)
    return motif_sorted_df
# Per-TF driver for the HOT-site model: write the exact motif coordinates
# (no recentering) for motifs 1-5 of each TF.
regex_pat = re.compile(r'.*fimo_motif_(.*).txt$')
for fimo_file in fimo_filelist: #
    TF_name = regex_pat.findall(basename(fimo_file))[0] #[('SL151597', 'SL151598', 'KLF6_v2[FLAG]')]
    print("Currently processing : {} TF\n".format(TF_name))
    fimo_coord_df = hotsite_fimo_motifs_model(fimo_file, TF_name, motif1=1, motif2=2, motif3=3, motif4=4, motif5=5)
    fimo_coord_df.to_csv(join(output_dir, suboutput_dir, TF_name + "_motifs.bed"), sep="\t", header=False, index=False)
# Generate hot-sites with processed fimo motif files:
file_pat = join(output_dir, suboutput_dir, "*motifs.bed")
fimo_motif_filelist = glob(file_pat)
prehot_filelist = []
for each in fimo_motif_filelist:
    prehot_df = pd.read_csv(each, sep="\t",header=None)
    prehot_df.columns = ["chrom", "chromStart", "chromEnd", "motif_id", "tf_name", "strand"]
    # Per-file row index, used to build unique per-motif tags below
    linenum_df = pd.Series(prehot_df.index.values).astype(str)
    prehot_df["id"] = prehot_df["tf_name"] + "|" + linenum_df
    prehot_df["motif_tag"] = prehot_df["tf_name"] + "|" + prehot_df["motif_id"]
    prehot_df["motif_linetag"] = prehot_df["tf_name"] + "|" + prehot_df["motif_id"] + "|" + linenum_df
    prehot_filelist.append(prehot_df)
# combine prehot dataframe for hotsite generation:
combined_prehot_df = pd.concat(prehot_filelist, ignore_index=True)
sorted_prehot_df = combined_prehot_df.sort_values(["chrom", "chromStart", "chromEnd"]).reset_index(drop=True)
prehot_pybed = pybedtools.BedTool.from_dataframe(sorted_prehot_df)
# bedtools merge with 1-based column ops: col5=tf_name (count, count_distinct),
# col8=motif_tag (count, count_distinct), col9=motif_linetag (collapse), col7=id (collapse)
merge_hot_pybed = prehot_pybed.merge(c=[5,5,8,8,9,7], o=["count","count_distinct","count", "count_distinct", "collapse", "collapse"])
merge_hot_pybed_df = pd.read_csv(merge_hot_pybed.fn, sep="\t", header=None)
merge_hot_pybed_df.columns = ["chrom", "chromStart", "chromEnd", "total_tfcount", "uniq_tfcount", "total_motif_count", "distinct_motif_count", "merged_hotmotif_id", "id"]
select_cols = ["chrom", "chromStart", "chromEnd", "total_tfcount", "uniq_tfcount", "distinct_motif_count", "merged_hotmotif_id", "id" ]
final_hotmotif_df = merge_hot_pybed_df.loc[:,select_cols]
final_hotmotif_df = final_hotmotif_df.sort_values(["uniq_tfcount"]).reset_index(drop=True)
# Binning HOT motif-sites
bins = [1,2,3,4,5,10,20,30,40,50,70,100,500]
names = ["1", "2", "3", "4", "5-9", "10-19", "20-29", "30-39", "40-49", "50-69", "70-99", "100+"]
bins_1 = [1,5,10,20,30,40,50,70,100,500]
names_1 = ["1-4", "5-9", "10-19", "20-29", "30-39", "40-49", "50-69", "70-99", "100+"]
final_hotmotif_df['binned_tf_count'] = pd.cut(final_hotmotif_df["uniq_tfcount"], bins, right=False, labels=names)
final_hotmotif_df['binned_tf_count_1'] = pd.cut(final_hotmotif_df["uniq_tfcount"], bins_1, right=False, labels=names_1)
final_hotmotif_df.to_csv(join(output_dir, "Hotmotif_sites.bed"), sep="\t", header=True, index=False)
final_hotmotif_df.to_pickle(join(output_dir,"Hotmotif_sites.pkl"))
# Frequency count:
hotsite_freq_count = final_hotmotif_df["uniq_tfcount"].value_counts().reset_index(name="site_count").rename(columns={"index":"uniq_tfcount"})
binned_hotsite_freq_count = final_hotmotif_df["binned_tf_count"].value_counts().reset_index(name="site_count").rename(columns={"index":"uniq_tfcount"})
from plotnine import *
import pandas as pd
from os.path import join
""" Local machine plotting """
test = pd.read_csv("/Users/suryachhetri/Dropbox/for_genemodels/motifs_compinfo_analysis/kmer_svm/Hotmotif_sites.bed", sep="\t")
test["diff"] = test.chromEnd - test.chromStart
test["diff"].mean(), test["diff"].max(), test["diff"].min()
plot_df = test["binned_tf_count_1"].value_counts().reset_index(name="site_count").rename(columns={"index":"uniq_tfcount"})
plot_df["log2(site_count)"] = np.log2(plot_df["site_count"])
plot_df["site_count"].sum()
# Order the factor/categorical variable to color legend accordingly:
plot_df["uniq_tfcount_new"] = pd.Categorical(plot_df["uniq_tfcount"], categories=["1-4", "5-9", "10-19", "20-29", "30-39", "40-49", "50-69", "70-99", "100+"], ordered=True)
out_dir = "/Users/suryachhetri/Dropbox/for_genemodels/motifs_compinfo_analysis/kmer_svm"
plot = (ggplot(plot_df) +
aes(y="log2(site_count)", x="uniq_tfcount_new")+
geom_bar(stat='identity') +
ggtitle("Hotsites dist with number of TFs cobound") +
theme_bw() +
# scale_x_continuous(name="Principal Component 1") + scale_y_continuous(name="Principal Component 2") +
ylab("Log2(Site Counts)") + xlab("Unique TFs co-bound") +
#scale_fill_manual(name="IDEAS Anno",values=["blueviolet","orange","green","red", "burlywood"]) +
# guides(fill=guide_legend(title="TFs Co-Bound")) +
theme(
axis_title_y = element_text(size=12), #theme(axis_text_x=element_text(angle=45))
axis_title_x = element_text(size=12), #theme(axis_text_x=element_text(angle=45))
plot_title = element_text(size=14, face="bold"),
legend_title = element_text(size=8, face="bold")
# axis_text_y = element_text(size=1.3),
# axis_text_y = element_text(size=1.3)
)
)
plot
ggsave(plot,join(out_dir, "Hotmotif_sites_barplot_distribution_Figure.pdf"))
##############################################################################
## Generate dictionary for TF SVM-scores for each TFs for downstream analysis:
##############################################################################
import re, pickle
from glob import glob
from os.path import join, basename
# Create SVMscore dict for cross-fold validation sets(only on null seq scores)
cv_svmscoredict = {}
file_pat = "/gpfs/gpfs1/home/schhetri/for_chris/batch_I/motifs_compinfo_analysis/kmer_svm/random5000_samples/gkm_train_output/*.cvpred.txt"
crossval_filelist = glob(file_pat)
# Capture the TF name prefix from e.g. "CTCF_motifs_sample.cvpred.txt".
regex_pat = re.compile(r'^(.*)_motifs_sample.cvpred.txt')
for each in crossval_filelist:
    tf_name = regex_pat.findall(basename(each))[0]
    cv_svmscore_df = pd.read_csv(each, sep="\t", header=None)
    cv_svmscore_df.columns = ["score_idx","score", "class", "crossfold_valset"]
    # Round to 4 decimals for compact storage / stable comparisons.
    cv_svmscore_df["score"] = cv_svmscore_df["score"].astype(float).round(4)
    select_cols = ["score", "class"]
    cv_svmscore_df = cv_svmscore_df.loc[:,select_cols]
    # Select only null seq i.e class -1 for later usage
    cv_svmscore_df = cv_svmscore_df.loc[cv_svmscore_df["class"] == -1 ]
    # Resulting shape: {tf_name: {"score": [...], "class": [...]}}
    svm_scoredict = {}
    tf_svm_scoredict = cv_svmscore_df.to_dict(orient="list")
    svm_scoredict[tf_name] = tf_svm_scoredict
    cv_svmscoredict.update(svm_scoredict)
# NOTE(review): `output_dir` is only assigned further down in this file; this
# section presumably ran in a session where it was already set -- confirm.
filename = join(output_dir, "kmer_svm", "cv_nullseq_svmscores.pkl")
fileobj = open(filename, 'wb')
pickle.dump(cv_svmscoredict, fileobj)
fileobj.close()
# with open(filename, "rb") as readobj:
#     cv_svmscoredict = pickle.load(readobj)
# Create SVMscore dict for scored DNA sequence:
master_tf_svmscoredict = {}
file_pat = "/gpfs/gpfs1/home/schhetri/for_chris/batch_I/motifs_compinfo_analysis/kmer_svm/random5000_samples/gkm_predict_output/*gkmpredict.scores"
filelist = glob(file_pat)
# Capture the TF name prefix from e.g. "CTCF_motifs.gkmpredict.scores".
regex_pat = re.compile(r'^(.*)_motifs.gkmpredict.scores')
for each in filelist:
    tf_name = regex_pat.findall(basename(each))[0]
    svmscore_df = pd.read_csv(each, sep="\t", header=None)
    svmscore_df.columns = ["score_idx","score"]
    svmscore_df["score"] = svmscore_df["score"].astype(float).round(4)
    # Build "TFNAME|rownumber" ids so scores can be joined back to hotsites.
    svmscore_df["id"] = tf_name + "|" + svmscore_df.index.astype(str)
    select_cols = ["score", "id"]
    svmscore_df = svmscore_df.loc[:,select_cols]
    if svmscore_df.shape[0] > 6000:
        # Large files: drop a fixed random 5000-row training subset (seeded
        # for reproducibility) and keep the remainder as the test sample.
        np.random.seed(10)
        sample_range = np.arange(0, len(svmscore_df))
        rindex = np.random.choice(sample_range, 5000, replace=False) # random permutation
        random_sample_set = set(rindex)
        total_df_set = set(svmscore_df.index)
        testset_leftidx = list(total_df_set - random_sample_set)
        test_sample_df = svmscore_df.loc[testset_leftidx] # testset_df.to_dict(orient="list")
        # {tf_name: {id: {"score": s}}}
        svm_scoredict = {}
        tf_svm_scoredict = test_sample_df.set_index(["id"]).to_dict("index")
        svm_scoredict[tf_name] = tf_svm_scoredict
        master_tf_svmscoredict.update(svm_scoredict)
    else:
        # Small files: keep every scored sequence.
        svm_scoredict = {}
        tf_svm_scoredict = svmscore_df.set_index(["id"]).to_dict("index")
        svm_scoredict[tf_name] = tf_svm_scoredict
        master_tf_svmscoredict.update(svm_scoredict)
filename = join(output_dir, "kmer_svm", "tf_svmscores.pkl")
fileobj = open(filename, 'wb')
pickle.dump(master_tf_svmscoredict, fileobj)
fileobj.close()
# with open(filename, "rb") as readobj:
#     master_tf_svmscoredict = pickle.load(readobj)
#######################################################################################
## Downstream analysis using hotsites-tfbound_file cv-and-tf svmscore dictionary above:
#######################################################################################
import pandas as pd
import re, pickle
from glob import glob
from os.path import join, basename
output_dir= "/gpfs/gpfs1/home/schhetri/for_chris/batch_I/motifs_compinfo_analysis"
final_hotmotif_df = pd.read_pickle(join(output_dir,"Hotmotif_sites.pkl"))
final_hotmotif_df.rename(columns={"uniq_tfcount" : "num_tfbound", "id" : "hotsite_idx"}, inplace=True)
final_hotmotif_df = final_hotmotif_df.loc[:, ["num_tfbound", "hotsite_idx"]]
# pd.read_csv(join(output_dir, "Hotmotif_sites.bed"), sep="\t")
# Reload the two score dictionaries pickled by the sections above.
filename = join(output_dir, "kmer_svm", "cv_nullseq_svmscores.pkl")
with open(filename, "rb") as readobj:
    cv_svmscoredict = pickle.load(readobj)
filename = join(output_dir, "kmer_svm", "tf_svmscores.pkl")
with open(filename, "rb") as readobj:
    master_tf_svmscoredict = pickle.load(readobj)
# For hotsite problem 1:
num_tfbound_list = []
tfscore_list = []
tf_namelist = []
# For hotsite problem 2:
master_list = []
# Hotsites dataframe - for problem 1 and 2 both:
# `hotsite_idx` is a comma-joined list of "TFNAME|rownumber" ids per hotsite.
for idx,row in final_hotmotif_df.iterrows():
    tfid_splitted=row["hotsite_idx"].split(",")
    num_tfbound =row["num_tfbound"]
    for each_id in tfid_splitted:
        # print num_tfbound, each_id
        tf_name = each_id.split("|")[0]
        if tf_name in master_tf_svmscoredict:
            # NOTE(review): indentation reconstructed -- the `else` is read as
            # pairing with this inner lookup (score missing -> None); confirm.
            if master_tf_svmscoredict[tf_name].get(each_id):
                tf_svmscore = master_tf_svmscoredict[tf_name].get(each_id)["score"]
                tfscore_list.append(tf_svmscore)
                num_tfbound_list.append(num_tfbound)
                tf_namelist.append(tf_name)
                # For hotsite problem 2:
                master_list.append([idx, num_tfbound, tf_svmscore, each_id, tf_name])
            else:
                tfscore_list.append(None)
                num_tfbound_list.append(num_tfbound)
                tf_namelist.append(tf_name)
                # For hotsite problem 2:
                master_list.append([idx, num_tfbound, None, each_id, tf_name])
# For hotsite problem 1:
tf_svmscore_df = pd.concat([pd.Series(num_tfbound_list), pd.Series(tfscore_list), pd.Series(tf_namelist)], axis=1)
tf_svmscore_df.columns = ["tf_cobound", "svm_score", "tf_name"]
# Binning HOT motif-sites
bins = [1,2,3,4,5,10,20,30,40,50,70,100,500]
names = ["1", "2", "3", "4", "5-9", "10-19", "20-29", "30-39", "40-49", "50-69", "70-99", "100+"]
bins_1 = [1,5,10,20,30,40,50,70,100,500]
names_1 = ["1-4", "5-9", "10-19", "20-29", "30-39", "40-49", "50-69", "70-99", "100+"]
# right=False makes bins left-closed: [1,2), [2,3), ...
tf_svmscore_df['binned_tf_count'] = pd.cut(tf_svmscore_df["tf_cobound"], bins, right=False, labels=names)
tf_svmscore_df['binned_tf_count_1'] = pd.cut(tf_svmscore_df["tf_cobound"], bins_1, right=False, labels=names_1)
tf_svmscore_df.to_csv(join(output_dir,"Hotmotif_sites_problem1_data.txt"), header=True, index=False, sep="\t")
# Create boxplot dataframe for hotsites and distribution of clf values:
# Null-sequence scores become the "Matched_null" control bin.
nullseq_scorelist = []
for each_tf in cv_svmscoredict:
    nullseq_svmscore_df = pd.DataFrame(cv_svmscoredict[each_tf])
    nullseq_scorelist.append(nullseq_svmscore_df)
combined_nullseq = pd.concat(nullseq_scorelist, ignore_index=True)
combined_nullseq["binned_tf_count_1"] = "Matched_null"
combined_nullseq_df = combined_nullseq.loc[:,["binned_tf_count_1", "score"]]
combined_nullseq_df.columns = ["cobound_tf_bins", "svm_score"]
tf_svmscore_boxdf = tf_svmscore_df.loc[:,["binned_tf_count_1", "svm_score"]]
tf_svmscore_boxdf.columns = ["cobound_tf_bins", "svm_score"]
boxplot_svmscore_df = pd.concat([tf_svmscore_boxdf,combined_nullseq_df], ignore_index=True)
boxplot_svmscore_df.to_csv(join(output_dir,"Hotmotif_sites_problem1_boxplot_data.txt"), header=True, index=False, sep="\t")
################################################
# For hotsite problem 2 (master list df) and TF_bound >=50
master_tf_svmscore_df = pd.DataFrame(master_list)
master_tf_svmscore_df.columns = ["final_hotsite_idx", "tf_bound", "score", "id", "tf_name"]
# Restrict to HOT sites with at least 50 co-bound TFs.
master_tf_svmscore_df = master_tf_svmscore_df.loc[(master_tf_svmscore_df["tf_bound"]>=50)]
# Handles the case with hotsites containing more than 1 peaks or motifs
# for same factor like FOXA3 and KDM2A at hotsite 4 and 6 in hepg2 hotsites:
df_grouped = master_tf_svmscore_df.groupby(["final_hotsite_idx", "tf_bound", "tf_name"])["score"].mean().reset_index(name="svmscore")
# Wide matrix: hotsite x TF -> mean svm score.
df_final = df_grouped.pivot(index="final_hotsite_idx", columns="tf_name", values="svmscore")
# For hotsite problem 2 # Rank hotsites with TFs_svmscore or classifier value or less : ()
# Percentile rank per TF column; low pct = highest classifier value.
df_rank = df_final.rank(ascending=False,method="dense",pct=True)
# Total frequency of hotsites containing top 5% classifier value for any TF present:
df_rank["5perc_present"] = df_rank.apply(lambda row : row <= 0.05, axis=0).astype(int).apply(np.sum, axis=1)
df_rank["75perc_present"] = df_rank.apply(lambda row : row > 0.25, axis=0).astype(int).apply(np.sum, axis=1)
# df_rank_merged = pd.merge(df_rank.reset_index(), master_tf_svmscore_df.loc[:,["final_hotsite_idx", "tf_bound"]], on = "final_hotsite_idx")
# df_rank_final = df_rank_merged.drop_duplicates()
# Long-format counts for the top-5% vs bottom-75% histogram.
df_top5 = df_rank.reset_index(drop=True)[["5perc_present"]]
df_top5["percent_classifier"] = "top5_percent"
df_top5.rename(columns={"5perc_present": "num_bound_tfs"}, inplace=True)
df_bottom75 = df_rank[["75perc_present"]]
df_bottom75["percent_classifier"] = "bottom75_percent"
df_bottom75.rename(columns={"75perc_present": "num_bound_tfs"}, inplace=True)
df_rank_top_bottom = pd.concat([df_top5, df_bottom75], ignore_index=True)
df_rank_top_bottom.to_csv(join(output_dir,"Hotmotif_sites_problem2_histogram_data.txt"), header=True, index=False, sep="\t")
###############################################
# For hotsite problem 3 (master list df) - for piechart:
# Melt excludes the two helper columns appended above (columns[:-2]).
df_rank_melt = pd.melt(df_rank.reset_index(), id_vars=['final_hotsite_idx'], var_name = ["tf_name"], value_vars=df_rank.columns[:-2].tolist())
# idxmin on pct-rank == TF with the *highest* classifier value per hotsite.
max_classifier_val_idx = df_rank_melt.groupby(["final_hotsite_idx"])["value"].idxmin()
hotsite_piechart_df = df_rank_melt.loc[max_classifier_val_idx]
hotsite_piechart_final_df = hotsite_piechart_df["tf_name"].value_counts()
hotsite_piechart_final_df = hotsite_piechart_final_df.reset_index(name="hotsite_count")
# Though total hotsite is # hotsite_piechart_df.shape[0] i.e 2040, giving 1
hotsite_piechart_final_df["total_hotsite"] = hotsite_piechart_final_df[hotsite_piechart_final_df["hotsite_count"]>=1].shape[0] # 2040
# NOTE(review): 2040 and 1000 below are hard-coded dataset-specific totals.
hotsite_piechart_final_df["hotsite_fraction_w_recurring_motif"] = ((hotsite_piechart_final_df["hotsite_count"])/2040)*100 #
hotsite_piechart_final_df["reshaped_percent"] = ((hotsite_piechart_final_df["hotsite_count"])/1000)*100 #
hotsite_piechart_final_df.to_csv(join(output_dir,"Hotmotif_sites_problem3_piechart_data.txt"), header=True, index=False, sep="\t")
###################### if needed; else ignore this analysis #########################
# Grouping by TF bound gives us the frequency of hotsites with TF classifier value
# Give score of 1 if present that is (more than 1 TF with classifier value 0.05);
df_rank["5perc_binary"] = np.where(df_rank["5perc_present"] > 0, 1, 0 )
df_rank_merged = pd.merge(df_rank.reset_index(), master_tf_svmscore_df.loc[:,["final_hotsite_idx", "tf_bound"]], on = "final_hotsite_idx")
df_rank_final = df_rank_merged.drop_duplicates()
df_rank_final.groupby(["tf_bound"])["5perc_binary"].sum().reset_index(name="5perc_present")
###################### if needed; else ignore above analysis #########################
from plotnine import *
import pandas as pd
from os.path import join
""" Local machine plotting """
# Boxplot: svm-score distribution per co-bound-TF bin vs matched null.
plot_df = pd.read_csv("/Users/suryachhetri/Dropbox/for_genemodels/motifs_compinfo_analysis/kmer_svm/Hotmotif_sites_problem1_boxplot_data.txt", sep="\t")
# Order the factor/categorical variable to color legend accordingly:
plot_df["cobound_tf_bins_new"] = pd.Categorical(plot_df["cobound_tf_bins"], categories=["1-4", "5-9", "10-19", "20-29", "30-39", "40-49", "50-69", "70-99", "100+", "Matched_null"], ordered=True)
out_dir = "/Users/suryachhetri/Dropbox/for_genemodels/motifs_compinfo_analysis/kmer_svm"
plot = (ggplot(plot_df) +
aes(y="svm_score", x="cobound_tf_bins_new", fill="cobound_tf_bins_new")+
# NOTE(review): outlier_shape="None" passes the *string* "None", not the
# None object -- confirm this actually hides outliers in this plotnine version.
geom_boxplot(stat='boxplot', outlier_shape="None") +
ggtitle("SVM weights distribution") +
theme_bw() +
# scale_x_continuous(name="Principal Component 1") + scale_y_continuous(name="Principal Component 2") +
ylab("SVM classifier scores") + xlab("Number of TFs co-bound") +
#scale_fill_manual(name="IDEAS Anno",values=["blueviolet","orange","green","red", "burlywood"]) +
guides(fill=guide_legend(title="TFs Co-Bound")) +
theme(
axis_title_y = element_text(size=12), #theme(axis_text_x=element_text(angle=45))
axis_title_x = element_text(size=12), #theme(axis_text_x=element_text(angle=45))
plot_title = element_text(size=14, face="bold"),
legend_title = element_text(size=8, face="bold")
# axis_text_y = element_text(size=1.3),
# axis_text_y = element_text(size=1.3)
)
)
# plot
ggsave(plot,join(out_dir, "Hotmotif_sites_problem1_boxplot_svm_clf_weights_Figure.pdf"))
# Histogram: per-hotsite counts of TFs with top-5% vs bottom-75% ranked scores.
plot_df = pd.read_csv("/Users/suryachhetri/Dropbox/for_genemodels/motifs_compinfo_analysis/kmer_svm/Hotmotif_sites_problem2_histogram_data.txt", sep="\t")
out_dir = "/Users/suryachhetri/Dropbox/for_genemodels/motifs_compinfo_analysis/kmer_svm"
plot = (ggplot(plot_df) +
aes(x="num_bound_tfs", fill="percent_classifier")+
geom_histogram(stat ='bin', binwidth=1) +
ggtitle("Ranked Classifier-Weights Distribution") +
# theme_bw() +
# scale_x_continuous(name="Principal Component 1") + scale_y_continuous(name="Principal Component 2") +
ylab("Number of Hotsites(>=50 TFs cobound)") + xlab("Number of bound TFs with SVM classifier values (each site)") +
#scale_fill_manual(name="IDEAS Anno",values=["blueviolet","orange","green","red", "burlywood"]) +
guides(fill=guide_legend(title="Ranked Classifier Value")) +
theme(
axis_title_y = element_text(size=12), #theme(axis_text_x=element_text(angle=45))
axis_title_x = element_text(size=12), #theme(axis_text_x=element_text(angle=45))
plot_title = element_text(size=14, face="bold"),
legend_title = element_text(size=8, face="bold")
# axis_text_y = element_text(size=1.3),
# axis_text_y = element_text(size=1.3)
)
)
plot
ggsave(plot,join(out_dir, "Hotmotif_sites_problem2_histogram_svm_clf_value_figure.pdf"))
| [
"chhetribsurya@gmail.com"
] | chhetribsurya@gmail.com |
374048fea69e0a66ffbb5fa9fa42e53287967cc4 | 7110d018f0474388e18bc9c760f306c5b8593aae | /app/client/models.py | 4d9febe84b05dc0166fdb6c8333590e5ad974d84 | [
"MIT"
] | permissive | alexiuasse/django-system-iuasse | 6a9a4d27fee5ec864fe6ec5901b539aba2e9b0a5 | fa22519f2d2b6256e5334f032a95fd19366e5df4 | refs/heads/main | 2023-05-14T21:25:35.021196 | 2021-06-03T17:00:56 | 2021-06-03T17:00:56 | 369,536,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,817 | py | from django.db import models
from django.utils import timezone
from django.utils.translation import gettext as _
from django.urls import reverse_lazy
from app.models import TimeStampMixin
class Occupation(TimeStampMixin):
    """Model for an Occupation, used to further identify a client."""
    # Display name of the occupation; required (blank/null default to False).
    name = models.CharField(verbose_name=_("Name"),
                            max_length=128,
                            blank=False,
                            null=False)

    def __str__(self) -> str:
        # Human-readable representation shown in admin/select widgets.
        return self.name
class Client(TimeStampMixin):
    """Model representing a client (contact data plus reporting date)."""
    name = models.CharField(verbose_name=_("Name"),
                            max_length=128)
    email = models.EmailField(verbose_name=_("E-mail"),
                              blank=True,
                              null=True)
    address = models.TextField(verbose_name=_("Address"),
                               blank=True,
                               null=True)
    # max_length is in real 16 (66) 9 9205-4030 but with the mask it must be 17
    phone = models.CharField(verbose_name=_("Phone"),
                             max_length=17,
                             blank=True,
                             null=True)
    birthday = models.DateField(verbose_name=_("Birthday"),
                                blank=True,
                                null=True)
    # Optional link to an Occupation; kept (as NULL) if the occupation is deleted.
    occupation = models.ForeignKey("client.Occupation",
                                   on_delete=models.SET_NULL,
                                   null=True,
                                   blank=True,
                                   verbose_name=_("Occupation"))
    date = models.DateField(
        verbose_name=_("Date"),
        default=timezone.now,
        help_text=_("This date is used for statistics, build charts. "),
    )

    def __str__(self) -> str:
        return self.name

    def get_absolute_url(self):
        # e.g. "client:client:details" -- resolved from app label + model name.
        return reverse_lazy(f'{self._meta.app_label}:{self._meta.model_name}:details')

    @staticmethod
    def get_exclude_fields():
        """
        Fields of the current model that are marked to be excluded from visualization.
        """
        return []

    def get_add_fields(self):
        """
        Custom fields to be added for visualization. Must be a dict of {'name': content}.
        """
        return {}

    def get_dict_data(self):
        """
        Gather all fields of the current model and return them as a
        {verbose_name: value} dictionary, used mainly to build a layout.
        """
        exclude = self.get_exclude_fields()
        data = dict([(field.verbose_name, getattr(self, field.name))
                     for field in self._meta.fields if field.name not in exclude])
        data.update(self.get_add_fields())
        return data
| [
"alexiuasse@gmail.com"
] | alexiuasse@gmail.com |
f1cc588717cf958753e27ba5146ec48527a2a847 | 834e1884dcd2551a7f2610c5d65199103a14af67 | /Notes/kivyA/kivyCalculatorA/main.py | c8c899b653b9dd10de9635ff18b06bf128fbf3a3 | [] | no_license | fwparkercode/Programming2_SP2019 | b8ece0fe98fe8260ff20859c80a73ce5ba38a5e6 | 4938a66979cd9f7cbb82d3d447a5ae2bfd6766b0 | refs/heads/master | 2020-04-19T05:07:43.800906 | 2019-05-23T21:23:15 | 2019-05-23T21:23:15 | 167,978,933 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
# Fixed phone-like window size for the calculator UI.
Window.size = (300, 400)
class CalculatorApp(App):
    # Kivy app; presumably paired with a "calculator.kv" layout file by
    # Kivy's naming convention -- TODO confirm.
    def build(self):
        # Return the root widget of the application.
        return CalculatorLayout()
class CalculatorLayout(BoxLayout):
    # Root widget. `self.display` is expected to be wired up in the kv file.
    def calculate(self):
        """Evaluate the expression in the display and show the result.

        FIX: the previous version crashed on any invalid expression
        (e.g. "2++3" or division by zero). Invalid input now shows "Error".
        NOTE: eval() of raw user input is acceptable only because input comes
        from the calculator's own buttons; never feed it untrusted text.
        """
        try:
            answer = eval(self.display.text)
        except Exception:
            self.display.text = "Error"
        else:
            self.display.text = str(answer)
if __name__ == "__main__":
app = CalculatorApp()
app.run() | [
"alee@fwparker.org"
] | alee@fwparker.org |
1415d8bba5a688bff8491421c00411f76e869d4e | ac1fdf53359b53e183fb9b2602328595b07cf427 | /ParlAI/parlai/tasks/convai_chitchat/__init__.py | 5799539a22004d2cd8989bf2143289815efc44f1 | [] | no_license | Ufukdogann/MasterThesis | 780410c5df85b789136b525bce86ba0831409233 | b09ede1e3c88c4ac3047800f5187c671eeda18be | refs/heads/main | 2023-01-24T18:09:52.285718 | 2020-11-27T16:14:29 | 2020-11-27T16:14:29 | 312,416,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:3fd485c3f7deada658caa392a4950fbf1531c77db9ba58fa9948d13d9d8ea490
size 222
| [
"134679852Ufuk*"
] | 134679852Ufuk* |
5bc28abe7faa596eada6d769cc702ae804e44e90 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/7da9bb68ce9ad08b3a648b49d52eddd1cc153137-<delete_dag>-bug.py | c76a57772d4c18f939e94cb8e62aacf36607bf5b | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py |
def delete_dag(dag_id, keep_records_in_log=True):
    """
    Delete a DAG and every database record referencing it.

    :param dag_id: the dag_id of the DAG to delete
    :type dag_id: str
    :param keep_records_in_log: whether to keep records of the given dag_id
        in the Log table in the backend database (for reasons like auditing).
        The default value is True.
    :type keep_records_in_log: bool
    :return: number of deleted rows
    :raises DagNotFound: if no DAG with ``dag_id`` is registered
    :raises DagFileExists: if the DAG file is still on disk (the DAG would
        simply be re-imported on the next DagBag refresh)
    """
    session = settings.Session()
    DM = models.DagModel
    dag = session.query(DM).filter(DM.dag_id == dag_id).first()
    if dag is None:
        raise DagNotFound('Dag id {} not found'.format(dag_id))
    # BUGFIX: the condition was inverted (`not os.path.exists(...)`), which
    # refused deletion only when the file was already gone and silently
    # allowed it while the file still existed. Deletion must be blocked
    # while the DAG *file* is still present.
    if dag.fileloc and os.path.exists(dag.fileloc):
        raise DagFileExists('Dag id {} is still in DagBag. Remove the DAG file first: {}'.format(dag_id, dag.fileloc))
    count = 0
    # Sweep every mapped ORM model carrying a dag_id column; the LIKE clause
    # also catches subdag rows named '<dag_id>.<child>'.
    for m in models.base.Base._decl_class_registry.values():
        if hasattr(m, 'dag_id'):
            if keep_records_in_log and m.__name__ == 'Log':
                continue
            cond = or_(m.dag_id == dag_id, m.dag_id.like(dag_id + '.%'))
            count += session.query(m).filter(cond).delete(synchronize_session='fetch')
    if dag.is_subdag:
        # A subdag's runs/failures/instances live under the parent's dag_id
        # with the subdag's own name as the task_id.
        p, c = dag_id.rsplit('.', 1)
        for m in (models.DagRun, models.TaskFail, models.TaskInstance):
            count += session.query(m).filter(m.dag_id == p, m.task_id == c).delete()
    session.commit()
    return count
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
ab913b549c945e35ccf4e56526cc50b5f608c3bb | 69f43872d7ca655912dcf46d246702e5ee5b1068 | /apps/sad/models.py | e9976ad1406a6342062239d786ead6d7a03ce241 | [
"BSD-3-Clause"
] | permissive | tiposaurio/backenddj | d0d5884d1880c9b09b20aaa35e20da0173520535 | e16f609eca0c292c8c694c7eb279fe99296b45e3 | refs/heads/master | 2021-01-16T21:51:00.352923 | 2014-05-16T03:59:52 | 2014-05-16T03:59:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,833 | py | # _*_ coding: utf-8 _*_
"""
@copyright Copyright (c) 2014 Submit Consulting
@author Angel Sullon (@asullom)
@package sad
Descripcion: Registro de los modelos de la app sad
"""
from django.db import models
from django.contrib.auth.models import User, Group, Permission
from apps.params.models import Person
from apps.space.models import Solution, Association, Enterprise, Headquar
# Usaremos las tablas de django:
# User
# Group (para nosotros Perfil)
# Permission+ContentType (para nosostros Recursos)
class Profile(models.Model):
    """
    Extends the information of the system's users (one-to-one with User).
    """
    # Last headquarters / module the user visited, stored as plain id strings.
    last_headquar_id = models.CharField(max_length=50, null=True, blank=True)
    last_module_id = models.CharField(max_length=50, null=True, blank=True)
    user = models.OneToOneField(User)
    person = models.OneToOneField(Person)

    class Meta:
        permissions = (
            # ("profile", "Puede hacer TODAS las operaciones del perfil"),
        )

    def __unicode__(self):
        return self.user.username
# Disabled post_save signal kept for reference (creates a Profile per new User).
'''
def create_user_profile(sender, instance, created, **kwargs):
    if created :
        Profile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
'''
class UserState(models.Model):
    """
    Records the history of user state (activation) changes.
    """
    ON = "ON"
    OFF = "OFF"
    USER_STATES = (
        (ON, "Activate"),
        (OFF, "Deactivate"),
    )
    state = models.CharField(max_length=50, choices=USER_STATES, default=ON)
    description = models.TextField(null=True, blank=True)
    user = models.ForeignKey(User)
    registered_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        permissions = (
            # ("profile", "Puede hacer TODAS las operaciones del perfil"),
        )

    def __unicode__(self):
        return "%s %s" % (self.user.username, self.state)
# Disabled post_save signal kept for reference.
'''
def create_user_profile(sender, instance, created, **kwargs):
    if created :
        Profile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
'''
class Access(models.Model):
    """
    Records users' accesses (sign-in / sign-out events) to the system.
    """
    INPUT = "INPUT"
    OUTPUT = "OUTPUT"
    ACCESS_TYPES = (
        (INPUT, "Input"),
        (OUTPUT, "Output"),
    )
    access_type = models.CharField(max_length=50, choices=ACCESS_TYPES, default=INPUT)
    # Client IP and session key captured at login/logout time.
    ip = models.CharField(max_length=50, null=True, blank=True)
    session_key = models.TextField(null=True, blank=True)
    user = models.ForeignKey(User)
    registered_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        permissions = (
            ("access", "Puede hacer TODAS las operaciones del access"),
        )

    def __unicode__(self):
        return "%s %s" % (self.user.username, self.access_type)
class Backup(models.Model):
    """
    Records backups created by users (the original docstring was a
    copy-paste of Access's; the fields clearly describe backup files).
    """
    file_name = models.CharField(max_length=50)
    description = models.TextField(null=True, blank=True)
    size = models.CharField(max_length=50, null=True, blank=True)
    user = models.ForeignKey(User)
    registered_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        permissions = (
            ("backup", "Puede hacer TODAS las operaciones de backup"),
        )

    def __unicode__(self):
        return self.file_name
class Module(models.Model):
    """
    Modules of the system (professional, informative web, sales, backend).
    """
    PRO = "PRO"
    WEB = "WEB"
    VENTAS = "VENTAS"
    BACKEND = "BACKEND"
    MODULES = (
        (PRO, "Profesional"),
        (WEB, "Web informativa"),
        (VENTAS, "Ventas"),
        (BACKEND, "Backend Manager"),
    )
    module = models.CharField(max_length=50, choices=MODULES, default=BACKEND)
    name = models.CharField(max_length=50)
    is_active = models.BooleanField(default=True)
    icon = models.CharField(max_length=50, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    registered_at = models.DateTimeField(auto_now_add=True)
    modified_in = models.DateTimeField(auto_now=True)
    # FIX: dropped `null=True` on the ManyToManyFields -- Django documents that
    # null has no effect on ManyToManyField (check fields.W340); `blank=True`
    # alone makes the relation optional in forms. No schema/behavior change.
    solutions = models.ManyToManyField(Solution, verbose_name="solutions", blank=True)
    groups = models.ManyToManyField(Group, related_name="module_set", verbose_name="groups", blank=True)  # verbose_name is for Module
    initial_groups = models.ManyToManyField(Group, related_name="initial_groups_module_set", verbose_name="initial_groups", blank=True)  # distinct related_name avoids clashing with module_set

    class Meta:
        permissions = (
            ("module", "Puede hacer TODAS las operaciones de modulos"),
        )

    def __unicode__(self):
        return "%s %s" % (self.module, self.name)
class Menu(models.Model):
    """
    System menu entries, organised as a tree through ``parent``.
    """
    MODULES = Module.MODULES
    module = models.CharField(max_length=50, choices=MODULES, default=Module.BACKEND)
    title = models.CharField(max_length=50)
    url = models.CharField(max_length=150, default="#")
    # FIX: removed `max_length=50` -- max_length is a CharField option and is
    # not valid for IntegerField (it was silently ignored). `pos` is just the
    # ordering position within the menu.
    pos = models.IntegerField(default=1)
    icon = models.CharField(max_length=50, null=True, blank=True, default="")
    is_active = models.BooleanField(default=True)
    description = models.TextField(null=True, blank=True)
    registered_at = models.DateTimeField(auto_now_add=True)
    modified_in = models.DateTimeField(auto_now=True)
    # Optional permission gating the menu entry's visibility.
    permission = models.ForeignKey(Permission, null=True, blank=True)
    # Self-referential parent for nested menus; NULL for top-level entries.
    parent = models.ForeignKey("Menu", verbose_name="parent", null=True, blank=True)

    class Meta:
        permissions = (
            ("menu", "Puede hacer TODAS las operaciones de menús"),
        )

    def __unicode__(self):
        return "%s %s" % (self.module, self.title)
class UserProfileEnterprise(models.Model):
    """
    Permissions (user + group/profile) scoped to an enterprise.
    """
    # is_admin = models.BooleanField(default=False)
    registered_at = models.DateTimeField(auto_now_add=True)
    modified_in = models.DateTimeField(auto_now=True)
    user = models.ForeignKey(User)
    group = models.ForeignKey(Group)
    enterprise = models.ForeignKey(Enterprise)

    class Meta:
        permissions = (
            # ("userprofileenterprise", "Puede hacer TODAS las operaciones de userprofileenterprise"),
        )

    def __unicode__(self):
        return "%s %s - %s" % (self.user.username, self.group.name, self.enterprise.name)
class UserProfileHeadquar(models.Model):
    """
    Permissions (user + group/profile) scoped to a headquarters.
    """
    # is_admin = models.BooleanField(default=False)
    registered_at = models.DateTimeField(auto_now_add=True)
    modified_in = models.DateTimeField(auto_now=True)
    user = models.ForeignKey(User)
    group = models.ForeignKey(Group)
    headquar = models.ForeignKey(Headquar)

    class Meta:
        permissions = (
            # ("userprofileheadquar", "Puede hacer TODAS las operaciones de userprofileheadquar"),
        )

    def __unicode__(self):
        return "%s %s - %s" % (self.user.username, self.group.name, self.headquar.name)
class UserProfileAssociation(models.Model):
    """
    Permissions (user + group/profile) scoped to an association.
    """
    # is_admin = models.BooleanField(default=False)
    registered_at = models.DateTimeField(auto_now_add=True)
    modified_in = models.DateTimeField(auto_now=True)
    user = models.ForeignKey(User)
    group = models.ForeignKey(Group)
    association = models.ForeignKey(Association)

    class Meta:
        permissions = (
            # ("userprofileassociation", "Puede hacer TODAS las operaciones de userprofileassociation"),
        )

    def __unicode__(self):
        return "%s %s - %s" % (self.user.username, self.group.name, self.association.name)
| [
"asullom@gmail.com"
] | asullom@gmail.com |
db3983df30bc88f8057cdd776e83c68efde56582 | 06acfec630a30695d79b77df4d682cb27dce64a2 | /wev/test_resolver.py | 1d9bd12b34fad7aa80bbb99495a328793a2b8fdc | [] | permissive | cariad/wev | b828bd7b5b47117bf571ae1e3e3a60abb619f396 | 2abf38485d86c593bbedcb7c295143104e385b77 | refs/heads/main | 2023-04-11T20:15:40.548185 | 2021-05-13T06:50:05 | 2021-05-13T06:50:05 | 317,793,612 | 5 | 0 | MIT | 2021-05-13T06:50:06 | 2020-12-02T08:14:41 | Python | UTF-8 | Python | false | false | 2,559 | py | from datetime import datetime, timedelta
from typing import Iterator, Optional
from mock import Mock, patch
from pytest import fixture, mark, raises
from wev.mock_plugin import MockPlugin
from wev.resolver import fresh_resolution, resolve
from wev.sdk import PluginBase, Resolution
from wev.sdk.exceptions import CannotResolveError
from wev.state import MockState
@fixture
def get_plugin() -> Iterator[PluginBase]:
plugin = MockPlugin({}, return_value=("(value)",), return_expires_at=True)
with patch("wev.resolver.get_plugin", return_value=plugin) as patched:
yield patched
@fixture
def get_non_caching_plugin() -> Iterator[PluginBase]:
plugin = MockPlugin({}, return_value=("(value)",), return_expires_at=False)
with patch("wev.resolver.get_plugin", return_value=plugin) as patched:
yield patched
@fixture
def get_plugin_cannot_resolve_error() -> Iterator[PluginBase]:
plugin = MockPlugin({}, raises_cannot_resolve_error=True)
with patch("wev.resolver.get_plugin", return_value=plugin) as patched:
yield patched
@mark.parametrize(
"resolution, expect",
[
(None, False),
(Resolution.make(value=""), False),
(
Resolution.make(
value=None, expires_at=datetime.now() - timedelta(seconds=60)
),
False,
),
(
Resolution.make(
value=None, expires_at=datetime.now() + timedelta(seconds=60)
),
True,
),
],
)
def test_fresh_resolution(resolution: Optional[Resolution], expect: bool) -> None:
expect_resolution = resolution if expect else None
assert fresh_resolution(resolution=resolution) == expect_resolution
def test_resolve(get_plugin: Mock) -> None:
environs = resolve(state=MockState())
assert environs["alpha"] == "(value)"
assert environs["beta"] == "(value)"
assert environs["gamma"] == "gamma-value-old"
assert environs["delta"] == "(value)"
def test_resolve__removes_cache(get_non_caching_plugin: Mock) -> None:
state = MockState()
state.resolution_cache.update(names=("alpha",), resolution=Mock())
assert ("alpha",) in state.resolution_cache.resolutions
resolve(state=state)
assert ("alpha",) not in state.resolution_cache.resolutions
def test_resolve__cannot_resolve_error(get_plugin_cannot_resolve_error: Mock) -> None:
with raises(CannotResolveError) as ex:
resolve(state=MockState())
assert str(ex.value) == '"alpha-handler" failed: cannot reticulate splines'
| [
"noreply@github.com"
] | cariad.noreply@github.com |
40fc1773b286492d28d12972d62a9df0239d6aea | 3c41443364da8b44c74dce08ef94a1acd1b66b3e | /osf/migrations/0012_refactor_sessions.py | bbb8897ebfd22c094898150c9cd99591c3967d5e | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] | permissive | CenterForOpenScience/osf.io | 71d9540be7989f7118a33e15bc4a6ce2d2492ac1 | a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e | refs/heads/develop | 2023-09-04T03:21:14.970917 | 2023-08-31T14:49:20 | 2023-08-31T14:49:20 | 10,199,599 | 683 | 390 | Apache-2.0 | 2023-09-14T17:07:52 | 2013-05-21T15:53:37 | Python | UTF-8 | Python | false | false | 1,371 | py | # Generated by Django 3.2.17 on 2023-05-08 19:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import osf.models.base
import osf.utils.fields
class Migration(migrations.Migration):
dependencies = [
('osf', '0011_institution_rework_post_release'),
]
operations = [
migrations.CreateModel(
name='UserSessionMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('session_key', models.CharField(max_length=255)),
('expire_date', osf.utils.fields.NonNaiveDateTimeField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('user', 'session_key')},
},
bases=(models.Model, osf.models.base.QuerySetExplainMixin),
),
migrations.DeleteModel(
name='Session',
),
]
| [
"noreply@github.com"
] | CenterForOpenScience.noreply@github.com |
8f47220d6c0dfacdbf4b8aa948170400d4c06dec | 21ad3868aec282c45f813941c76a287eef6dd071 | /AirBnB_Clone/config/settings.py | 4bfb6cc1689916cd26c88e48b09e629befc917c1 | [] | no_license | BKLemontea/NomadCode-Clonecoding | 5f4cb919f872c4c4c699fbe451c72571e40621b9 | e84079a3f8f9d7ae58b1a2cfc8b75fee4fdf0c53 | refs/heads/master | 2023-04-30T16:29:13.087156 | 2020-01-23T13:15:17 | 2020-01-23T13:15:17 | 232,075,064 | 0 | 0 | null | 2023-04-21T20:46:55 | 2020-01-06T10:15:10 | Python | UTF-8 | Python | false | false | 3,088 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is committed in source control; load it from
# an environment variable before any non-local deployment.
SECRET_KEY = 'y($0$dge8lb@w@l3!8*k40v2s!v52&jo0qm2^69%q^1=es-c4s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG on, Django serves only localhost-style hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Default: single on-disk SQLite database next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"jyf1128@naver.com"
] | jyf1128@naver.com |
d527e98714fbd97b33b6c55fea8ef13ac609e987 | 0fc55f28bf0ce73b85c9d78ea9dc7c26a497fa69 | /semana_11/exercicios/1248.py | b7103a6e9d555cd2642d025ca7e8b6657e533dd9 | [] | no_license | valeriacavalcanti/IP-2020.2---R | 6083f84b33242217e51e0a9a7440362179f3fcc3 | d376b4c4b55f7042cc5ae1520296eae570154e75 | refs/heads/main | 2023-05-08T01:08:45.757978 | 2021-06-08T03:09:56 | 2021-06-08T03:09:56 | 341,371,219 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | qtde = int(input())
for i in range(qtde):
dieta = list(input())
cafe = list(input())
almoco = list(input())
cheater = False
for i in range(len(cafe)):
if (cafe[i] in dieta):
dieta.remove(cafe[i])
else:
cheater = True
break
if (cheater == False):
for i in range(len(almoco)):
if (almoco[i] in dieta):
dieta.remove(almoco[i])
else:
cheater = True
break
if (cheater):
print('CHEATER')
else:
dieta.sort()
for i in range(len(dieta)):
print(dieta[i], end='')
print()
| [
"valeria.cavalcanti@ifpb.edu.br"
] | valeria.cavalcanti@ifpb.edu.br |
b0c5cf73141ec8203c4acb05630c125f46231006 | 7d274ce8dae971228a23157a409b561020c22f66 | /tools/packages/SCons/Tool/suncc.py | c062324709a9e119578f5e8b109c3106f9a330b9 | [] | no_license | Eigenlabs/EigenD-Contrib | a212884d4fdf9ae0e1aeb73f6311606212e02f94 | 586fe17471571802295c792697f255e6cab51b17 | refs/heads/master | 2020-05-17T07:54:48.668925 | 2013-02-05T10:20:56 | 2013-02-05T10:20:56 | 3,239,072 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | """SCons.Tool.suncc
Tool-specific initialization for Sun Solaris (Forte) CC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/suncc.py 4577 2009/12/27 19:43:56 scons"
import SCons.Util
import cc
def generate(env):
    """
    Add Builders and construction variables for Forte C and C++ compilers
    to an Environment.
    """
    # Inherit the generic C-compiler setup first, then override the pieces
    # that differ for Sun's Forte toolchain: the C++ compiler name, the PIC
    # flag needed for shared objects, and the shared-object naming scheme.
    cc.generate(env)
    forte_overrides = {
        'CXX': 'CC',
        'SHCCFLAGS': SCons.Util.CLVar('$CCFLAGS -KPIC'),
        'SHOBJPREFIX': 'so_',
        'SHOBJSUFFIX': '.o',
    }
    for key, value in forte_overrides.items():
        env[key] = value
def exists(env):
    # Tool is usable iff the Sun 'CC' compiler binary can be located via
    # the construction environment's search path.
    return env.Detect('CC')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| [
"jim@eigenlabs.com"
] | jim@eigenlabs.com |
c1e2d760b3e39b54a462df6259a94f9c704c993a | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/ASCII_20200609152320.py | 9c4300c8b62b762a900df3b70afab9afaed6c1d2 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | def ASCIIConversion(s):
s = s.split()
num = ""
for i in s:
for j in i:
num += str(ord(j))
num +=" "
print
# code goes here
return num | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
4996b4b469ab11a0f6d9653035de30b11d7f81f5 | 2cd0a84aefb8a7141d1c8da99845a8ada0cc009c | /tensorflow/python/training/ftrl_test.py | fe082ce8149495045ae5dd8c56a28b20aae88f2d | [
"Apache-2.0"
] | permissive | hholst80/tensorflow-old | d466cee96eac717524ab8e4ee85275ce28bb5d68 | 79df325975402e03df89747947ff5b7f18407c52 | refs/heads/master | 2022-12-20T22:07:40.427519 | 2016-05-13T09:57:24 | 2016-05-13T09:57:24 | 58,914,336 | 1 | 1 | Apache-2.0 | 2022-12-09T21:52:14 | 2016-05-16T08:00:04 | C++ | UTF-8 | Python | false | false | 9,599 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Ftrl operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class FtrlOptimizerTest(tf.test.TestCase):
  """Functional tests for the FTRL-Proximal optimizer.

  The first four tests compare variable values against precomputed golden
  numbers after a fixed number of update steps.  The remaining tests check
  the limits in which FTRL-Proximal reduces to Adagrad or GradientDescent
  (zero-initialized variables, no L1/L2 regularization).
  """
  def testFtrlwithoutRegularization(self):
    # Golden-value check: zero-initialized variables, dense gradients, no L1/L2.
    with self.test_session() as sess:
      var0 = tf.Variable([0.0, 0.0])
      var1 = tf.Variable([0.0, 0.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])
      opt = tf.train.FtrlOptimizer(3.0,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      tf.initialize_all_variables().run()
      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose([0.0, 0.0], v0_val)
      self.assertAllClose([0.0, 0.0], v1_val)
      # Run 3 steps FTRL
      for _ in range(3):
        update.run()
      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose(np.array([-2.60260963, -4.29698515]),
                          v0_val)
      self.assertAllClose(np.array([-0.28432083, -0.56694895]),
                          v1_val)
  def testFtrlwithoutRegularization2(self):
    # Same as above but with nonzero initial variable values.
    with self.test_session() as sess:
      var0 = tf.Variable([1.0, 2.0])
      var1 = tf.Variable([4.0, 3.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])
      opt = tf.train.FtrlOptimizer(3.0,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      tf.initialize_all_variables().run()
      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([4.0, 3.0], v1_val)
      # Run 3 steps FTRL
      for _ in range(3):
        update.run()
      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose(np.array([-2.55607247, -3.98729396]),
                          v0_val)
      self.assertAllClose(np.array([-0.28232238, -0.56096673]),
                          v1_val)
  def testFtrlWithL1(self):
    # Golden-value check with L1 regularization only (10 steps).
    with self.test_session() as sess:
      var0 = tf.Variable([1.0, 2.0])
      var1 = tf.Variable([4.0, 3.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])
      opt = tf.train.FtrlOptimizer(3.0,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.001,
                                   l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      tf.initialize_all_variables().run()
      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([4.0, 3.0], v1_val)
      # Run 10 steps FTRL
      for _ in range(10):
        update.run()
      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose(np.array([-7.66718769, -10.91273689]),
                          v0_val)
      self.assertAllClose(np.array([-0.93460727, -1.86147261]),
                          v1_val)
  def testFtrlWithL1_L2(self):
    # Golden-value check with both L1 and L2 regularization (10 steps).
    with self.test_session() as sess:
      var0 = tf.Variable([1.0, 2.0])
      var1 = tf.Variable([4.0, 3.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])
      opt = tf.train.FtrlOptimizer(3.0,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.001,
                                   l2_regularization_strength=2.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      tf.initialize_all_variables().run()
      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose([1.0, 2.0], v0_val)
      self.assertAllClose([4.0, 3.0], v1_val)
      # Run 10 steps FTRL
      for _ in range(10):
        update.run()
      v0_val, v1_val = sess.run([var0, var1])
      self.assertAllClose(np.array([-0.24059935, -0.46829352]),
                          v0_val)
      self.assertAllClose(np.array([-0.02406147, -0.04830509]),
                          v1_val)
  def applyOptimizer(self, opt, steps=5, is_sparse=False):
    # Helper: build a fixed two-variable problem (dense or sparse gradients),
    # run `opt` for `steps` iterations, and return the final variable values
    # so callers can compare two optimizers on identical inputs.
    if is_sparse:
      var0 = tf.Variable([[0.0], [0.0]])
      var1 = tf.Variable([[0.0], [0.0]])
      grads0 = tf.IndexedSlices(tf.constant([0.1], shape=[1, 1]),
                                tf.constant([0]),
                                tf.constant([2, 1]))
      grads1 = tf.IndexedSlices(tf.constant([0.02], shape=[1, 1]),
                                tf.constant([1]),
                                tf.constant([2, 1]))
    else:
      var0 = tf.Variable([0.0, 0.0])
      var1 = tf.Variable([0.0, 0.0])
      grads0 = tf.constant([0.1, 0.2])
      grads1 = tf.constant([0.01, 0.02])
    update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    tf.initialize_all_variables().run()
    sess = tf.get_default_session()
    v0_val, v1_val = sess.run([var0, var1])
    if is_sparse:
      self.assertAllClose([[0.0], [0.0]], v0_val)
      self.assertAllClose([[0.0], [0.0]], v1_val)
    else:
      self.assertAllClose([0.0, 0.0], v0_val)
      self.assertAllClose([0.0, 0.0], v1_val)
    # Run Ftrl for a few steps
    for _ in range(steps):
      update.run()
    v0_val, v1_val = sess.run([var0, var1])
    return v0_val, v1_val
  # When variables are initialized with Zero, FTRL-Proximal has two properties:
  # 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical
  # with GradientDescent.
  # 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is identical
  # with Adagrad.
  # So, basing on these two properties, we test if our implementation of
  # FTRL-Proximal performs same updates as Adagrad or GradientDescent.
  def testEquivAdagradwithoutRegularization(self):
    with self.test_session():
      val0, val1 = self.applyOptimizer(
          tf.train.FtrlOptimizer(3.0,
                                 # Adagrad learning rate
                                 learning_rate_power=-0.5,
                                 initial_accumulator_value=0.1,
                                 l1_regularization_strength=0.0,
                                 l2_regularization_strength=0.0))
    with self.test_session():
      val2, val3 = self.applyOptimizer(
          tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1))
    self.assertAllClose(val0, val2)
    self.assertAllClose(val1, val3)
  def testEquivSparseAdagradwithoutRegularization(self):
    with self.test_session():
      val0, val1 = self.applyOptimizer(
          tf.train.FtrlOptimizer(3.0,
                                 # Adagrad learning rate
                                 learning_rate_power=-0.5,
                                 initial_accumulator_value=0.1,
                                 l1_regularization_strength=0.0,
                                 l2_regularization_strength=0.0),
          is_sparse=True)
    with self.test_session():
      val2, val3 = self.applyOptimizer(
          tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
          is_sparse=True)
    self.assertAllClose(val0, val2)
    self.assertAllClose(val1, val3)
  def testEquivSparseGradientDescentwithoutRegularizaion(self):
    with self.test_session():
      val0, val1 = self.applyOptimizer(
          tf.train.FtrlOptimizer(3.0,
                                 # Fixed learning rate
                                 learning_rate_power=-0.0,
                                 initial_accumulator_value=0.1,
                                 l1_regularization_strength=0.0,
                                 l2_regularization_strength=0.0),
          is_sparse=True)
    with self.test_session():
      val2, val3 = self.applyOptimizer(
          tf.train.GradientDescentOptimizer(3.0), is_sparse=True)
    self.assertAllClose(val0, val2)
    self.assertAllClose(val1, val3)
  def testEquivGradientDescentwithoutRegularizaion(self):
    with self.test_session():
      val0, val1 = self.applyOptimizer(
          tf.train.FtrlOptimizer(3.0,
                                 # Fixed learning rate
                                 learning_rate_power=-0.0,
                                 initial_accumulator_value=0.1,
                                 l1_regularization_strength=0.0,
                                 l2_regularization_strength=0.0))
    with self.test_session():
      val2, val3 = self.applyOptimizer(
          tf.train.GradientDescentOptimizer(3.0))
    self.assertAllClose(val0, val2)
    self.assertAllClose(val1, val3)
if __name__ == "__main__":
tf.test.main()
| [
"henrik.holst@frostbite.com"
] | henrik.holst@frostbite.com |
6163a9437f0f47dc14fe8cea4543eab236e6b4cb | 35c4c0ae37c78124732bc8056f9b9940cc80779b | /Data/DP/Unbounded Knapsack Variations/Maximum ribbon cut.py | 0e69f4385a3768bfac586d7392aca0aca529f919 | [] | no_license | bhusalashish/DSA-1 | 8189c6fe27a7905eaa3ea0a404a38164245c8b6e | 573b737483193c30753e7afc5d564396318d45ff | refs/heads/master | 2023-02-09T21:44:16.691700 | 2020-12-25T06:19:58 | 2020-12-25T06:19:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | '''
#### Name: Unbounded Knapsack Variations
Link: [link]()
#### Sub_question_name: Maximum ribbon cut
Link: [link]()
''' | [
"nishan.paudel1914@gmail.com"
] | nishan.paudel1914@gmail.com |
7eeeb417193fc4b35fc46fb6c6a35d5b92564d23 | d2845579ea6aa51a2e150f0ffe6ccfda85d035ce | /flow/web/api/flow/clean_api.py | 63a5863955bfb4a428e7dd7607b966adac194424 | [
"Apache-2.0"
] | permissive | as23187/WeFe | d8de9ff626f9f3e5d98e0850b0b717a80fd73e72 | ba92871d4b1d2eef6c606c34795f4575e84703bd | refs/heads/main | 2023-08-22T12:01:06.718246 | 2021-10-28T01:54:05 | 2021-10-28T01:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | # Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flow.web.api.base.base_api import BaseApi
from flow.web.api.base.dto.base_api_input import BaseApiInput
from flow.web.api.base.dto.base_api_output import BaseApiOutput
from flow.web.service.before_stop_service import BeforeStop
# Request payload for the clean API: no fields beyond the base input.
class Input(BaseApiInput):
    pass
class Api(BaseApi):
    def run(self, input):
        """
        Flow cleanup before stop
        """
        # Execute the pre-shutdown cleanup hook, then echo the request
        # back as a success response.
        cleanup = BeforeStop()
        cleanup.do()
        return BaseApiOutput.success(input)
| [
"winter.zou@welab-inc.com"
] | winter.zou@welab-inc.com |
2ead28b8c7627ac9cbab3a24b2bf940c88f36ed7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03385/s360085230.py | 1283e15b631a1ef91e6acb904356dd12fed9d648 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | S = len(set(list(input())))
print('Yes' if S == 3 else 'No')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9ebdbf17bb8fe42398267286c3a99eb8bd6c6866 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/1/chk.py | 9dd69df289854f2db2c484a59e46bbde94cba833 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Python 2 code.  `lineRemaining` is the token list following the 'cHK'
    # keyword.  The payload must be delimited by standalone double-quote
    # tokens: the first and last tokens are each exactly '"'.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            # Drop the two quote tokens and print the payload words.
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Empty payload ("" with nothing between): print a blank line.
            print
def main(fileName):
    # Python 2 code.  Interpret each line of the file: lines whose first
    # token is the keyword 'cHK' have their remaining tokens printed; any
    # other keyword prints ERROR and stops processing.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'cHK':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
f21e48568f4a7f9a9b38bf99752af0ac2f1dd7d2 | 6fa0d5d3b61fbce01fad5a7dd50258c09298ee00 | /Algorithm/BOJ/15_backtracking/15649.py | 4be9c6e515ee78768022769a7f81acf75e08e4fd | [] | no_license | athletejuan/TIL | c8e6bd9f7e2c6f999dbac759adcdb6b2959de384 | 16b854928af2f27d91ba140ebc1aec0007e5eb04 | refs/heads/master | 2023-02-19T13:59:06.495110 | 2022-03-23T15:08:04 | 2022-03-23T15:08:04 | 188,750,527 | 1 | 0 | null | 2023-02-15T22:54:50 | 2019-05-27T01:27:09 | Python | UTF-8 | Python | false | false | 286 | py | N,M = map(int, input().split())
picked = []
def array(N,M):
if M == 0:
return print(' '.join(picked))
for _ in range(1, N+1):
if not picked or str(_) not in picked:
picked.append(str(_))
array(N, M-1)
picked.pop()
array(N,M) | [
"vanillasky84.0627@gmail.com"
] | vanillasky84.0627@gmail.com |
4dfcfcd4e5b3d97d57a8c175d1a3bea34b13d009 | b424a13f032d5a607e6df4dd78bc47ad1d06a147 | /splat/evolve.py | 38e9f86832bbfe20e8645aa974aa019a9253ee3f | [] | no_license | EnjoyLifeFund/macSierra-py36-pkgs | 1e7eeb9b55415da6eb12465d67730d76e9cc619a | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | refs/heads/master | 2021-01-20T10:23:50.044019 | 2017-09-05T02:53:26 | 2017-09-05T02:53:26 | 90,333,987 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 56,950 | py | from __future__ import print_function, division
"""
.. note::
Using a suite of evolutionary models, this code translates
between the following brown dwarf parameters: mass, age,
temperature, radius, surface gravity, and luminosity. We allow
the user to choose a set of evolutionary model
(Baraffe, Burrows, or Saumon) and two parameters, then output
the rest of the interpolated parameters.
"""
# imports: internal
import copy
import requests
# imports: external
from astropy import units as u
from astropy.cosmology import Planck15, z_at_value
from astropy.io import ascii
import pandas
import matplotlib.pyplot as plt
import numpy
from scipy.interpolate import interp1d
import scipy.integrate as integrate
import scipy.stats as stats
# imports: splat
from .initialize import *
from .utilities import *
###############################################################################
###############################################################################
def loadEvolModel(*model,**kwargs):
    '''
    :Purpose: Reads in the evolutionary model parameters for the models listed below, which are used to interpolate parameters in `modelParameters()`_.
    .. _`modelParameters()` : api.html#splat_evolve.modelParameters
    Available models are:
    - **burrows** : Models from `Burrows et al. (2001) <http://adsabs.harvard.edu/abs/2001RvMP...73..719B>`_ for 1 Myr < age < 10 Gyr, 0.005 Msol < mass < 0.2 Msol, and solar metallicity
    - **baraffe** : Models from `Baraffe et al. (2003) <http://adsabs.harvard.edu/abs/2003A&A...402..701B>`_ for 1 Myr < age < 10 Gyr, 0.005 Msol < mass < 0.1 Msol, and solar metallicity (COND dust prescription)
    - **saumon** : Models from `Saumon et al. (2003) <http://adsabs.harvard.edu/abs/2008ApJ...689.1327S>`_ for 3 Myr < age < 10 Gyr, 0.002 Msol < mass < 0.085 Msol, although mass and age ranges vary as the maximum temperature for the models is 2500 K. For these models there are additional options:
        - **metallicity** = `solar`, `+0.3`, or `-0.3`
        - **cloud** =  `cloud-free`, `hybrid`, `f2` (sub- and super-solar metallicities are only cloud-free)
    Parameter units (in astropy convention) are:
        - `masses`: Solar masses
        - `ages`: Gyr
        - `temperature`: K
        - `gravity`: log10 of cm/s/s
        - `luminosity`: log10 of Solar luminosities
        - `radius`: Solar radii
    Models are contained in SPLAT's reference/EvolutionaryModels folder.
    Required Inputs:
    :param: model: string of the name of the evolutionary model set to be used; can be `baraffe` (default), `burrows`, or `saumon`
    Optional Inputs:
    :param: metallicity: for Saumon models, this is the metallicity assumed, and can be a string or integer.  Allowed values are 0 (or `solar` = default), -0.3 (or `subsolar`) or 0.3 (or `supersolar`)
    :param: cloud: for Saumon models, this is the desired cloud prescription, and is a string:
        - no clouds: `cloud` = `nocloud`, `cloud-free` or `nc` (default)
        - hybrid cloud prescription: `cloud` = `hybrid`
        - f2 cloud prescription: `cloud` = `f2`
    Output:
    Dictionary containing keywords mass, age, temperature, luminosity, gravity, and radius, each linked to the evolutionary parameters retrieved.
    :Example:
    >>> import splat
    >>> p = splat.loadEvolModel('saumon',metallicity=-0.3,cloud='nc')
    You are using saumon's models.
    >>> for k in list(p.keys()): print('{}: {}'.format(k, p[k][12]))
    age: 0.15
    mass: [ 0.002  0.003  0.004  0.005  0.006  0.007  0.008  0.009  0.01   0.011
      0.012  0.013  0.014  0.015  0.016  0.017  0.018  0.019  0.02   0.022
      0.024  0.026  0.028  0.03   0.033  0.035  0.038  0.04   0.043  0.045
      0.048  0.05   0.053]
    temperature: [  353.   418.   471.   523.   585.   642.   695.   748.   806.   893.
      1146.  1228.  1114.  1113.  1148.  1183.  1227.  1270.  1316.  1402.
      1489.  1572.  1654.  1739.  1853.  1930.  2030.  2096.  2187.  2240.
      2316.  2362.  2426.]
    gravity: [ 3.576  3.746  3.871  3.972  4.056  4.128  4.191  4.246  4.296  4.335
      4.337  4.368  4.437  4.479  4.512  4.543  4.571  4.597  4.621  4.665
      4.704  4.74   4.772  4.8    4.839  4.861  4.892  4.909  4.931  4.947
      4.966  4.978  4.996]
    luminosity: [-6.691 -6.393 -6.185 -6.006 -5.815 -5.658 -5.527 -5.404 -5.277 -5.098
     -4.628 -4.505 -4.709 -4.724 -4.675 -4.627 -4.568 -4.51  -4.45  -4.342
     -4.24  -4.146 -4.058 -3.969 -3.856 -3.781 -3.69  -3.628 -3.546 -3.5   -3.432
     -3.393 -3.34 ]
    radius: [ 0.1206  0.1214  0.1214  0.1209  0.1202  0.1195  0.1189  0.1182  0.1178
      0.1181  0.123   0.1235  0.1184  0.1167  0.1161  0.1154  0.1151  0.1148
      0.1146  0.1142  0.1139  0.1139  0.1138  0.1141  0.1144  0.115   0.1155
      0.1163  0.1174  0.118   0.1193  0.12    0.121 ]
    '''
    # check model
    # NOTE(review): this 'finally' print also runs when the TypeError above is
    # raised; concatenating the original (non-string) argument there would then
    # itself raise, masking the intended error — confirm the intended path.
    try: model = model[0].lower()
    except TypeError: raise TypeError("Model must be a string.")
    except IndexError: model = 'baraffe'
    finally: print("You are using " + model + "'s models.")
    # NOTE(review): assert is stripped under ``python -O``; input validation
    # relies on it here.
    assert model in EMODELS, "\nModel {} not in allowed model sets; please use: {}\n".format(model,' '.join(EMODELS))
    ##################### BARAFFE OR BURROWS MODEL #########################
    if model == 'baraffe' or model == 'burrows':
        # Fixed age grid (Gyr) shared by the Baraffe and Burrows files.
        ages = ['0.001', '0.005', '0.010', '0.050', '0.100',
                '0.120', '0.500', '1.000', '5.000', '10.000']
        if model == 'baraffe':
            prefix = 'Baraffe/cond_'
        else:
            prefix = 'Burrows/b97_'
    ########################### SAUMON MODEL ##############################
    else:
        # set metallicity
        # Accept either the 'z' or 'metallicity' keyword; normalize numeric
        # values to one-decimal strings before matching.
        metallicity = kwargs.get('z',False)
        metallicity = kwargs.get('metallicity',metallicity)
        if metallicity == False:
            metallicity = 'solar'
        if isinstance(metallicity,int) or isinstance(metallicity,float):
            metallicity = '{:.1f}'.format(metallicity)
        if metallicity.lower() == 'solar' or metallicity == '0.0':
            Z = 'z0'
        elif metallicity == '0.3' or metallicity == '+0.3' or metallicity == 'supersolar':
            Z = 'z+0.3'
        elif metallicity == '-0.3' or metallicity == 'subsolar':
            Z = 'z-0.3'
        else:
            raise ValueError('\nMetallicity for Saumon model must be 0.0 (solar), +0.3 or -0.3, not {}\n'.format(metallicity))
        # set cloud treatment
        # Accept 'cloud', 'clouds' or 'cld'; non-solar metallicities are
        # forced to the cloud-free prescription.
        cloud = kwargs.get('cloud',False)
        cloud = kwargs.get('clouds',cloud)
        cloud = kwargs.get('cld',cloud)
        if cloud == False:
            cloud = 'nc'
        if metallicity=='-0.3' or metallicity=='0.3':
            C = 'nc'
        if isinstance(cloud,int) or isinstance(cloud,float):
            cloud = 'f{:1d}'.format(int(cloud))
        if cloud.lower() == 'hybrid':
            C = 'hybrid'
        elif cloud.lower() == 'f2':
            C = 'f2'
        elif cloud.lower() == 'nc' or cloud.lower() == 'nocloud' or cloud.lower() == 'noclouds' or cloud.lower() == 'cloud-free':
            C = 'nc'
        else:
            raise ValueError('\nCould not recognize cloud choice for Saumon model: must be cloud-free, hybrid or f2, not {}\n'.format(cloud))
        ages = ['0.003','0.004','0.006','0.008','0.010','0.015','0.020',
                '0.030','0.040','0.060','0.080','0.100','0.150','0.200',
                '0.300','0.400','0.600','0.800','1.000','1.500','2.000',
                '3.000','4.000','6.000','8.000','10.000']
        prefix = 'Saumon/sau08_{:s}_{:s}_'.format(Z,C)
    #######################################################################
    # read in parameters
    # One model file per age; each contributes one array per parameter, so
    # mparam[param][i] lines up with mparam['age'][i].
    mparam = {}
    for ep in EPARAMETERS:
        mparam[ep] = []
    for i,age in enumerate(ages):
        mfile = prefix+'{:05d}.txt'.format(int(float(age)*1000.))
        try:
            dp=pandas.read_csv(SPLAT_PATH+EVOLUTIONARY_MODEL_FOLDER+mfile,comment='#',sep=',',header=0)
            for ep in EPARAMETERS:
                mparam[ep].append(dp[ep].values)
        # this is done in case models are not local - NOTE: currently just throwing an error
        except:
            raise ValueError('Could not find model file {} locally; aborting'.format(mfile))
        # try:
        #     print('Could not read in model file {} locally; trying online'.format(mfile))
        #     data =ascii.read(requests.get(SPLAT_URL+EVOLUTIONARY_MODEL_FOLDER+mfile).content,comment='#',delimiter='\t')
        # except:
        #     raise ValueError('Could not find model file {} locally or online; aborting'.format(mfile))
    mparam['age'] = [float(i) for i in ages]
    return mparam
def _modelParametersSingle(*args, **kwargs):
    '''
    :Purpose: Driver function for modelParameters_, performs actual interpolation of evolutionary models. See SPLAT API for `modelParameters()`_ for details.

    Given a loaded model grid (``args[0]``) and exactly two known parameters
    passed as keywords (any pair of mass, age, temperature, gravity,
    luminosity, radius), fills in the remaining parameters by log-linear
    interpolation over the grid.  Parameters outside the grid come back nan.
    .. _`modelParameters()` : api.html#splat_evolve.modelParameters
    '''
    keywords = list(kwargs.keys())
    # check that model is passed correctly
    try: model = args[0]
    except IndexError:
        model = loadEvolModel('baraffe')
        print('\nWarning: model error; using Baraffe models by default\n')
    # retool models to allow for logarithmic interpolation
    lmodel = copy.deepcopy(model)
    # strip off units
    # mass, age, temperature and radius are interpolated in log10 space;
    # gravity and luminosity are already logarithmic in the model files.
    lmodel['age'] = [numpy.log10(m) for m in lmodel['age']]
    for i in range(len(lmodel['age'])):
        lmodel['mass'][i] = [numpy.log10(m) for m in lmodel['mass'][i]]
        lmodel['temperature'][i] = [numpy.log10(m) for m in lmodel['temperature'][i]]
        lmodel['radius'][i] = [numpy.log10(m) for m in lmodel['radius'][i]]
    # prep output parameters
    # 0. marks "unknown"; known values are copied in from the keywords.
    params = {}
    for e in EPARAMETERS:
        params[e] = 0.
    for e in EPARAMETERS:
        if e in keywords:
            # NOTE(review): if float() raises here, the 'finally' clause still
            # runs and references f (possibly unbound or stale from an earlier
            # iteration) — confirm this error path is intended.
            try: f = float(kwargs[e])
            except: raise ValueError('\nInput paramter {} must be a single number, not {}\n'.format(e,kwargs[e]))
            finally: params[e] = f
    input_type = 'mass_age'
    Ag, Ma, Te, Le, Ge, Re, P = [],[],[],[],[],[],[]
    ############### UNKNOWN MASS AND AGE - INTERPOLATE AGE FROM OTHER PARAMETERS #################
    # for each age, interpolate mass as a function of first parameter and then second parameter as a function of mass
    # and obtain second parameter as a function of age; then interpolate the model ages as a function of
    # the second parameter and evaluate for known parameter to get age
    ###############################################################################
    if (params['mass'] == 0.) and (params['age'] == 0.):
        input_type = 'two_params'
        # P collects the two known [name, (log-)value] pairs in a fixed
        # priority order: temperature, gravity, radius, luminosity.
        if params['temperature'] != 0.:
            P.append(['temperature', numpy.log10(params['temperature'])])
        if params['gravity'] != 0.:
            P.append(['gravity', params['gravity']])
        if params['radius'] != 0.:
            P.append(['radius', numpy.log10(params['radius'])])
        if params['luminosity'] != 0.:
            P.append(['luminosity', params['luminosity']])
        for i,age in enumerate(lmodel['age']):
            if min(lmodel[P[0][0]][i]) <= P[0][1] <= max(lmodel[P[0][0]][i]) \
                and min(lmodel[P[1][0]][i]) <= P[1][1] <= max(lmodel[P[1][0]][i]):
                Ag.append(age)
                f = interp1d(lmodel[P[0][0]][i], lmodel['mass'][i])
                # NOTE(review): Ma (initialized as a list above) is rebound to a
                # scalar interpolant here; the list form is unused in this branch.
                Ma = f(P[0][1])
                f = interp1d(lmodel['mass'][i], lmodel[P[1][0]][i])
                Ge.append(f(Ma))
        try:
            f = interp1d(Ge, Ag)
            params['age'] = 10.**f(P[1][1])
        except: params['age'] = float('nan')
        Ge, Ag, Ma = [], [], []
    ################ UNKNOWN AGE BUT KNOWN MASS AND ONE OTHER PARAMETER ###########
    # interpolate second parameter as a function of mass for each of the age models and evaluate for known mass
    # interpolate the model ages as a fucntion of these parameters and evaluate for known parameter
    ###############################################################################
    if params['age'] == 0. and params['mass'] != 0. and \
        not numpy.isnan(params['mass']):
        if input_type != 'two_params':
            input_type = 'one_param'
            if params['temperature'] != 0.:
                P.append(['temperature', numpy.log10(params['temperature'])])
            elif params['gravity'] != 0.:
                P.append(['gravity', params['gravity']])
            elif params['radius'] != 0.:
                P.append(['radius', numpy.log10(params['radius'])])
            elif params['luminosity'] != 0.:
                P.append(['luminosity', numpy.log10(params['luminosity'])])
            else:
                for k in list(params.keys()):
                    print('{}: {}'.format(k,params[k]))
                print(P)
                raise ValueError('\nProblem with one_param interpolation\n')
        for i,age in enumerate(lmodel['age']):
            if min(lmodel['mass'][i]) <= numpy.log10(params['mass']) <= max(lmodel['mass'][i]):
                Ag.append(age)
                f = interp1d(lmodel['mass'][i], lmodel[P[0][0]][i])
                Ge.append(f(numpy.log10(params['mass'])))
        try:
            f = interp1d(Ge, Ag)
            params['age'] = 10.**f(P[0][1])
        except:
            print('\nFailed in age + parameter determination\n')
            params['age'] = float('nan')
        Ge, Ag = [], []
    ################ KNOWN AGE BUT UNKNOWN MASS AND ONE OTHER PARAMETER ###########
    # generate mass as function of second parameter interpolated between two closest age models
    # evaluate mass(parameter) (resulting in both mass and age as knowns)
    ###############################################################################
    if params['age'] != 0. and params['mass'] == 0. and \
        not numpy.isnan(params['age']):
        if input_type != 'two_params' and input_type != 'one_param':
            input_type = 'one_param'
            if params['temperature'] != 0.:
                P.append(['temperature', numpy.log10(params['temperature'])])
            elif params['gravity'] != 0.:
                P.append(['gravity', params['gravity']])
            elif params['radius'] != 0.:
                P.append(['radius', numpy.log10(params['radius'])])
            elif params['luminosity'] != 0.:
                P.append(['luminosity', numpy.log10(params['luminosity'])])
            else:
                for k in list(params.keys()):
                    print('{}: {}'.format(k,params[k]))
                print(P)
                raise ValueError('\nProblem with one_param interpolation\n')
        if numpy.log10(params['age']) < numpy.min(lmodel['age']) or \
            numpy.log10(params['age']) > numpy.max(lmodel['age']):
            print('\nAge of {} is outside range of models, {} to {}\n'.format(params['age'],10.**numpy.min(lmodel['age']),10**numpy.max(lmodel['age'])))
            params['mass'] = numpy.nan
        else:
            # Bracket the requested age between the two nearest model ages
            # (ai, ai+1) and interpolate the known parameter between them for
            # every mass common to both age grids.
            adiff = [numpy.log10(params['age'])-a for a in lmodel['age']]
            ai = numpy.argmin(numpy.abs(adiff))
            if adiff[ai] < 0:
                ai-=1
            for i,m in enumerate(lmodel['mass'][ai]):
                if m in lmodel['mass'][ai+1]:
                    Ma.append(m)
                    aj = numpy.argmin(numpy.abs([a-m for a in lmodel['mass'][ai+1]]))
                    vals = [lmodel[P[0][0]][ai][i],lmodel[P[0][0]][ai+1][aj]]
                    f = interp1d(lmodel['age'][ai:ai+2],vals)
                    Ge.append(f(numpy.log10(params['age'])))
            try:
                f = interp1d(Ge, Ma)
                params['mass'] = 10.**f(P[0][1])
            except:
                print('\nFailed in mass + parameter determination\n')
                params['mass'] = numpy.nan
        Ma, Ge = [],[]
    ###################### KNOWN MASS AND AGE #####################################
    # generate parameters as a function of mass interpolated between two closest age models
    # evaluate parameters(mass)
    ###############################################################################
    if params['mass'] != 0. and params['age'] != 0. and \
        not numpy.isnan(params['age']) and not numpy.isnan(params['mass']):
        for i,age in enumerate(lmodel['age']):
            if min(lmodel['mass'][i]) <= numpy.log10(params['mass']) \
                <= max(lmodel['mass'][i]):
                Ag.append(age)
                f =interp1d(lmodel['mass'][i],lmodel['temperature'][i])
                Te.append(f(numpy.log10(params['mass'])))
                f = interp1d(lmodel['mass'][i],lmodel['luminosity'][i])
                Le.append(f(numpy.log10(params['mass'])))
                f = interp1d(lmodel['mass'][i],lmodel['gravity'][i])
                Ge.append(f(numpy.log10(params['mass'])))
                f = interp1d(lmodel['mass'][i],lmodel['radius'][i])
                Re.append(f(numpy.log10(params['mass'])))
        # Fill only the still-unknown parameters; log-space values are
        # converted back with 10.** where appropriate.
        if params['temperature'] == 0.:
            try:
                f = interp1d(Ag, Te)
                params['temperature'] = 10.**f(numpy.log10(params['age']))
            except:
                params['temperature'] = numpy.nan
        if params['luminosity'] == 0.:
            try:
                f = interp1d(Ag, Le)
                params['luminosity'] = f(numpy.log10(params['age'])).item(0)
            except:
                params['luminosity'] = numpy.nan
        if params['gravity'] == 0.:
            try:
                f = interp1d(Ag, Ge)
                params['gravity'] = f(numpy.log10(params['age'])).item(0)
            except:
                params['gravity'] = numpy.nan
        if params['radius'] == 0.:
            try:
                f = interp1d(Ag, Re)
                params['radius'] = 10.**f(numpy.log10(params['age']))
            except:
                params['radius'] = numpy.nan
        return params
    # something failed
    else:
        for e in EPARAMETERS:
            params[e] = numpy.nan
        print('\nParameter set is not covered by models\n')
        return params
###############################################################################
def modelParameters(*model,**kwargs):
    '''
    :Purpose: Retrieves the evolutionary model parameters given two of the following parameters: mass, age, temperature, luminosity, gravity, or radius. The inputs can be individual values or arrays. Using the input parameters, the associated evolutionary model parameters are computed through log-linear interpolation of the original model grid. Parameters that fall outside the grid return nan.
    Required Inputs:
    :param: model: Either a string of the name of the evolutionary model set, which can be one of `baraffe` (default), `burrows`, or `saumon`; or a dictionary output from `loadEvolModel()`_ containing model parameters.
    and two (2) of the following:
    :param: mass: input value of list of values for mass (can also be `masses` or `m`)
    :param: age: input value of list of values for age (can also be `ages`, `time` or `a`)
    :param: temperature: input value of list of values for temperature (can also be `temperatures`, `teff`, `temp` or `t`)
    :param: gravity: input value of list of values for gravity (can also be `gravities`, `grav`, `logg` or `g`)
    :param: luminosity: input value of list of values for luminosity (can also be `luminosities`, `lum`, `lbol` or `l`)
    :param: radius: input value of list of values for radius (can also be `radii`, `rad` and `r`)
    .. _`loadEvolModel()` : api.html#splat_evolve.loadEvolModel
    Optional Inputs:
    :param: Parameters for `loadEvolModel()`_ may also be used.
    Output:
    Dictionary containing keywords mass, age, temperature, luminosity, gravity, and radius, each linked to the evolutionary parameters retrieved.
    :Raises: ValueError if no recognized parameter keyword is supplied, or if the supplied parameter arrays have unequal lengths.
    :Example:
    >>> import splat, numpy
    >>> masses = numpy.random.uniform(0.01,0.1,20)
    >>> ages = numpy.random.uniform(0.01,10,20)
    >>> p = splat.modelParameters('baraffe',mass=masses,age=ages)
    You are using baraffe's models.
    >>> print(p.temperature)
    [ 2502.90132332  2818.85920306  1002.64227134  1330.37273021  1192.86976417
      500.45609068  2604.99966013  1017.03307609  1774.18267474  1675.12181635
      2682.9697321   2512.45223777   346.41152614  2066.19972036   843.28528456
      2264.93051445  2767.85660557   348.84214986   922.87030167  2669.27152307] K
    '''
    # read in model set; a positional argument takes precedence over the
    # `model` keyword, and the default set is baraffe
    try:
        model = model[0]
    except IndexError:
        model = kwargs.get('model',False)
        if model == False:
            model = 'baraffe'
    if type(model) is not dict: model = loadEvolModel(model,**kwargs)

    # accepted aliases for each canonical parameter name; a canonical keyword
    # always wins, and among aliases the LAST one present wins (this matches
    # the precedence of the original sequential checks)
    aliases = {
        'temperature': ['t','teff','temp'],
        'gravity': ['g','logg','grav'],
        'mass': ['m'],
        'age': ['time','a'],
        'radius': ['r','rad'],
        'luminosity': ['l','lum','lbol'],
    }
    keywords = list(kwargs.keys())
    mkwargs = {}
    for e in EPARAMETERS:
        if e in keywords:
            mkwargs[e] = kwargs[e]
        else:
            for alt in aliases[e]:
                if alt in keywords:
                    mkwargs[e] = kwargs[alt]

    # validate inputs: previously an empty parameter set caused a NameError on
    # `numberValues`, and unequal array lengths caused an IndexError downstream
    pkeys = list(mkwargs.keys())
    if len(pkeys) == 0:
        raise ValueError('\nNeed at least two of the following parameters to infer the others: {}'.format(EPARAMETERS))
    numberValues = None
    for p in pkeys:
        # promote scalars to single-element lists so everything iterates uniformly
        if isinstance(mkwargs[p],(float,int)):
            mkwargs[p] = [mkwargs[p]]
        if numberValues is None:
            numberValues = len(mkwargs[p])
        elif len(mkwargs[p]) != numberValues:
            raise ValueError('\nInput parameter arrays must all have the same length; received lengths {}'.format({k: len(mkwargs[k]) for k in pkeys}))

    # evaluate each parameter set in turn to fill in the remaining parameters
    outparams = {p: [] for p in EPARAMETERS}
    for i in range(numberValues):
        inparams = {p: mkwargs[p][i] for p in pkeys}
        par = _modelParametersSingle(model,**inparams)
        for p in EPARAMETERS:
            outparams[p].append(par[p])

    # collapse to scalars when only a single parameter set was computed
    if len(outparams['temperature']) == 1:
        for e in EPARAMETERS:
            outparams[e] = outparams[e][0]

    # attach the units defined in EPARAMETER_UNITS
    for e in EPARAMETERS:
        outparams[e] *= EPARAMETER_UNITS[e]
    return outparams
def plotModelParameters(parameters,xparam,yparam,**kwargs):
    '''
    :Purpose: Plots pairs of physical star parameters and optionally compares to evolutionary model tracks.
    Required Inputs:
    :param: parameters: dictionary or nested set of two arrays containing parameters to be plotted. For dictionary, keywords should include the `xparameter` and `yparameter` strings to be plotted. Values associated with keywords can be single numbers or arrays
    :param: xparam: string corresponding to the key in the `parameters` dictionary to be plot as the x (independent) variable.
    :param: yparam: string corresponding to the key in the `parameters` dictionary to be plot as the y (dependent) variable.
    Optional Inputs:
    .. _`loadEvolModel()` : api.html#splat_evolve.loadEvolModel
    :param: showmodel: set to True to overplot evolutionary model tracks from `model` (default = True); `showmodels` is also accepted, and setting either to False disables the tracks
    :param: model: either a string of the name of the evolutionary model set, one of `baraffe` (default), `burrows`, or `saumon`; or a dictionary output from `loadEvolModel()`_ containing model parameters.
    :param: tracks: string indicating what model tracks to show; can either be `mass` (default) or `age`
    :param: file: name of file to output plot (`output` can also be used)
    :param: show: set to True to show the plot onscreen (default = True)
    :param: figsize: a two-element array defining the figure size (default = [8,6])
    :param: color: color of data symbols (default = 'blue')
    :param: marker: matplotlib marker type for data symbols (default = 'o')
    :param: xlabel: string overriding the x-axis label (default = parameter name and unit)
    :param: ylabel: string overriding the y-axis label (default = parameter name and unit)
    :param: title: string specifying plot title (no title by default)
    :param: tight: set to True to tighten plot to focus on the data points (default = True)
    Output:
    A matplotlib plot object. Optionally, can also show plot on screen or output plot to a file.
    :Example:
    >>> import splat, numpy
    >>> age_samp = 10.**numpy.random.normal(numpy.log10(1.),0.3,50)
    >>> mass_samp = numpy.random.uniform(0.001,0.1,50)
    >>> p = splat.modelParameters('baraffe',age=age_samp,mass=mass_samp)
    >>> splat.plotModelParameters(p,'age','temperature',showmodels=True,model='baraffe',show=True)
    [plot of temperature vs age for 50 data points with baraffe models overplotted]
    '''
    # check inputs: accept either a dictionary keyed by parameter name, or a
    # two-element list ordered as [x values, y values]
    if type(parameters) is not dict:
        if len(parameters) != 2:
            raise ValueError('\nInput parameters should be a dictionary or two-element list\n')
        param = {xparam: parameters[0], yparam: parameters[1]}
    else:
        param = copy.deepcopy(parameters)
    keys = list(param.keys())
    if xparam not in keys:
        raise ValueError('\nCould not find parameter {} in input dictionary\n'.format(xparam))
    if yparam not in keys:
        raise ValueError('\nCould not find parameter {} in input dictionary\n'.format(yparam))
    if not isinstance(param[xparam],list):
        param[xparam] = [param[xparam]]
    if not isinstance(param[yparam],list):
        param[yparam] = [param[yparam]]

    # map each axis parameter (or alias) onto the canonical model parameter
    # name and a flag for whether that axis is log-scaled
    axis_settings = {
        'age': ('age',True), 'time': ('age',True), 'a': ('age',True),
        'mass': ('mass',True), 'm': ('mass',True),
        'temperature': ('temperature',True), 'teff': ('temperature',True), 't': ('temperature',True),
        'radius': ('radius',True), 'r': ('radius',True),
        'gravity': ('gravity',False), 'logg': ('gravity',False), 'g': ('gravity',False),
        'luminosity': ('luminosity',False), 'lbol': ('luminosity',False), 'l': ('luminosity',False)}
    if xparam not in axis_settings:
        raise ValueError('\nx-axis parameter {} is not one that can be plotted'.format(xparam))
    if yparam not in axis_settings:
        raise ValueError('\ny-axis parameter {} is not one that can be plotted'.format(yparam))
    xmparam,xlogflag = axis_settings[xparam]
    ymparam,ylogflag = axis_settings[yparam]

    def _draw(xv,yv,**pkw):
        # select the matplotlib plotting call matching the axis log flags
        if xlogflag and ylogflag:
            plt.loglog(xv,yv,**pkw)
        elif ylogflag:
            plt.semilogy(xv,yv,**pkw)
        elif xlogflag:
            plt.semilogx(xv,yv,**pkw)
        else:
            plt.plot(xv,yv,**pkw)

    # plot the data points
    plt.close('all')
    plt.figure(figsize=kwargs.get('figsize',[8,6]))
    _draw(param[xparam],param[yparam],color=kwargs.get('color','blue'),marker=kwargs.get('marker','o'))

    # overplot evolutionary model tracks unless explicitly disabled; either
    # `showmodel=False` or `showmodels=False` turns them off (BUG FIX: the
    # original used `or` between the two defaults-True checks, so the tracks
    # could never actually be disabled, and the read-failure path below had
    # no effect)
    showmodel = kwargs.get('showmodel',True) != False and kwargs.get('showmodels',True) != False
    if showmodel:
        model = kwargs.get('model',False)
        if model == False:
            model = 'baraffe'
        try:
            if type(model) is not dict:
                model = loadEvolModel(model,**kwargs)
        except:
            print('\nProblem in reading in original models\n')
            showmodel = False
    if showmodel:
        tvals,xvals,yvals = [],[],[]
        # model tracks trace constant mass (the default)
        if kwargs.get('tracks','mass') == 'mass':
            masses = []
            for i in model['mass']:
                masses.extend(i)
            masses.sort()
            tvals = numpy.unique(masses)
            for m in tvals:
                xx,yy = [],[]
                for i,x in enumerate(model['age']):
                    if m in model['mass'][i]:
                        if xmparam != 'age':
                            xx.append(numpy.array(model[xmparam][i])[numpy.where(model['mass'][i]==m)].item(0))
                        else:
                            xx.append(x)
                        if ymparam != 'age':
                            yy.append(numpy.array(model[ymparam][i])[numpy.where(model['mass'][i]==m)].item(0))
                        else:
                            yy.append(x)
                    else:
                        # this mass is absent at this age: insert nan to break the track
                        xx.append(numpy.nan)
                        yy.append(numpy.nan)
                xvals.append(xx)
                yvals.append(yy)
        # otherwise model tracks trace isochrones (constant age)
        else:
            tvals = model['age']
            # pad with nan to account for unequal lengths of model arrays
            # (BUG FIX: this previously read `models['mass']`, an undefined
            # name, so the isochrone branch always raised NameError)
            maxlen = numpy.max([len(a) for a in model['mass']])
            for i,x in enumerate(tvals):
                t = numpy.zeros(maxlen)
                t.fill(numpy.nan)
                # BUG FIX: index the model grid with the canonical parameter
                # name (xmparam/ymparam) rather than the raw, possibly
                # aliased, input string
                if xmparam != 'age':
                    t[0:len(model[xmparam][i])] = model[xmparam][i]
                else:
                    t.fill(x)
                xvals.append(t.tolist())
                s = numpy.zeros(maxlen)
                s.fill(numpy.nan)
                if ymparam != 'age':
                    s[0:len(model[ymparam][i])] = model[ymparam][i]
                else:
                    s.fill(x)
                yvals.append(s.tolist())
        # draw the model tracks in grey
        for i in range(len(xvals)):
            _draw(xvals[i],yvals[i],color='grey')

    # axis labels (defaults include the parameter units) and optional title
    plt.xlabel(kwargs.get('xlabel','{} ({})'.format(xmparam,EPARAMETER_UNITS[xmparam])))
    plt.ylabel(kwargs.get('ylabel','{} ({})'.format(ymparam,EPARAMETER_UNITS[ymparam])))
    if kwargs.get('title',False) != False:
        plt.title(kwargs.get('title'))

    # optionally tighten axis limits around the data with a ~5%/0.1-dex margin
    if kwargs.get('tight',True) == True:
        xrng = [numpy.nanmin(param[xparam]),numpy.nanmax(param[xparam])]
        if xlogflag:
            xsep = xrng[1]/xrng[0]
            if xsep != 1.:
                plt.xlim([xrng[0]/(xsep**0.1),xrng[1]*(xsep**0.1)])
        else:
            xsep = xrng[1]-xrng[0]
            if xsep != 0.:
                plt.xlim([xrng[0]-0.05*xsep,xrng[1]+0.05*xsep])
        yrng = [numpy.nanmin(param[yparam]),numpy.nanmax(param[yparam])]
        if ylogflag:
            ysep = yrng[1]/yrng[0]
            if ysep != 1.:
                plt.ylim([yrng[0]/(ysep**0.1),yrng[1]*(ysep**0.1)])
        else:
            ysep = yrng[1]-yrng[0]
            if ysep != 0.:
                plt.ylim([yrng[0]-0.05*ysep,yrng[1]+0.05*ysep])

    # save to file if requested, otherwise show onscreen (if show=True)
    file = kwargs.get('file',False)
    file = kwargs.get('output',file)
    if file != False:
        plt.savefig(file)
    elif kwargs.get('show',True) == True:
        plt.show()
    return plt
def simulateAges(num,**kwargs):
    '''
    :Purpose: Generates a distribution of ages based on the defined input distribution.
    Required Inputs:
    :param: num: number of ages to generate
    Optional Inputs:
    :param: age_range: range of ages to draw from (default = [0.1,10.]); can also specify `range`, `minage` or `min`, and `maxage` or `max`
    :param: distribution: either a string set to one of the following to define the type of age distribution (or reverse star formation rate) desired:
        * `uniform`: uniform distribution (default)
        * `exponential`: exponential age distribution, P(t) ~ e\^(beta x t). You can specify the parameters `beta` or `tau` = 1/beta, or set ``distribution`` to `aumer` or `miller`
        * `double_exponential`: double exponential age distribution, P(t) ~ Ae\^(lambda x t) + e\^(beta x t). You can specify the parameters `beta`, `lambda` and `a` or set ``distribution`` to `aumer_double` (default parameters)
        * `cosmic` or `rujopakarn`: cosmic age distribution with P(t) ~ (1+z(t))\^alpha, where z is the redshift, which is converted to time using the Planck 2015 cosmology. You can specify the parameter `alpha` or set ``distribution`` to `rujopakarn` (default parameters)
        * `peaked`: age distribution that peaks at some early time, written in the form P(t) ~ (t-t0)/(t\^2+t1\^2)\^2. You can specify the parameters `t0` and `t1` or set ``distribution`` to `aumer_peaked` or `just_peaked`
        * `aumer` or `aumer_exponential`: exponential age distribution with parameters from Aumer & Binney (2009): beta = 0.117
        * `aumer_double`: double exponential age distribution with parameters from Aumer & Binney (2009): beta = 0.348, lambda = 2.0, a = 1.e-8
        * `aumer_peaked`: peaked age distribution with parameters from Aumer & Binney (2009): t0 = XXX, t1 = XXX
        * `just` or `just_exponential`: exponential age distribution with parameters from Just & Jahriess (2010): beta = 0.125
        * `just_peaked_a`: peaked age distribution with parameters from Just & Jahriess (2010) Model A: t0 = 5.6, t1 = 8.2
        * `just_peaked` or `just_peaked_b`: peaked age distribution with parameters from Just & Jahriess (2010) Model B: t0 = 1.13, t1 = 7.8
        * `miller`: exponential age distribution with parameters from Miller & Scalo (1979): beta = max age / 2
        * `rujopakarn`: cosmic age distribution with parameters from Rujopakarn et al. (2010): beta = max age / 2
    :param: parameters: dictionary containing the parameters for the age distribution/star formation model being used; options include:
        * `alpha`: power law factor for cosmic age distribution
        * `beta`: power factor in exponential age distribution; positive beta implies a star formation rate that decreases with time
        * `lambda`: second power factor in double exponential age distribution; positive lambda implies a star formation rate that decreases with time
        * `a`: relative scale factor for second exponential in double exponential age distribution
        * `tau`: 1/beta scale factor in exponential age distribution
        * `t0` and `t1`: parameters for peaked age distribution
    :param: sfh: set to True if distribution is a star formation history rather than an age distribution (default = False)
    :param: verbose: Give feedback (default = False)
    :Raises: NameError if the distribution name is not recognized.
    Output:
    An array of ages drawn from the desired distribution in units of Gyr
    :Example:
    >>> import splat
    >>> import matplotlib.pyplot as plt
    >>> ages = splat.simulateAges(10000,distribution='aumer',age_range=[0.3,8.0])
    >>> plt.hist(ages)
    [histogram of ages in range 0.3-8.0 Gyr]
    '''
    # initial parameters; distribution names are matched case-insensitively
    distribution = kwargs.get('distribution','uniform').lower()
    allowed_distributions = ['uniform','flat','exponential','double_exponential','peaked','cosmic','aumer','aumer_exponential','aumer_double','aumer_peaked','just','just_exponential','just_peaked','just_peaked_a','just_peaked_b','miller','rujopakarn']
    mn = kwargs.get('minage',0.1)
    mn = kwargs.get('min',mn)
    mx = kwargs.get('maxage',10.)
    mx = kwargs.get('max',mx)
    sfh = kwargs.get('sfh',False)
    age_range = kwargs.get('age_range',[mn,mx])
    age_range = kwargs.get('range',age_range)
    verbose = kwargs.get('verbose',False)

    # protective offset so the sampling range is never zero-width
    if age_range[0] == age_range[1]:
        age_range[1] += 0.0001

    # fill in default distribution parameters for any not supplied
    parameters = kwargs.get('parameters',False)
    if parameters == False:
        parameters = {}
    parameters.setdefault('beta',1.0)
    parameters.setdefault('tau',1./parameters['beta'])
    parameters.setdefault('alpha',3.5)
    parameters.setdefault('lambda',2.0)
    parameters.setdefault('a',1.e-8)
    parameters.setdefault('t0',1.13)
    parameters.setdefault('t1',7.8)

    # exponential distribution
    # (BUG FIX: 'just'/'just_exponential' were previously unreachable because
    # the branch condition did not include them, so the beta = 0.125
    # assignment below could never run)
    if distribution in ('exponential','aumer','aumer_exponential','miller','just','just_exponential'):
        if verbose: print('using exponential distribution')
        if distribution in ('aumer','aumer_exponential'):
            parameters['beta'] = 0.117      # Aumer & Binney (2009)
        if distribution == 'miller':
            parameters['beta'] = 0.5*numpy.max(age_range)   # Miller & Scalo (1979)
        if distribution in ('just','just_exponential'):
            parameters['beta'] = 0.125      # Just & Jahriess (2010)
        if parameters['beta'] != 0.:
            # CDF (inverse transform) sampling
            x = numpy.linspace(numpy.min(age_range),numpy.max(age_range),num=10000)
            y = numpy.exp(parameters['beta']*x)
            y -= numpy.min(y)
            y /= numpy.max(y)
            f = interp1d(y,x)
            ages = f(numpy.random.uniform(size=num))
        else:
            # beta = 0 degenerates to a flat distribution
            ages = numpy.random.uniform(numpy.min(age_range), numpy.max(age_range), size=num)

    # double exponential
    elif distribution in ('double_exponential','aumer_double'):
        if verbose: print('using double exponential distribution')
        if distribution == 'aumer_double':
            # Aumer & Binney (2009) double-exponential parameters
            parameters['beta'] = 0.348
            parameters['lambda'] = 2.0
            parameters['a'] = 1.e-8
        # CDF sampling
        x = numpy.linspace(numpy.min(age_range),numpy.max(age_range),num=10000)
        y = parameters['a']*numpy.exp(parameters['lambda']*x) + numpy.exp(parameters['beta']*x)
        y -= numpy.min(y)
        y /= numpy.max(y)
        f = interp1d(y,x)
        ages = f(numpy.random.uniform(size=num))

    # peaked distribution
    elif distribution in ('peaked','just_peaked','just_peaked_a','just_peaked_b','aumer_peaked'):
        if verbose: print('using peaked distribution')
        # Aumer & Binney 2009
        if distribution == 'aumer_peaked':
            parameters['t0'] = 0.
            parameters['t1'] = 7.23
        # Just & Jahriess 2010 Model A
        if distribution == 'just_peaked_a':
            parameters['t0'] = 5.6
            parameters['t1'] = 8.2
            sfh = True
        # Just & Jahriess 2010 Model B (default)
        if distribution in ('just_peaked','just_peaked_b'):
            parameters['t0'] = 1.13
            parameters['t1'] = 7.8
            sfh = True
        # generate CDF by cumulative summation and then do CDF sampling;
        # note that the functional form differs between the two variants
        x = numpy.linspace(numpy.min(age_range),numpy.max(age_range),num=10000)
        if 'just' in distribution:
            y = (x+parameters['t0'])/((x**2+parameters['t1']**2)**2)
        else:
            y = (14.-x+parameters['t0'])/(((14.-x)**2+parameters['t1']**2)**2)
        yc = numpy.cumsum(y)
        yc -= numpy.min(yc)
        yc /= numpy.max(yc)
        f = interp1d(yc,x)
        ages = f(numpy.random.uniform(size=num))

    # cosmic star formation rate
    elif distribution in ('cosmic','rujopakarn'):
        if verbose: print('using cosmic SFH distribution')
        if distribution == 'rujopakarn':
            parameters['alpha'] = 3.5
        cosmo = Planck15         # in case we want to change the cosmology later
        zrng = [z_at_value(cosmo.lookback_time,numpy.min(age_range)*u.Gyr),z_at_value(cosmo.lookback_time,numpy.max(age_range)*u.Gyr)]
        # CDF sampling in redshift space, then convert back to lookback time
        x = numpy.linspace(numpy.min(zrng),numpy.max(zrng),num=10000)
        y = (x+1.)**parameters['alpha']
        y -= numpy.min(y)
        y /= numpy.max(y)
        f = interp1d(y,x)
        z = f(numpy.random.uniform(size=num))
        # NOTE(review): lookback_time returns an astropy Quantity, whereas the
        # other branches return plain arrays — confirm callers handle both
        ages = cosmo.lookback_time(z)

    # uniform distribution (default)
    elif distribution in ('uniform','flat'):
        if verbose: print('using uniform distribution')
        ages = numpy.random.uniform(numpy.min(age_range), numpy.max(age_range), size=num)

    # unrecognized distribution
    # (BUG FIX: previously fell through to a NameError on the undefined
    # `ages`; now raises an informative error like simulateMasses does)
    else:
        raise NameError('\n{} distribution is not recognized; please choose from {}'.format(distribution,allowed_distributions))

    # a star formation history runs forward in time; reverse to get ages
    if sfh:
        if verbose: print('reversing ages (SFH)')
        ages = numpy.max(ages)-ages
    return ages
def simulateMasses(num,**kwargs):
    '''
    :Purpose: Generates a distribution of masses based on the defined input distribution.
    Required Inputs:
    :param: num: number of masses to generate
    Optional Inputs:
    :param: mass_range: range of masses to draw from (default = [0.01,0.1]); can also specify ``range``, ``minmass`` or ``min``, and ``maxmass`` or ``max``
    :param: distribution: can be a string set to one of the following to define the type of mass distribution to sample:
        * `powerlaw` or `power-law`: single power-law distribution, P(M) ~ M\^-alpha (default). You can specify the parameter `alpha` or set ``distribution`` to `salpeter`
        * `uniform`: uniform distribution
        * `broken-powerlaw` or `broken-power-law`: a broken power-law distribution; segments are specified by the parameters `alpha-broken` (N+1 slopes) and `mass-broken` (N mass break points); segments are forced to be continuous. You can also set ``distribution`` to `kroupa`
        * `lognormal` or `log-normal`: log normal distribution. You can specify the parameters `log-mu` and `log-sigma`
        * `kroupa`: broken power-law distribution with slopes [0.3, 1.3, 2.3] and breaks at 0.08 and 0.5 solar masses (Kroupa 2001)
        * `chabrier`: lognormal distribution below 1 solar mass joined to a Salpeter power law above it (Chabrier 2005)
        * `salpeter`: single power-law with alpha = 2.35 (Salpeter 1955)
    :param: parameters: dictionary containing the parameters for the mass distribution being used; options include:
        * `alpha`: exponent for power-law distribution
        * `alpha-broken`: array of power-law factors for broken power-law distribution
        * `mass-broken`: array of masses (in solar masses) at which the broken-law slopes change
        * `log-mu` and `log-sigma`: parameters for lognormal distribution in units of solar masses
    :param: verbose: Give feedback (default = False)
    :Raises: NameError if the distribution name is not recognized.
    Output:
    An array of masses drawn from the desired distribution in units of solar masses
    :Example:
    >>> import splat
    >>> import matplotlib.pyplot as plt
    >>> masses = splat.simulateMasses(10000,distribution='power-law',parameters={'alpha': 0.5},mass_range=[0.01,0.08])
    >>> plt.hist(masses)
    [histogram of masses in range 0.01-0.08 solar masses]
    '''
    # initial parameters; distribution names are matched case-insensitively
    distribution = kwargs.get('distribution','powerlaw').lower()
    allowed_distributions = ['uniform','flat','powerlaw','power-law','broken-powerlaw','broken-power-law','lognormal','log-normal','kroupa','chabrier','salpeter']
    mn = kwargs.get('minmass',0.01)
    mn = kwargs.get('min',mn)
    mx = kwargs.get('maxmass',0.1)
    mx = kwargs.get('max',mx)
    mass_range = kwargs.get('mass_range',[mn,mx])
    mass_range = kwargs.get('range',mass_range)
    verbose = kwargs.get('verbose',False)

    # protective offset so the sampling range is never zero-width
    if mass_range[0] == mass_range[1]:
        mass_range[1] += 0.0001

    # fill in default distribution parameters for any not supplied
    parameters = kwargs.get('parameters',False)
    if parameters == False:
        parameters = {}
    parameters.setdefault('alpha',kwargs.get('alpha',0.5))
    parameters.setdefault('alpha-broken',kwargs.get('alpha-broken',[0.3,1.3,2.3]))
    parameters.setdefault('mass-broken',kwargs.get('mass-broken',[0.08,0.5]))
    parameters.setdefault('log-mu',kwargs.get('log-mu',0.2))
    parameters.setdefault('log-sigma',kwargs.get('log-sigma',0.55))

    # power-law - sample from the analytic CDF
    if distribution in ('power-law','powerlaw','salpeter'):
        if distribution == 'salpeter': parameters['alpha'] = 2.35
        x = numpy.linspace(numpy.min(mass_range),numpy.max(mass_range),num=10000)
        if parameters['alpha'] == 1.:
            # integral of M^-1 is log(M)
            y = numpy.log(x)
        else:
            y = x**(1.-parameters['alpha'])
        y -= numpy.min(y)
        y /= numpy.max(y)
        f = interp1d(y,x)
        masses = f(numpy.random.uniform(size=num))

    # lognormal - draw directly; note this is NOT clipped to mass_range
    elif distribution in ('lognormal','log-normal'):
        masses = numpy.random.lognormal(parameters['log-mu'], parameters['log-sigma'], num)

    # broken power law - build a piecewise CDF and sample it
    elif distribution in ('kroupa','broken-power-law','broken-powerlaw'):
        if distribution == 'kroupa':
            parameters['alpha-broken'] = [0.3,1.3,2.3]
            parameters['mass-broken'] = [0.08,0.5]
        if len(parameters['alpha-broken'])-1 != len(parameters['mass-broken']):
            raise ValueError('\nBroken Power Law should have one more alpha parameter than mass break parameter; your values are alpha = {} and masses = {}'.format(parameters['alpha-broken'],parameters['mass-broken']))
        yfull = []
        xfull = []
        mlow = numpy.min(mass_range)
        # assemble each power-law segment, shifted so segments are continuous
        for i,mb in enumerate(parameters['mass-broken']):
            if mlow < mb and mlow < numpy.max(mass_range):
                x = numpy.linspace(mlow,numpy.min([mb,numpy.max(mass_range)]),num=10000)
                y = x**(-1.*parameters['alpha-broken'][i])
                y -= y[0]
                if len(yfull) > 0: y += yfull[-1]
                yfull.extend(y)
                xfull.extend(x)
                mlow = mb
        if mlow < numpy.max(mass_range):
            x = numpy.linspace(mlow,numpy.max(mass_range),num=10000)
            # NOTE(review): this final segment uses exponent (1 - alpha) while
            # the segments above use -alpha; confirm which normalization the
            # authors intended
            y = x**(-1.*parameters['alpha-broken'][-1]+1.)
            y -= y[0]
            if len(yfull) > 0: y += yfull[-1]
            yfull.extend(y)
            xfull.extend(x)
        yfull = numpy.array(yfull) - numpy.min(yfull)
        yc = numpy.cumsum(yfull)
        yc -= numpy.min(yc)
        yc /= numpy.max(yc)
        f = interp1d(yc,xfull)
        masses = f(numpy.random.uniform(size=num))

    # Chabrier (2005) distribution
    elif distribution == 'chabrier':
        # lognormal below 1 solar mass, Salpeter power law above it
        # (BUG FIX: these previously mixed ndarray and list, so .extend()
        # raised AttributeError whenever mass_range spanned 1.0 solar mass)
        yfull = []
        xfull = []
        if numpy.min(mass_range) < 1.0:
            xfull = numpy.linspace(numpy.min(mass_range),1.0,num=10000).tolist()
            yfull = stats.lognorm.cdf(numpy.array(xfull)-0.2,0.55).tolist()
        if numpy.max(mass_range) > 1.0:
            x = numpy.linspace(1.0,numpy.max(mass_range),num=10000)
            y = x**(-1.35)
            y -= y[0]
            if len(yfull) > 0:
                # shift so the Salpeter piece joins the lognormal piece continuously
                y += yfull[-1]
            yfull.extend(y.tolist())
            xfull.extend(x.tolist())
        yfull = numpy.array(yfull) - numpy.min(yfull)
        yc = numpy.cumsum(yfull)
        yc -= numpy.min(yc)
        yc /= numpy.max(yc)
        f = interp1d(yc,xfull)
        masses = f(numpy.random.uniform(size=num))

    # uniform distribution
    elif distribution in ('uniform','flat'):
        masses = numpy.random.uniform(numpy.min(mass_range), numpy.max(mass_range), size=num)

    # unrecognized distribution
    else:
        raise NameError('\n{} distribution is not recognized; please choose from {}'.format(distribution,allowed_distributions))
    return masses
def simulateMassRatios(num,**kwargs):
    '''
    :Purpose: Generates a distribution of mass ratios (q = M2/M1) based on the defined input distribution. It is assumed that q <= 1
    Required Inputs:
    :param: num: number of mass ratios to generate
    Optional Inputs:
    .. _Allen (2007), ApJ 668, 492: http://adsabs.harvard.edu/abs/2007ApJ...668..492A
    .. _Burgasser et al (2006), ApJS 166, 585: http://adsabs.harvard.edu/abs/2006ApJS..166..585B
    :param: q_range: range of mass ratios to draw from (default = [0.1,1.0]); can also specify ``range``, ``minq`` or ``min``, and ``maxq`` or ``max``
    :param: distribution: can be a string set to one of the following to define the type of mass ratio distribution to sample:
        * `uniform`: uniform distribution (default)
        * `powerlaw` or `power-law`: single power-law distribution. You can specify the parameter `gamma`
        * `allen`: power-law distribution with gamma = 1.8 based on `Allen (2007), ApJ 668, 492`_
        * `burgasser`: power-law distribution with gamma = 4.2 based on `Burgasser et al (2006), ApJS 166, 585`_
    :param: parameters: dictionary containing the parameters for the distribution being used; options include:
        * `gamma`: exponent for power-law distribution
    :param: verbose: Give feedback (default = False)
    :Raises: NameError if the distribution name is not recognized.
    Output:
    An array of mass ratios drawn from the desired distribution
    :Example:
    >>> import splat
    >>> import matplotlib.pyplot as plt
    >>> q = splat.simulateMassRatios(100,distribution='allen',q_range=[0.2,1.0])
    >>> plt.hist(q)
    [histogram of mass ratios in the range 0.2-1.0]
    '''
    # initial parameters; distribution names are matched case-insensitively
    # (BUG FIX: 'burgasser' was accepted by the code below but missing from
    # this list, so the error message for bad inputs was misleading)
    allowed_distributions = ['uniform','flat','powerlaw','power-law','allen','burgasser']
    distribution = kwargs.get('distribution','uniform').lower()
    mn = kwargs.get('minq',0.1)
    mn = kwargs.get('min',mn)
    mx = kwargs.get('maxq',1.)
    mx = kwargs.get('max',mx)
    q_range = kwargs.get('q_range',[mn,mx])
    q_range = kwargs.get('range',q_range)
    verbose = kwargs.get('verbose',False)

    # protective offset so the sampling range is never zero-width; shifts the
    # lower bound down since q is capped at 1
    if q_range[0] == q_range[1]:
        q_range[0] -= 0.0001

    # fill in default distribution parameters for any not supplied
    parameters = kwargs.get('parameters',False)
    if parameters == False:
        parameters = {}
    parameters.setdefault('gamma',kwargs.get('gamma',1.8))

    # power-law - sample from the analytic CDF
    if distribution in ('power-law','powerlaw','allen','burgasser'):
        if distribution == 'allen' or kwargs.get('allen',False) == True: parameters['gamma'] = 1.8
        if distribution == 'burgasser' or kwargs.get('burgasser',False) == True: parameters['gamma'] = 4.2
        x = numpy.linspace(numpy.min(q_range),numpy.max(q_range),num=10000)
        # NOTE(review): the exponent (1 - gamma) yields a distribution that
        # FALLS with q for gamma > 1, whereas the cited Allen (2007) and
        # Burgasser et al. (2006) mass-ratio distributions rise toward q = 1
        # (P(q) ~ q^gamma) — confirm the intended sign with the authors
        if parameters['gamma'] == 1.:
            y = numpy.log(x)
        else:
            y = x**(1.-parameters['gamma'])
        y -= numpy.min(y)
        y /= numpy.max(y)
        f = interp1d(y,x)
        q = f(numpy.random.uniform(size=num))

    # uniform distribution (default)
    elif distribution in ('uniform','flat'):
        q = numpy.random.uniform(numpy.min(q_range), numpy.max(q_range), size=num)

    # unrecognized distribution
    else:
        raise NameError('\n{} distribution is not recognized; please choose from {}'.format(distribution,allowed_distributions))
    return q
def simulateSpatialDistribution(**kwargs):
    '''Placeholder for simulating the spatial distribution of sources; not yet implemented (currently returns None).'''
    pass
def simulateBinaryOrbits(**kwargs):
    '''Placeholder for simulating binary orbital parameters; not yet implemented (currently returns None).'''
    pass
def simulateGalacticOrbits(**kwargs):
    '''Placeholder for simulating Galactic orbits; not yet implemented (currently returns None).'''
    pass
def simulateKinematics(**kwargs):
    '''Placeholder for simulating kinematic properties; not yet implemented (currently returns None).'''
    pass
def simulatePhotometry(**kwargs):
    '''Placeholder for simulating photometric properties; not yet implemented (currently returns None).'''
    pass
def simulatePopulation(**kwargs):
    '''
    Simulate a population of sources: draw ages and masses, evaluate the
    evolutionary models, and assign spectral types from effective
    temperature; optionally add binary companions.

    :param num: number of sources to simulate (read from kwargs; default 100)
    :param age_parameters: kwargs forwarded to simulateAges()
    :param mass_parameters: kwargs forwarded to simulateMasses()
    :param model_parameters: kwargs forwarded to modelParameters()
    :param spt_parameters: kwargs forwarded to splat.typeToTeff()
    :param binaries: if True, also draw mass ratios and companion parameters
    :param binary_parameters: kwargs forwarded to simulateMassRatios()
    :return: dict of arrays keyed by 'age', 'mass', 'gravity', 'luminosity',
        'radius', 'temperature', 'spt' (plus 'q' and '*2' keys for binaries)
    '''
    parameters = {}
    # Bug fix: `num` was previously undefined here (NameError on first use);
    # read it from kwargs with a sensible default.
    num = kwargs.get('num', 100)

    # draw ages - DONE
    age_kwargs = kwargs.get('age_parameters', {})
    parameters['age'] = simulateAges(num, **age_kwargs)

    # draw masses - DONE
    mass_kwargs = kwargs.get('mass_parameters', {})
    parameters['mass'] = simulateMasses(num, **mass_kwargs)

    # extract evolutionary model parameters
    model_kwargs = kwargs.get('model_parameters', {})
    mp = modelParameters(mass=parameters['mass'], age=parameters['age'], **model_kwargs)
    parameters['gravity'] = mp['gravity']
    parameters['luminosity'] = mp['luminosity']
    parameters['radius'] = mp['radius']
    parameters['temperature'] = mp['temperature']

    # determine spectral types from teff - DONE
    # COULD ALSO DO THIS WITH LUMINOSITIES
    spt_kwargs = kwargs.get('spt_parameters', {})
    sp0 = numpy.linspace(10, 40, 300)
    tf0 = numpy.array([splat.typeToTeff(spi, **spt_kwargs)[0] for spi in sp0])
    # drop grid points where the type->teff relation is undefined
    sp = sp0[~numpy.isnan(tf0)]
    tf = tf0[~numpy.isnan(tf0)]
    f_teff_spt = interp1d(tf, sp, bounds_error=False, fill_value=numpy.nan)
    # Bug fix: was `f_teff_sp(...)`, an undefined name (typo for f_teff_spt).
    spt = numpy.array([f_teff_spt(t.value) for t in mp['temperature']])
    parameters['spt'] = spt

    # add binary companions if desired
    if kwargs.get('binaries', False) == True:
        binary_kwargs = kwargs.get('binary_parameters', {})
        parameters['q'] = simulateMassRatios(num, **binary_kwargs)
        parameters['mass2'] = numpy.array(parameters['q']) * numpy.array(parameters['mass'])
        mp = modelParameters(mass=parameters['mass2'], age=parameters['age'], **model_kwargs)
        parameters['gravity2'] = mp['gravity']
        parameters['luminosity2'] = mp['luminosity']
        parameters['radius2'] = mp['radius']
        parameters['temperature2'] = mp['temperature']
        # Bug fix: was `mp['temperature2']`; the freshly computed `mp` is keyed
        # by 'temperature' (see the assignments just above).
        spt2 = numpy.array([f_teff_spt(t.value) for t in mp['temperature']])
        parameters['spt2'] = spt2

    # assign binary orbital properties if desired
    # assign sky positions if desired
    # assign distances based on density profile if desired
    # assign absolute, systemic and apparent magnitudes if desired
    # assign age-dependent kinematics if desired
    # assign proper and radial motions if desired
    # assign apparent binary properties - current projected separation, astrometric offset, primary & secondary RV offsets - if desired
    # assign metallicities (?) if desired
    # visualize output?

    return parameters
| [
"raliclo@gmail.com"
] | raliclo@gmail.com |
bb804b5c1ee9166b23fa7afc639d8fc0121178a5 | d0801bc2efc2d66bf371cd532f43a53b0aaad935 | /simpleui/templatetags/simpletags.py | 40e99e83be8846d87e6e0bf0f27fe00bf21745d8 | [] | no_license | cn-zhangcx/simpleui | 6702d274a1a5cced11ffbaf9ec0c0b3c20235ecf | 54583ef4d033f4eb62e18f1f70f83df5004165eb | refs/heads/master | 2020-05-14T23:38:54.127174 | 2019-04-16T05:25:03 | 2019-04-16T05:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,662 | py | # -*- coding: utf-8 -*-
import django
from django import template
from django.utils.html import format_html
from django.conf import settings
from django.utils.safestring import mark_safe
from django.templatetags import static
import os
import json
import platform
import socket
import simpleui
import base64
import time
from django.db import models
register = template.Library()
@register.filter
def get_icon(name):
    """Template filter: render an ``<i>`` icon tag for *name*.

    NOTE(review): ``cls`` is always empty here, and a second module-level
    ``get_icon`` defined later in this file rebinds the module attribute --
    confirm which implementation is intended for direct calls.
    """
    # Default to the (empty) file icon class.
    cls = ""
    return format_html('<i class="icon {}"></i>', cls)
@register.simple_tag(takes_context=True)
def context_test(context):
    """Debug helper: print the template context; renders nothing."""
    print(context)
    pass
    # context.get('cl').filter_specs[1].links
@register.simple_tag(takes_context=True)
def load_dates(context):
    """Collect date/time-typed filter fields from the changelist and expose
    them to JavaScript as ``searchDates`` (also stored in
    ``context['date_field']`` as ``{field_name: 'datetime'|'date'|'time'}``).
    """
    data = {}
    cl = context.get('cl')
    if cl.has_filters:
        for spec in cl.filter_specs:
            field = spec.field
            field_type = None
            # DateTimeField subclasses DateField, so it must be tested first.
            if isinstance(field, models.DateTimeField):
                field_type = 'datetime'
            elif isinstance(field, models.DateField):
                field_type = 'date'
            elif isinstance(field, models.TimeField):
                field_type = 'time'
            if field_type:
                data[field.name] = field_type
    context['date_field'] = data
    return '<script type="text/javascript">var searchDates={}</script>'.format(json.dumps(data))
@register.filter
def get_date_type(spec):
    """Return 'datetime', 'date' or 'time' for a filter spec's field, or ''."""
    # Order matters: DateTimeField subclasses DateField, so test it first.
    type_checks = (
        (models.DateTimeField, 'datetime'),
        (models.DateField, 'date'),
        (models.TimeField, 'time'),
    )
    for field_cls, label in type_checks:
        if isinstance(spec.field, field_cls):
            return label
    return ''
@register.filter
def test(obj):
    """Debug filter: print the piped value and render nothing."""
    print(obj)
    # pass
    return ''
@register.filter
def to_str(obj):
    """Template filter: coerce any value to its ``str()`` representation."""
    return str(obj)
@register.filter
def date_to_json(obj):
    """Serialise a date filter spec's ``date_params`` dict to a JSON string."""
    return json.dumps(obj.date_params)
@register.simple_tag(takes_context=True)
def home_page(context):
    """
    Configure the admin home page: use SIMPLEUI_HOME_PAGE as a custom home
    URL when set, and always fill in the tab title/icon, falling back to
    the built-in defaults.
    """
    custom_home = __get_config('SIMPLEUI_HOME_PAGE')
    if custom_home:
        context['home'] = custom_home
    # `or` mirrors the original falsy checks: empty/None config values
    # fall back to the defaults.
    context['title'] = __get_config('SIMPLEUI_HOME_TITLE') or '首页'
    context['icon'] = __get_config('SIMPLEUI_HOME_ICON') or 'el-icon-menu'
    return ''
def __get_config(name):
    """Look up a setting: environment variable first, then Django settings;
    returns None when defined in neither."""
    value = os.environ.get(name, getattr(settings, name, None))
    return value
@register.filter
def get_config(key):
    """Template filter exposing ``__get_config`` to templates."""
    return __get_config(key)
@register.simple_tag
def get_server_info():
    """Render a small HTML table with the server's host name, OS and IP."""
    # Renamed from `dict`, which shadowed the builtin.
    info = {
        'Network': platform.node(),
        'OS': platform.platform(),
    }
    try:
        info['IP'] = socket.gethostbyname(socket.gethostname())
    except OSError:
        # Hostname may fail to resolve (offline box, misconfigured DNS).
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; socket.gaierror subclasses OSError.
        info['IP'] = '无法获取'
    return format_table(info)
@register.simple_tag
def get_app_info():
    """Render an HTML table with the Python/Django/Simpleui versions in use."""
    versions = {
        'Python': platform.python_version(),
        'Django': django.get_version(),
        'Simpleui': simpleui.get_version(),
    }
    return format_table(versions)
def format_table(dict):
    """Render a mapping as a two-column (key, value) HTML table with the
    `simpleui-table` CSS class."""
    rows = ''.join(
        '<tr><th>{}</th><td>{}</td></tr>'.format(key, dict.get(key))
        for key in dict
    )
    return format_html('<table class="simpleui-table"><tbody>' + rows + '</tbody></table>')
@register.simple_tag(takes_context=True)
def menus(context):
    """Build the sidebar menu structure and expose it to JS as ``menus``.

    Uses SIMPLEUI_CONFIG['menus'] when provided; otherwise derives the menu
    from Django's app_list (one entry per app, one child per model).
    """
    data = []
    # return request.user.has_perm("%s.%s" % (opts.app_label, codename))
    config = get_config('SIMPLEUI_CONFIG')
    # Use the configured menus if present; otherwise fall back to app_list.
    if config and 'menus' in config:
        data = config.get('menus')
        pass
    else:
        app_list = context.get('app_list')
        for app in app_list:
            models = []
            if app.get('models'):
                for m in app.get('models'):
                    models.append({
                        'name': str(m.get('name')),
                        'icon': get_icon(m.get('object_name')),
                        'url': m.get('admin_url'),
                        'addUrl': m.get('add_url'),
                        # Breadcrumb trail: app name, then model name.
                        'breadcrumbs': [str(app.get('name')), str(m.get('name'))]
                    })
            module = {
                'name': str(app.get('name')),
                'icon': get_icon(app.get('app_label')),
                'models': models
            }
            data.append(module)
    return '<script type="text/javascript">var menus={}</script>'.format(json.dumps(data))
def get_icon(obj):
    """Map an app label / model name to a Font Awesome icon class.

    Unknown names fall back to the generic file icon.  NOTE(review): this
    module-level function shares its name with the ``get_icon`` template
    filter defined earlier in the file; this later definition rebinds the
    module attribute -- confirm that is intended.
    """
    # Renamed from `dict` (shadowed the builtin); dict.get with a default
    # replaces the manual "lookup then None-check" dance.
    icon_map = {
        'auth': 'fas fa-shield-alt',
        'User': 'far fa-user',
        'Group': 'fas fa-users-cog',
    }
    return icon_map.get(obj, 'far fa-file')
@register.simple_tag(takes_context=True)
def load_message(context):
    """Serialise Django messages into a JS ``messages`` variable.

    Bug fix: the original interpolated the Python list *repr* into the
    script tag, which breaks (or allows injection) as soon as a message
    contains a quote or newline.  json.dumps produces valid, escaped JS.
    """
    messages = context.get('messages')
    array = []
    if messages:
        for msg in messages:
            array.append({
                'msg': msg.message,
                'tag': msg.tags
            })
    # default=str: message text may be a lazy translation proxy rather
    # than a plain str.
    return '<script type="text/javascript"> var messages={}</script>'.format(
        json.dumps(array, default=str))
@register.simple_tag(takes_context=True)
def context_to_json(context):
    """Placeholder: currently always renders an empty JSON object."""
    json_str = '{}'
    return mark_safe(json_str)
@register.simple_tag()
def get_language():
    """Return the active language code (e.g. 'en', 'zh-hans')."""
    return django.utils.translation.get_language()
@register.filter
def get_language_code(val):
    """Filter variant of ``get_language``; ``val`` is ignored (Django
    filters must accept an input argument)."""
    return django.utils.translation.get_language()
def get_analysis_config():
    """Analytics toggle: enabled unless SIMPLEUI_ANALYSIS is explicitly set
    to a value equal to False (False/0).  Missing/None means enabled.

    Fixes the redundant original condition ``not val and val == False``:
    the first term is implied by the second for all standard types.
    """
    val = __get_config('SIMPLEUI_ANALYSIS')
    return val != False  # noqa: E712 -- equality (not identity) is intended
@register.simple_tag(takes_context=True)
def load_analysis(context):
    """Emit the (opt-out) usage-analytics ``<script>`` tag.

    Reports platform/version info, base64-encoded into the script URL, at
    most once per day per session.  Any failure is deliberately swallowed
    so analytics can never break page rendering.
    """
    try:
        if get_analysis_config() == False:
            return ''
        # Report at most once per day: the session key embeds today's date.
        key = 'simpleui_' + time.strftime('%Y%m%d', time.localtime())
        if key in context.request.session:
            return ''
        b64 = ""
        # Payload: node, OS, Python, Django and simpleui versions.
        j = {
            "n": platform.node(),
            "o": platform.platform(),
            "p": platform.python_version(),
            "d": django.get_version(),
            "s": simpleui.get_version(),
        }
        # Include the active admin theme, if one is selected.
        if 'theme_name' in context.request.COOKIES:
            j['t'] = context.request.COOKIES['theme_name']
        else:
            j['t'] = 'Default'
        b64 = base64.b64encode(str(j).encode('utf-8'))
        url = '//simpleui.88cto.com/analysis'
        b64 = b64.decode('utf-8')
        html = '<script async type="text/javascript" src="{}/{}"></script>'.format(url, b64);
        # Mark this session as reported for today.
        context.request.session[key] = True
        return mark_safe(html)
    except:
        # Best-effort: analytics failures must never surface to the user.
        return ''
| [
"newpanjing@163.com"
] | newpanjing@163.com |
1d64cc710271280fcbcc423dd7aff0ce53d04cca | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_12348-3001/sdB_ec_12348-3001_coadd.py | 886846dc958ceb1fa79b323df15547fbb52c6935 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from gPhoton.gMap import gMap
def main():
    # Single-purpose driver: build a 30 s-cadence NUV count movie and a
    # coadded count image for sdB_ec_12348-3001 with gPhoton's gMap
    # (output paths are hard-coded).
    gMap(band="NUV", skypos=[189.3895,-30.306328], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_ec_12348-3001/sdB_ec_12348-3001_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_ec_12348-3001/sdB_ec_12348-3001_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
4dcd7b99114efd81e8ac45c656c7038f8db526f7 | 1ff9adfdb9d559e6f81ed9470467bab25e93b5ab | /src/ta_lib/_vendor/tigerml/automl/custom_configs/classification/popular.py | e9ec0db5fa11ef900db2a1a6bbcf1db75e7db9ab | [] | no_license | Seemant-tiger/housing-price-prediction | a39dbefcb11bc460edeeee92e6becf77d35ff3a8 | be5d8cca769c7e267cfee1932eb82b70c2855bc1 | refs/heads/main | 2023-06-24T00:25:49.776720 | 2021-07-18T16:44:28 | 2021-07-18T16:44:28 | 387,222,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,149 | py | # -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Weixuan Fu (weixuanf@upenn.edu)
- Daniel Angell (dpa34@drexel.edu)
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
# Check the TPOT documentation for information on the structure of config dicts
config = {
# Classifiers
"sklearn.ensemble.RandomForestClassifier": {
"n_estimators": [100, 200, 400],
"criterion": ["gini", "entropy"],
"max_features": np.arange(0.05, 1.01, 0.05),
"min_samples_split": range(2, 21),
"min_samples_leaf": range(1, 21),
"bootstrap": [True, False],
},
"sklearn.ensemble.GradientBoostingClassifier": {
"n_estimators": [100],
"learning_rate": [1e-3, 1e-2, 1e-1, 0.5, 1.0],
"max_depth": range(1, 11),
"min_samples_split": range(2, 21),
"min_samples_leaf": range(1, 21),
"subsample": np.arange(0.05, 1.01, 0.05),
"max_features": np.arange(0.05, 1.01, 0.05),
},
"sklearn.svm.LinearSVC": {
"penalty": ["l1", "l2"],
"loss": ["hinge", "squared_hinge"],
"dual": [True, False],
"tol": [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
"C": [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1.0, 5.0, 10.0, 15.0, 20.0, 25.0],
},
"sklearn.linear_model.LogisticRegression": {
"penalty": ["l1", "l2"],
"C": [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1.0, 5.0, 10.0, 15.0, 20.0, 25.0],
"dual": [True, False],
},
"xgboost.XGBClassifier": {
"n_estimators": [100],
"max_depth": range(1, 11),
"learning_rate": [1e-3, 1e-2, 1e-1, 0.5, 1.0],
"subsample": np.arange(0.05, 1.01, 0.05),
"min_child_weight": range(1, 21),
"nthread": [1],
},
    # Preprocessors
"sklearn.preprocessing.Binarizer": {"threshold": np.arange(0.0, 1.01, 0.05)},
# 'sklearn.decomposition.FastICA': {
# 'tol': np.arange(0.0, 1.01, 0.05)
# },
"sklearn.cluster.FeatureAgglomeration": {
"linkage": ["ward", "complete", "average"],
"affinity": ["euclidean", "l1", "l2", "manhattan", "cosine"],
},
"sklearn.preprocessing.MaxAbsScaler": {},
"sklearn.preprocessing.MinMaxScaler": {},
"sklearn.preprocessing.Normalizer": {"norm": ["l1", "l2", "max"]},
# 'sklearn.kernel_approximation.Nystroem': {
# 'kernel': ['rbf', 'cosine', 'chi2', 'laplacian', 'polynomial', 'poly', 'linear', 'additive_chi2', 'sigmoid'],
# 'gamma': np.arange(0.0, 1.01, 0.05),
# 'n_components': range(1, 11)
# },
# 'sklearn.decomposition.PCA': {
# 'svd_solver': ['randomized'],
# 'iterated_power': range(1, 11)
# },
# 'sklearn.preprocessing.PolynomialFeatures': {
# 'degree': [2],
# 'include_bias': [False],
# 'interaction_only': [False]
# },
# 'sklearn.kernel_approximation.RBFSampler': {
# 'gamma': np.arange(0.0, 1.01, 0.05)
# },
"sklearn.preprocessing.RobustScaler": {},
"sklearn.preprocessing.StandardScaler": {},
# 'tpot.builtins.ZeroCount': {
# },
"tpot.builtins.OneHotEncoder": {
"minimum_fraction": [0.05, 0.1, 0.15, 0.2, 0.25],
"sparse": [False],
"threshold": [10],
},
# Selectors
"sklearn.feature_selection.SelectFwe": {
"alpha": np.arange(0, 0.05, 0.001),
"score_func": {"sklearn.feature_selection.f_classif": None},
},
"sklearn.feature_selection.SelectPercentile": {
"percentile": range(1, 100),
"score_func": {"sklearn.feature_selection.f_classif": None},
},
"sklearn.feature_selection.VarianceThreshold": {
"threshold": [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2]
},
"sklearn.feature_selection.RFE": {
"step": np.arange(0.05, 1.01, 0.05),
"estimator": {
"sklearn.ensemble.ExtraTreesClassifier": {
"n_estimators": [100],
"criterion": ["gini", "entropy"],
"max_features": np.arange(0.05, 1.01, 0.05),
}
},
},
"sklearn.feature_selection.SelectFromModel": {
"threshold": np.arange(0, 1.01, 0.05),
"estimator": {
"sklearn.ensemble.ExtraTreesClassifier": {
"n_estimators": [100],
"criterion": ["gini", "entropy"],
"max_features": np.arange(0.05, 1.01, 0.05),
}
},
},
}
| [
"seemantsingh1199@gmail.com"
] | seemantsingh1199@gmail.com |
3ff6bc094a8db9af718df9fc0297f36c708d2486 | cfd39f470d13f089d7d853b7ea0d0260c0ae6f95 | /experiments/inverse_problems_science/dkfz_eval.py | 0e57426c957fd763d7b16883f195603beaceace0 | [
"MIT"
] | permissive | JackBrady/FrEIA | d1c32c1c6a60a43b25d0011d3525dcd5cdcb4244 | 5d836494489e5b210a577815b89c4c2988fd8658 | refs/heads/master | 2020-12-22T07:15:51.914427 | 2020-05-07T02:48:19 | 2020-05-07T02:48:19 | 236,707,547 | 0 | 0 | MIT | 2020-01-28T10:18:01 | 2020-01-28T10:18:00 | null | UTF-8 | Python | false | false | 3,777 | py | import pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import torch
import dkfz_train
import model
import config as c
model.load('output/dkfz_inn.pt')
print('Trainable parameters:')
print(sum([p.numel() for p in model.params_trainable]))
def concatenate_test_set():
    """Concatenate all batches of the configured test loader into single
    x and y tensors (stacked along the batch dimension)."""
    x_all, y_all = [], []
    for x,y in c.test_loader:
        x_all.append(x)
        y_all.append(y)
    return torch.cat(x_all, 0), torch.cat(y_all, 0)
x_all, y_all = concatenate_test_set()
def sample_posterior(y_it, N=4096):
    """Draw N posterior samples of x for each conditioning observation y.

    For every y, a latent vector z (plus zy-padding noise) is assembled
    with the fixed y and pushed through the INN in reverse mode.

    :param y_it: iterable of observation vectors y.
    :param N: number of samples per observation.
    :return: list of (N, ndim_x) numpy arrays, one per y.
    """
    outputs = []
    for y in y_it:
        # Reverse-pass input layout: [z | zy-padding | y].
        rev_inputs = torch.cat([torch.randn(N, c.ndim_z + c.ndim_pad_zy),
                                torch.zeros(N, c.ndim_y)], 1).to(c.device)
        if c.ndim_pad_zy:
            # Scale the padding block down to small noise.
            rev_inputs[:, c.ndim_z:-c.ndim_y] *= c.add_pad_noise
        rev_inputs[:, -c.ndim_y:] = y
        with torch.no_grad():
            x_samples = model.model(rev_inputs, rev=True)
        outputs.append(x_samples.data.cpu().numpy())
    return outputs
def show_posteriors():
    """Plot sampled posteriors for the first few test observations against
    the prior histograms and the ground-truth values (first n_x dims of x).
    """
    # how many different posteriors to show:
    n_plots = 5
    # how many dimensions of x to use:
    n_x = 3

    def hists(x):
        # Per-dimension histograms over a fixed [-2, 2] range, peak-normalised.
        results = []
        for j in range(n_x):
            h, b = np.histogram(x[:, j], bins=100, range=(-2,2), density=True)
            h /= np.max(h)
            results.append([b[:-1],h])
        return results

    prior_hists = hists(x_all)
    x_gt = x_all[:n_plots]
    y_gt = y_all[:n_plots]
    posteriors = sample_posterior(y_gt)

    # Central 68% credible interval, expressed as percentiles.
    confidence = 0.68
    q_low  = 100. * 0.5 * (1 - confidence)
    q_high = 100. * 0.5 * (1 + confidence)

    for i in range(n_plots):
        hist_i = hists(posteriors[i])
        for j in range(n_x):
            plt.subplot(n_plots, n_x, n_x*i + j + 1)
            # Grey: prior; blue: posterior for observation i.
            plt.step(*(prior_hists[j]), where='post', color='grey')
            plt.step(*(hist_i[j]), where='post', color='blue')
            x_low, x_high = np.percentile(posteriors[i][:,j], [q_low, q_high])
            # Black: ground truth; orange: credible interval bounds.
            plt.plot([x_gt[i,j], x_gt[i,j]], [0,1], color='black')
            plt.plot([x_low, x_low], [0,1], color='orange')
            plt.plot([x_high, x_high], [0,1], color='orange')
    plt.tight_layout()
def calibration_error():
    """Compute and plot the calibration curve of the learned posterior for
    one parameter: for each confidence level, the fraction of test points
    whose ground truth falls inside the corresponding credible interval,
    minus the nominal confidence."""
    # which parameter to look at (0: SO2)
    x_ind = 0
    # how many different confidences to look at
    n_steps = 100

    q_values = []
    confidences = np.linspace(0., 1., n_steps+1, endpoint=False)[1:]
    uncert_intervals = [[] for i in range(n_steps)]
    inliers = [[] for i in range(n_steps)]

    # Pre-compute the (low, high) quantile pair for every confidence level.
    for conf in confidences:
        q_low = 0.5 * (1 - conf)
        q_high = 0.5 * (1 + conf)
        q_values += [q_low, q_high]

    from tqdm import tqdm
    for x,y in tqdm(zip(x_all, y_all), total=x_all.shape[0], disable=False):
        post = sample_posterior([y])[0][:, x_ind]
        x_margins = list(np.quantile(post, q_values))
        for i in range(n_steps):
            # Quantiles were appended pairwise (low, high) per confidence.
            x_low, x_high = x_margins.pop(0), x_margins.pop(0)
            uncert_intervals[i].append(x_high - x_low)
            inliers[i].append(int(x[x_ind] < x_high and x[x_ind] > x_low))

    inliers = np.mean(inliers, axis=1)
    uncert_intervals = np.median(uncert_intervals, axis=1)
    calib_err = inliers - confidences

    print(F'Median calibration error:             {np.median(np.abs(calib_err))}')
    # Index 68 corresponds to a confidence of ~0.68 given the linspace above.
    print(F'Calibration error at 68% confidence:  {calib_err[68]}')
    print(F'Med. est. uncertainty at 68% conf.:   {uncert_intervals[68]}')

    plt.subplot(2, 1, 1)
    plt.plot(confidences, calib_err)
    plt.ylabel('Calibration error')
    plt.subplot(2, 1, 2)
    plt.plot(confidences, uncert_intervals)
    plt.ylabel('Median estimated uncertainty')
    plt.xlabel('Confidence')
show_posteriors()
calibration_error()
plt.show()
| [
"lynton.ardizzone@iwr.uni-heidelberg.de"
] | lynton.ardizzone@iwr.uni-heidelberg.de |
aad1a11cfde52faaf3be013f179124ca06ee7ceb | 8ad18381b31acbfe4f81f85c19cf5c020938bb94 | /Products/migrations/0002_auto_20210814_2159.py | fae9d5ade9ad59a5e60628c1ee4a94db56c3c59e | [] | no_license | prathmesh2048/Tradexa_assignment | 52796c80c269175b22aa131cb40f4cddd7c6b14f | 7e5c4201eec35415516fafa32e81730ffabfab0c | refs/heads/master | 2023-07-08T12:06:24.151983 | 2021-08-14T16:38:36 | 2021-08-14T16:38:36 | 396,067,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # Generated by Django 3.2.6 on 2021-08-14 16:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Product schema update: rename ``title`` to ``name`` (existing data is
    preserved by RenameField) and add optional ``price``/``weight`` columns."""

    dependencies = [
        ('Products', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='title',
            new_name='name',
        ),
        migrations.AddField(
            model_name='product',
            name='price',
            field=models.PositiveBigIntegerField(null=True),
        ),
        migrations.AddField(
            model_name='product',
            name='weight',
            field=models.PositiveBigIntegerField(null=True),
        ),
    ]
| [
"prathmeshnandurkar123@gmail.com"
] | prathmeshnandurkar123@gmail.com |
ab43ee6cdbe330f55b39387b7e1104be8e5bd9e4 | 518bf342bc4138982af3e2724e75f1d9ca3ba56c | /solutions/1392. Longest Happy Prefix/1392.py | b1c6d9edded10615aa7d40e3a7f7c328eae58194 | [
"MIT"
] | permissive | walkccc/LeetCode | dae85af7cc689882a84ee5011f0a13a19ad97f18 | a27be41c174565d365cbfe785f0633f634a01b2a | refs/heads/main | 2023-08-28T01:32:43.384999 | 2023-08-20T19:00:45 | 2023-08-20T19:00:45 | 172,231,974 | 692 | 302 | MIT | 2023-08-13T14:48:42 | 2019-02-23T15:46:23 | C++ | UTF-8 | Python | false | false | 568 | py | class Solution:
def longestPrefix(self, s: str) -> str:
kBase = 26
kMod = 1_000_000_007
n = len(s)
maxLength = 0
pow = 1
prefixHash = 0 # hash of s[0..i]
suffixHash = 0 # hash of s[j..n)
def val(c: str) -> int:
return ord(c) - ord('a')
j = n - 1
for i in range(n - 1):
prefixHash = (prefixHash * kBase + val(s[i])) % kMod
suffixHash = (val(s[j]) * pow + suffixHash) % kMod
pow = pow * kBase % kMod
if prefixHash == suffixHash:
maxLength = i + 1
j -= 1
return s[:maxLength]
| [
"me@pengyuc.com"
] | me@pengyuc.com |
e33eb0468b760e44fa55435a7441d67b25911100 | e46392a087706a3cbd726822a3735892becaeaac | /selfsup/multi/methods/video_saliency.py | fcbddd3435c09c6bfe31f73ff153377b4cad6904 | [
"BSD-3-Clause"
] | permissive | Pandinosaurus/self-supervision | 39d44d864d140e35bb35fc76adb7412403ea92ea | fe99e707bcccc0eed39dfe8a4651c433caf6f8c4 | refs/heads/master | 2021-04-06T03:06:38.970572 | 2017-09-05T03:29:46 | 2017-09-05T03:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,857 | py | from __future__ import division, print_function, absolute_import
import selfsup
import tensorflow as tf
import os
from .base import Method
from collections import OrderedDict
class VideoSaliency(Method):
    """Self-supervision method: predict per-pixel video saliency, quantised
    into 32 classes, with a decoder on top of the base network."""

    def __init__(self, name, basenet, loader):
        self.name = name
        self.basenet = basenet
        self._loader = loader
        # Saliency values in [0, 1) are discretised into this many bins.
        self._classes = 32

    @property
    def basenet_settings(self):
        # The base network is used in non-convolutional (fixed-size) mode.
        return {'convolutional': False}

    def batch(self):
        x, extra = self._loader.batch()
        # The loader must supply a per-pixel saliency target.
        assert 'saliency' in extra
        return x, extra

    def build_network(self, network, extra, phase_test, global_step):
        """Attach the saliency decoder and cross-entropy loss to the graph.

        Returns the populated info dict (activations include the losses).
        """
        info = selfsup.info.create(scale_summary=True)

        z = network['activations']['top']
        logits = self.basenet.decoder(z, channels=self._classes, multiple=4)

        # Resize the saliency target to the logits' spatial size, then
        # quantise [0, 1) saliency into integer class labels
        # (* 0.99999 keeps a value of exactly 1.0 in the last bin).
        y = tf.image.resize_bilinear(extra['saliency'], logits.get_shape().as_list()[1:3])
        labels = tf.to_int32(tf.floor(y[..., 0] * self._classes * 0.99999))

        with tf.variable_scope('primary_loss'):
            loss_each = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
            primary_loss = tf.reduce_mean(loss_each)

        #with tf.name_scope('weight_decay'):
            #wd = 0.0005
            #l2_loss = tf.nn.l2_loss(fc8W)
            #weight_decay = wd * l2_loss

        with tf.name_scope('loss'):
            loss = primary_loss

        variables = info['vars']

        self.losses = OrderedDict([('main', primary_loss)])
        self.primary_loss = primary_loss
        self.loss = loss
        self.feedback_variables = []

        info['activations']['primary_loss'] = primary_loss
        info['activations']['loss'] = loss
        #info['activations']['weight_decay'] = weight_decay
        return info

    def feedback(self, variables, iteration):
        # No per-iteration feedback for this method.
        pass
| [
"gustav.m.larsson@gmail.com"
] | gustav.m.larsson@gmail.com |
c102fd6f1a8a65e61fd3accf92c9d824f2db3265 | fe04ba6b41745a4f16864aaacf9f1b0005a92e37 | /src/TCA/execute.py | b1aeeaf1678b10a4f004860b04abf12c365294b5 | [] | no_license | bellwethers-in-se/effort | 3148552866911949c4dcf16f3e77ace65b113f5b | 1138919c16036c58d11c6764cdcac89026f0d36d | refs/heads/master | 2021-01-12T02:14:23.456485 | 2017-08-07T17:36:22 | 2017-08-07T17:36:22 | 78,490,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,977 | py | from __future__ import print_function, division
import os
import sys
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
sys.path.append(root)
from oracle.model import rf_model
from utils import *
from mklaren.kernel.kinterface import Kinterface
from mklaren.kernel.kernel import *
from mklaren.projection.icd import ICD
from pdb import set_trace
import numpy as np
from scipy.spatial.distance import pdist, squareform
import pandas
from tabulate import tabulate
from datasets.handler import get_all_datasets
from random import uniform
def get_kernel_matrix(dframe, n_dim=15):
    """
    This returns a Kernel Transformation Matrix $\Theta$.
    It uses the kernel approximation offered by the MKlaren package
    (incomplete Cholesky decomposition of a linear-kernel Gram matrix).
    :param dframe: input data as a pandas dataframe.
    :param n_dim: Number of dimensions for the kernel matrix (default=15)
    :return: $\Theta$ matrix of shape (n_samples, n_dim)
    """
    ker = Kinterface(data=dframe.values, kernel=linear_kernel)
    model = ICD(rank=n_dim)
    model.fit(ker)
    g_nystrom = model.G
    return g_nystrom
def map_transform(src, tgt, n_components=10):
    """
    Map source and target data onto a shared kernel feature space (TCA).

    :param src: source DataFrame; last column is the label, columns whose
        name contains '?' are excluded from the features.
    :param tgt: target DataFrame, same convention.
    :param n_components: dimensionality of the transformed space.
    :return: (x0, y0) DataFrames of transformed features + original labels.
    """
    s_col = [col for col in src.columns[:-1] if '?' not in col]
    t_col = [col for col in tgt.columns[:-1] if '?' not in col]
    S = src[s_col]
    T = tgt[t_col]
    # set_trace()
    # NOTE: xrange/iteritems elsewhere mark this file as Python 2.
    col_name = ["Col_" + str(i) for i in xrange(n_components)]
    x0 = pd.DataFrame(get_kernel_matrix(S, n_components), columns=col_name)
    y0 = pd.DataFrame(get_kernel_matrix(T, n_components), columns=col_name)
    # Re-attach the (untransformed) label columns to the mapped features.
    x0.loc[:, src.columns[-1]] = pd.Series(src[src.columns[-1]], index=x0.index)
    y0.loc[:, tgt.columns[-1]] = pd.Series(tgt[tgt.columns[-1]], index=y0.index)
    return x0, y0
def predict_defects(train, test, weka=False, cutoff=0.6):
    """
    Fit the random-forest model on `train` and predict on `test`.

    :param train: training DataFrame (last column = target).
    :param test: test DataFrame (last column = target).
    :param weka: unused; kept for interface compatibility.
    :param cutoff: unused; kept for interface compatibility.
    :return: (actual, predicted) value arrays.
    """
    actual = test[test.columns[-1]].values
    predicted = rf_model(train, test)
    return actual, predicted
def get_dcv(src, tgt):
    """Compute the dataset characteristic vectors (DCV) of source and target.

    Each DCV summarises the pairwise Euclidean distance distribution of the
    instance features as [mean, median, min, max, std, n_instances].

    Debug residue removed: the original wrapped pdist in a bare
    ``except: set_trace()`` that dropped into the debugger on any failure.

    :param src: pandas DataFrame; last column is the label, columns whose
        name contains '?' are excluded from the features.
    :param tgt: pandas DataFrame, same convention.
    :return: (dcv_src, dcv_tgt), two six-element lists.
    """
    s_col = [col for col in src.columns[:-1] if '?' not in col]
    t_col = [col for col in tgt.columns[:-1] if '?' not in col]
    S = src[s_col]
    T = tgt[t_col]

    def self_dist_mtx(arr):
        # Square symmetric matrix of pairwise Euclidean distances.
        return squareform(pdist(arr))

    dist_src = self_dist_mtx(S.values)
    dist_tgt = self_dist_mtx(T.values)

    dcv_src = [np.mean(dist_src), np.median(dist_src), np.min(dist_src), np.max(dist_src), np.std(dist_src),
               len(S.values)]
    dcv_tgt = [np.mean(dist_tgt), np.median(dist_tgt), np.min(dist_tgt), np.max(dist_tgt), np.std(dist_tgt),
               len(T.values)]

    return dcv_src, dcv_tgt
def sim(c_s, c_t, e=0):
    """Qualitatively compare element ``e`` of two dataset characteristic
    vectors, from the target's point of view relative to the source:
    "VH"/"H"/"SH"/"S"/"SL"/"L"/"VL" (very high ... very low)."""
    source_val = c_s[e]
    target_val = c_t[e]
    if source_val * 1.6 < target_val:
        return "VH"  # very high
    elif source_val * 1.3 < target_val <= source_val * 1.6:
        return "H"  # high
    elif source_val * 1.1 < target_val <= source_val * 1.3:
        return "SH"  # slightly high
    elif source_val * 0.9 <= target_val <= source_val * 1.1:
        return "S"  # same
    elif source_val * 0.7 <= target_val < source_val * 0.9:
        return "SL"  # slightly low
    elif source_val * 0.4 <= target_val < source_val * 0.7:
        return "L"  # low
    elif target_val < source_val * 0.4:
        return "VL"  # very low
def smart_norm(src, tgt, c_s, c_t):
    """
    Choose and apply a normalisation for source/target data based on the
    similarity of their dataset characteristic vectors (TCA+ rules).

    Bug fixed: the original Rule 2 used ``sim(...) == "VL" or "VH"``, which
    is always truthy (the bare string "VH" is truthy), so rules 3-5 were
    unreachable.  Membership tests restore the intended logic.  The bare
    ``except: set_trace()`` debugger residue is replaced with a logged-free
    best-effort fallback.

    :param src: source DataFrame.
    :param tgt: target DataFrame.
    :param c_s: source DCV [mean, median, min, max, std, n] (see get_dcv).
    :param c_t: target DCV, same layout.
    :return: (normalised_src, normalised_tgt)
    """
    try:
        # Rule 1: distance means and stds already comparable -> leave as-is.
        if sim(c_s, c_t, e=0) == "S" and sim(c_s, c_t, e=-2) == "S":
            return src, tgt
        # Rule 2: min, max and instance count all very different -> min-max
        # normalise both datasets.
        elif sim(c_s, c_t, e=2) in ("VL", "VH") \
                and sim(c_s, c_t, e=3) in ("VL", "VH") \
                and sim(c_s, c_t, e=-1) in ("VL", "VH"):
            return df_norm(src), df_norm(tgt)
        # Rule 3: source spread much larger with more rows (or much smaller
        # with fewer) -> z-score the source, min-max the target.
        elif (sim(c_s, c_t, e=-2) == "VH" and c_s[-1] > c_t[-1]) or \
                (sim(c_s, c_t, e=-2) == "VL" and c_s[-1] < c_t[-1]):
            return df_norm(src, type="normal"), df_norm(tgt)
        # Rule 4: the mirror case -> min-max the source, z-score the target.
        elif (sim(c_s, c_t, e=-2) == "VH" and c_s[-1] < c_t[-1]) or \
                (sim(c_s, c_t, e=-2) == "VL" and c_s[-1] > c_t[-1]):
            return df_norm(src), df_norm(tgt, type="normal")
        # Rule 5 (default): z-score both.
        else:
            return df_norm(src, type="normal"), df_norm(tgt, type="normal")
    except Exception:
        # Best-effort fallback: on any failure, use the data unnormalised.
        return src, tgt
def get_mar_p0(trn, tst, n_rep):
    """
    Estimate the MAR of random guessing -- the P0 baseline used by
    Standardised Accuracy (SA): predictions are drawn uniformly between the
    minimum and maximum effort observed in train+test.

    Fix: ``xrange`` (Python 2 only) replaced by ``range``, which is valid on
    both Python 2 and 3; this file already imports from ``__future__``.

    :param trn: training DataFrame; last column is the effort value.
    :param tst: test DataFrame; last column is the effort value.
    :param n_rep: number of random repetitions to average over.
    :return: mean magnitude of relative error of the random predictor.
    """
    effort = trn[trn.columns[-1]].values.tolist() \
             + tst[tst.columns[-1]].values.tolist()
    hi, lo = max(effort), min(effort)
    # Loop-invariant: the actual test efforts never change between repeats.
    actual = tst[tst.columns[-1]].values
    res = []
    for _ in range(n_rep):
        predicted = np.array([uniform(lo, hi) for __ in range(len(actual))])
        res.append(abs((actual - predicted) / actual))
    return np.mean(res)
def tca_plus(source, target, n_rep=12):
    """
    TCA+: Transfer Component Analysis with rule-based normalisation.

    For every target project, train on every other source project: choose a
    normalisation from the dataset characteristic vectors, map both datasets
    into a shared kernel space, predict effort, and report Standardised
    Accuracy (SA) against a random-guessing baseline.

    :param source: {name: csv_path} of source datasets.
    :param target: {name: csv_path} of target datasets.
    :param n_rep: number of repeats per source/target pair.
    :return: {target_name: DataFrame of per-source SA mean/std}
    """
    result = dict()
    for tgt_name, tgt_path in target.iteritems():
        stats = []
        print("{}  \r".format(tgt_name[0].upper() + tgt_name[1:]))
        for src_name, src_path in source.iteritems():
            if not src_name == tgt_name:
                src = pandas.read_csv(src_path)
                tgt = pandas.read_csv(tgt_path)
                # set_trace()
                dcv_src, dcv_tgt = get_dcv(src, tgt)
                for _ in xrange(n_rep):
                    norm_src, norm_tgt = smart_norm(src, tgt, dcv_src, dcv_tgt)
                    _train, __test = map_transform(norm_src.dropna(axis=1, inplace=False),
                                                   norm_tgt.dropna(axis=1, inplace=False))
                    actual, predicted = predict_defects(train=_train, test=__test)
                    # Magnitude of relative error, and random-guess baseline.
                    MAR = abs((actual - predicted) / actual)
                    MAR_p0 = get_mar_p0(_train, __test, n_rep=1000)
                    SA = (1-MAR/MAR_p0)
                # NOTE(review): only the final repeat's SA reaches the stats
                # below (the append sits outside the repeat loop) -- confirm
                # whether aggregation over all repeats was intended.
                stats.append([src_name, round(np.mean(SA), 1), round(np.std(SA), 1)])  # ,
        stats = pandas.DataFrame(sorted(stats, key=lambda lst: lst[1], reverse=False),  # Sort by G Score
                                 columns=["Name", "SA (Mean)", "SA (Std)"])  # ,
        print(tabulate(stats,
                       headers=["Name", "SA (Mean)", "SA (Std)"],
                       tablefmt="fancy_grid"))
        result.update({tgt_name: stats})
    # set_trace()
    return result
def tca_jur():
    """Run TCA+ over every dataset pair (each dataset as source and target)."""
    all = get_all_datasets()
    tca_plus(all, all, n_rep=10)
if __name__ == "__main__":
tca_jur()
| [
"i.m.ralk@gmail.com"
] | i.m.ralk@gmail.com |
51a9212611db2eb748f1976a7bc1337e75df00c1 | ecff4b18a49ce5952c5f9125dc027cebdecf10a8 | /azure-mgmt-scheduler/azure/mgmt/scheduler/models/storage_queue_message.py | 084b78ccb0f84eeaa5695d9ebae6f5beae3eb764 | [
"Apache-2.0"
] | permissive | jehine-MSFT/azure-sdk-for-python | a56c18020ecd5f4c245c093fd6a33e1b1d7c95e1 | 6d0f94b39406eab374906c683bd2150217132a9c | refs/heads/master | 2020-12-06T19:17:38.153819 | 2016-04-08T21:03:16 | 2016-04-08T21:03:16 | 55,809,131 | 0 | 0 | null | 2016-04-08T20:54:00 | 2016-04-08T20:54:00 | null | UTF-8 | Python | false | false | 1,878 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageQueueMessage(Model):
    """Azure Scheduler storage-queue action: the message to post and the
    storage queue/account it targets.

    :param storage_account: Gets or sets the storage account name.
    :type storage_account: str
    :param queue_name: Gets or sets the queue name.
    :type queue_name: str
    :param sas_token: Gets or sets the SAS key.
    :type sas_token: str
    :param message: Gets or sets the message.
    :type message: str
    """

    # msrest (de)serialization map: attribute -> wire name and type.
    _attribute_map = {
        'storage_account': {'key': 'storageAccount', 'type': 'str'},
        'queue_name': {'key': 'queueName', 'type': 'str'},
        'sas_token': {'key': 'sasToken', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, storage_account=None, queue_name=None, sas_token=None, message=None, **kwargs):
        self.storage_account = storage_account
        self.queue_name = queue_name
        self.sas_token = sas_token
        self.message = message
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
ce49b89ea66dbef7873d318f3b750d748ff2e565 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/redreader/testcase/firstcases/testcase3_003.py | f6c5f5c88712dbe125e58521efd7afe148cee248 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,529 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.quantumbadger.redreader',
'appActivity' : 'org.quantumbadger.redreader.activities.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.quantumbadger.redreader/org.quantumbadger.redreader.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
    """Run a shell command, give it `timeout` seconds, then terminate it.

    NOTE(review): output is piped but never read and nothing is returned;
    presumably only the command's side effect matters -- confirm.
    """
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase003
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"askreddit\")", "new UiSelector().className(\"android.widget.TextView\").instance(8)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"Submit Post\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"View Comments\")")
TouchAction(driver).long_press(element).release().perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"3_003\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.quantumbadger.redreader'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
6eab960a7bf55df7445028b3576e9aca186fc866 | 9892b74a8c5913ef77725129885f9cb7b65a9eba | /src/Learners.py | 99de5284f6ebf8311521302bde1d2c32bd8329d7 | [] | no_license | ai-se/Smotuned_FFT | 8e2884eaf3192f845d638a1167f7e17dd2c07dcb | df06749cca3e507f963c4ffcb2330cdc031dded1 | refs/heads/master | 2021-09-28T05:20:01.762515 | 2018-11-14T17:26:18 | 2018-11-14T17:26:18 | 116,091,269 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,414 | py | from __future__ import print_function, division
__author__ = 'amrit'
import sys
sys.dont_write_bytecode = True
from ABCD import ABCD
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from scores import *
import smote
from helper import *
recall, precision, specificity, accuracy, f1, g, f2, d2h = 8, 7, 6, 5, 4, 3, 2, 1
def DT(train_data,train_labels,test_data):
model = DecisionTreeClassifier(criterion='entropy').fit(train_data, train_labels)
prediction=model.predict(test_data)
return prediction
def KNN(train_data,train_labels,test_data):
model = KNeighborsClassifier(n_neighbors=8,n_jobs=-1).fit(train_data, train_labels)
prediction = model.predict(test_data)
return prediction
def LR(train_data,train_labels,test_data):
model = LogisticRegression().fit(train_data, train_labels)
prediction = model.predict(test_data)
return prediction
def NB(train_data,train_labels,test_data):
model = GaussianNB().fit(train_data, train_labels)
prediction = model.predict(test_data)
return prediction
def RF(train_data,train_labels,test_data):
model = RandomForestClassifier(criterion='entropy').fit(train_data, train_labels)
prediction = model.predict(test_data)
return prediction
def SVM(train_data,train_labels,test_data):
model = LinearSVC().fit(train_data, train_labels)
prediction = model.predict(test_data)
return prediction
def evaluation(measure, prediction, test_labels, test_data):
abcd = ABCD(before=test_labels, after=prediction)
stats = np.array([j.stats() for j in abcd()])
labels = list(set(test_labels))
if labels[0] == 0:
target_label = 1
else:
target_label = 0
if measure == "accuracy":
return stats[target_label][-accuracy]
if measure == "recall":
return stats[target_label][-recall]
if measure == "precision":
return stats[target_label][-precision]
if measure == "specificity":
return stats[target_label][-specificity]
if measure == "f1":
return stats[target_label][-f1]
if measure == "f2":
return stats[target_label][-f2]
if measure == "d2h":
return stats[target_label][-d2h]
if measure == "g":
return stats[target_label][-g]
if measure == "popt20":
df1 = pd.DataFrame(prediction, columns=["prediction"])
df2 = pd.concat([test_data, df1], axis=1)
return get_popt(df2)
def main(*x):
l = np.asarray(x)
function=l[1]
measure=l[2]
data=l[3]
split = split_two(data)
pos = split['pos']
neg = split['neg']
## 20% train and grow
cut_pos, cut_neg = cut_position(pos, neg, percentage=80)
data_train, data_test = divide_train_test(pos, neg, cut_pos, cut_neg)
data_train = smote.execute(l[0].values(), samples=data_train.iloc[:, :-1], labels=data_train.iloc[:, -1:])
lab = [y for x in data_train.iloc[:, -1:].values.tolist() for y in x]
prediction=function(data_train.iloc[:, :-1].values, lab, data_test.iloc[:, :-1].values)
lab = [y for x in data_test.iloc[:, -1:].values.tolist() for y in x]
return evaluation(measure, prediction,lab, data_test)
| [
"amritbhanu@gmail.com"
] | amritbhanu@gmail.com |
a7494ef2f8a2d3630920a5abc0c0faaec8d9e6cf | 7b6c038da851b1d2a3dde5f1158fce03482bc3c1 | /luna/gateware/usb/devices/__init__.py | 7d057f0bc40e42c7c707fd866713cd9551e6e83b | [
"BSD-3-Clause",
"CERN-OHL-P-2.0"
] | permissive | greatscottgadgets/luna | 77b636481a3bef13137c7125de970e86a1fdda22 | 1d8e9cfa6a3e577f255ff3544384a1442b3b015b | refs/heads/main | 2023-09-02T02:32:58.944632 | 2023-09-01T13:00:54 | 2023-09-01T13:00:54 | 215,722,076 | 842 | 160 | BSD-3-Clause | 2023-09-13T14:59:36 | 2019-10-17T06:45:30 | Python | UTF-8 | Python | false | false | 201 | py | #
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Convenience gateware with pre-made device logic. """
| [
"k@ktemkin.com"
] | k@ktemkin.com |
ed5228f11b7cd0a05ba799e26b2bfc45f18cc59c | 43a00df3c162b6279ebda161c2ebd85ce7f9cb64 | /generate_s_label.py | dddd6bf2e1f38891f0cb893d4fa2e17cf1543c94 | [] | no_license | selous123/SRNet-zoom-pytorch | d5845d6a8b55fef81fdc16e728371486939d243e | 08b647e121d70ca69e04719684bcdce8ca8b65d3 | refs/heads/master | 2020-07-14T15:00:31.497397 | 2020-03-06T13:47:20 | 2020-03-06T13:47:20 | 205,339,265 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,721 | py |
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="/store/dataset/SRRAW/X4/train", help="root folder that contains the images")
parser.add_argument("--labels", type=str, default="Edge+Diff", help="root folder that contains the images")
ARGS = parser.parse_args()
#traindataRootDir = "/store2/dataset/SR/train_data/SRRAW/X4/train"
traindataRootDir = ARGS.data_dir
subDirs = ["HR", "LR"]
## Generate Edge Information
import os
from PIL import Image, ImageFilter, ImageChops
file_names = os.listdir(os.path.join(traindataRootDir,subDirs[0]))
file_names.sort()
dir_label = ARGS.labels.split("+")
for d_label in dir_label:
os.makedirs(os.path.join(traindataRootDir, d_label),exist_ok=True)
for i, file_name in enumerate(file_names):
imgPath = os.path.join(traindataRootDir, subDirs[0], file_name)
desPath = os.path.join(traindataRootDir,"Edge",file_name)
img = Image.open(imgPath)
img_edge = img.filter(ImageFilter.FIND_EDGES).filter(
ImageFilter.EDGE_ENHANCE_MORE)
#.filter(ImageFilter.DETAIL)
img_edge.save(desPath)
print("file_name:%s, Index:%d" %(file_name,i))
for i, file_name in enumerate(file_names):
imgPath = os.path.join(traindataRootDir, subDirs[0], file_name)
imgLRPath = os.path.join(traindataRootDir, subDirs[1], file_name)
desPath = os.path.join(traindataRootDir,"Diff",file_name)
img = Image.open(imgPath)
img_lr = Image.open(imgLRPath)
u_img_lr = img_lr.resize(img.size)
d = ImageChops.difference(img,u_img_lr).filter(
ImageFilter.EDGE_ENHANCE_MORE).filter(ImageFilter.DETAIL)
d.save(desPath)
print("file_name:%s, Index:%d" %(file_name,i))
| [
"lrhselous@163.com"
] | lrhselous@163.com |
917a51c81202012da9c84549d807bcf36aadd226 | 04141e207a7cc88a58245fa412b2a841901d504f | /otn.mit.edu/otn/web/forms.py | 1893ee1db091bfb83f4c3f24b699f62b7486469b | [] | no_license | mwidner/socialsaver | bbda2cefea2b8f74d2bbbad2705e616a88a5d582 | 3b6a1d8f0522735e462f9d2bf6ef12a999d8cc85 | refs/heads/master | 2021-01-15T20:29:35.288919 | 2012-04-16T18:27:30 | 2012-04-16T18:27:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | from django import forms
from django.forms import ModelForm
class BuxferLoginForm(forms.Form):
email = forms.EmailField()
password = forms.CharField()
| [
"kwan@media.mit.edu"
] | kwan@media.mit.edu |
7e8f73697561d7e8ee084ee9c1e6a02d06b75021 | c24ad19b65992dd2be3d3210b889d970e43b9cdc | /class/phase1/day09/exercise05.py | 79e8a0e0b6539289fae06c370f41ccd4439e02e6 | [] | no_license | ivoryli/myproject | 23f39449a0bd23abcc3058c08149cebbfd787d12 | cebfa2594198060d3f8f439c971864e0639bbf7e | refs/heads/master | 2020-05-30T06:25:41.345645 | 2019-05-31T11:15:55 | 2019-05-31T11:15:55 | 189,578,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,982 | py | '''
创建技能类(技能名称,冷却时间,持续时间,攻击距离......)
name cd td ad
要求:使用属性封装变量
创建技能列表(技能对象的列表)
-- 查找名称是"降龙十八掌"的技能对象
-- 查找名称是持续时间大于10秒的的所有技能对象
-- 查找攻击距离最远的技能对象
-- 按照持续时间,对列表升序排列.
'''
#myself
# class Skill:
# def __init__(self,name,cd,td,ad):
# self.name = name
# self.cd = cd
# self.td = td
# self.ad = ad
#
# def print_self(self):
# print(self.name,self.cd,self.td,self.ad)
#
# @property
# def name(self):
# return self.__name
#
# @name.setter
# def name(self,value):
# self.__name = value
#
# @property
# def cd(self):
# return self.__cd
#
# @cd.setter
# def cd(self,value):
# self.__cd = value
#
# @property
# def td(self):
# return self.__td
#
# @td.setter
# def td(self,value):
# self.__td = value
#
# @property
# def ad(self):
# return self.__ad
#
# @ad.setter
# def ad(self, value):
# self.__ad = value
#
# L = [
# Skill('降龙十八掌',1,6,20),
# Skill('金刚伏魔',2,5,9),
# Skill('飞龙在天',3,19,13),
# Skill('天下无狗',4,11,2),
# Skill('亢龙有悔',5,8,7)
# ]
#
# def find_name(list_target,name):
# for item in list_target:
# if item.name == name:
# return item
#
# find_name(L,"降龙十八掌").print_self()
# print()
#
# L2 = []
# for item in L:
# if item.td > 10:
# L2.append(item)
#
# for item in L2:
# item.print_self()
#
# print(max([x.ad for x in L]))
# print()
#
# L = sorted(L,key = lambda Teacher:Teacher.td)
# for item in L:
# item.print_self()
#--------------------------------------------------------------------------------------------------
class SkillData:
def __init__(self, name, cd, time, distance):
self.name = name
self.cd = cd
self.time = time
self.atk_distance = distance
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
self.__name = value
@property
def cd(self):
return self.__cd
@cd.setter
def cd(self, value):
self.__cd = value
@property
def time(self):
return self.__time
@time.setter
def time(self, value):
self.__time = value
@property
def atk_distance(self):
return self.__atk_distance
@atk_distance.setter
def atk_distance(self, value):
self.__atk_distance = value
def print_self(self):
print(self.name, self.cd, self.time, self.atk_distance)
list_skills = [
SkillData("降龙十八掌", 60, 10, 5),
SkillData("如来神掌", 50, 5, 15),
SkillData("六脉神剑", 80, 20, 8),
SkillData("一阳指", 20, 50, 15),
SkillData("冷酷追击", 15, 30, 9),
]
# -- 查找名称是"降龙十八掌"的技能对象
for item in list_skills:
if item.name == "降龙十八掌":
item.print_self()
# -- 查找名称是持续时间大于10秒的的所有技能对象
result = []
for item in list_skills:
if item.time > 10:
result.append(item)
# -- 查找攻击距离最远的技能对象
result = list_skills[0]
for i in range(1, len(list_skills)):
# 后面的技能对象
if result.atk_distance < list_skills[i].atk_distance:
result = list_skills[i]
# result.atk_distance = list_skills[i].atk_distance
result.print_self()
# -- 按照持续时间,对列表升序排列.
for r in range(len(list_skills) - 1):
for c in range(r + 1, len(list_skills)):
if list_skills[r].time > list_skills[c].time:
list_skills[r],list_skills[c] = list_skills[c],list_skills[r]
# 请用调试,查看列表的取值.
print(list_skills)
| [
"2712455490@qq.com"
] | 2712455490@qq.com |
c152e031f9a7c91cfde2dd249fba12f4f1b3cb78 | f3d38d0e1d50234ce5f17948361a50090ea8cddf | /백준/Silver/Silver 5/1158번 ; 요세푸스 문제.py | 446a4ad3871ead236aa291b9fccaf9596278359c | [] | no_license | bright-night-sky/algorithm_study | 967c512040c183d56c5cd923912a5e8f1c584546 | 8fd46644129e92137a62db657187b9b707d06985 | refs/heads/main | 2023-08-01T10:27:33.857897 | 2021-10-04T14:36:21 | 2021-10-04T14:36:21 | 323,322,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | # https://www.acmicpc.net/problem/1158
# 첫째 줄에 N, K를 빈 칸으로 구분해 입력합니다.
# 1 <= K <= N <= 5,000
N, K = map(int, input().split())
circle = [number for number in range(1, N + 1)]
poped_number = []
while True:
circle_length = len(circle)
if circle_length == 0:
break | [
"bright_night_sky@naver.com"
] | bright_night_sky@naver.com |
a0a260eae98611dd8badfb76589c148bf4846f3d | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/CommunityCommonScripts/Scripts/PHash/PHash.py | a65682627380ca763287b167d2edc08522c75a03 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 389 | py | import demistomock as demisto # noqa: F401
import imagehash
from CommonServerPython import * # noqa: F401
from PIL import Image
ImageID = demisto.args()['image']
ImageFilePath = demisto.getFilePath(ImageID)
hash = imagehash.phash(Image.open(ImageFilePath['path']))
context = {
"PHash": str(hash)
}
command_results = CommandResults(outputs=context)
return_results(command_results)
| [
"noreply@github.com"
] | demisto.noreply@github.com |
5d1de9164de59d2aaa12d87d853da062ae8caca6 | a7f83dbecc14470645de7f502292394f38bc4661 | /router/asgi.py | 7df5255ada09b87226a02a56174c19568f56aa37 | [] | no_license | erllan/ROUTER | f6ab0530b3106c3e3bffe33e46b8ddedb08ab64a | 5b2a2b76edd28887b5b0b2b0ad4a340344bd399a | refs/heads/master | 2023-01-20T10:20:23.355230 | 2020-11-22T19:23:13 | 2020-11-22T19:23:13 | 310,087,275 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
ASGI config for router project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'router.settings')
application = get_asgi_application()
| [
"erlan.kubanychbekov.000@gmail.com"
] | erlan.kubanychbekov.000@gmail.com |
032de7d294c7805f6c78214f7adb90542a8f2a4f | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/3-OO-Python/7-private-public_20200415234405.py | 3f7ad76390081dccb1d9fb972a71267ff401dc12 | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | #
def speak(self):
print(f'my name is {self.name}. I am {self.age}')
player1 = PlayerCharacter('Andy', 1000)
player1.name = 'Wolverine'
player1.speak = 'BooBoo!'
print(player1.speak) | [
"tikana4@yahoo.com"
] | tikana4@yahoo.com |
cf81714c80096d8ff059fc3b666ec1cf12d22ab7 | aee144770c8f4ec5987777aebe5b064e558fc474 | /doc/integrations/pytorch/parlai/tasks/qazre/build.py | 30cde0347bc725cd13474e5283b0f140a383fba7 | [
"MIT",
"CC-BY-SA-3.0",
"Apache-2.0",
"AGPL-3.0-only"
] | permissive | adgang/cortx | 1d8e6314643baae0e6ee93d4136013840ead9f3b | a73e1476833fa3b281124d2cb9231ee0ca89278d | refs/heads/main | 2023-04-22T04:54:43.836690 | 2021-05-11T00:39:34 | 2021-05-11T00:39:34 | 361,394,462 | 1 | 0 | Apache-2.0 | 2021-04-25T10:12:59 | 2021-04-25T10:12:59 | null | UTF-8 | Python | false | false | 1,232 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
from parlai.core.build_data import DownloadableFile
RESOURCES = [
DownloadableFile(
'http://nlp.cs.washington.edu/zeroshot/relation_splits.tar.bz2',
'relation_splits.tar.bz2',
'e33d0e367b6e837370da17a2d09d217e0a92f8d180f7abb3fd543a2d1726b2b4',
)
]
def build(opt):
dpath = os.path.join(opt['datapath'], 'QA-ZRE')
version = None
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
| [
"noreply@github.com"
] | adgang.noreply@github.com |
52411521d4c4848047d6566be95021f633aac53f | 480cc056ae6f2c8e098468d13858d01851b3fa5c | /tools/create_json | 1558ac765a7822481868c2a24dc700655088dbcf | [
"MIT"
] | permissive | blechta/betterbib | 1300d18364c717676c54b8a76b3f1043f2249c5b | ba2c6b1c3a599d5397c3c913a1fe7725875665b3 | refs/heads/master | 2020-03-23T08:32:44.704257 | 2018-06-20T20:16:14 | 2018-06-20T20:16:14 | 141,333,072 | 0 | 0 | MIT | 2018-07-17T19:16:59 | 2018-07-17T19:16:59 | null | UTF-8 | Python | false | false | 1,262 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""This tool is used for converting JabRef's journal name abbreviation files,
<https://github.com/JabRef/jabref/tree/master/src/main/resources/journals>,
into JSON.
Usage example:
```
cat IEEEJournalListText.txt journalList.txt | ./create_json - journals.json
```
"""
import argparse
import sys
import json
def _main():
args = _parse_cmd_arguments()
# read input file into dictionary
out = {}
for line in args.infile:
sline = line.strip()
if sline[0] == "#":
continue
k, v = sline.split("=")
out[k.strip()] = v.strip()
json.dump(out, args.outfile, indent=2)
return
def _parse_cmd_arguments():
parser = argparse.ArgumentParser(
description="Creates YAML file from `=` input files."
)
parser.add_argument(
"infile",
nargs="?",
type=argparse.FileType("r"),
default=sys.stdin,
help="input `=` files (default: stdin)",
)
parser.add_argument(
"outfile",
nargs="?",
type=argparse.FileType("w"),
default=sys.stdout,
help="output YAML file (default: stdout)",
)
return parser.parse_args()
if __name__ == "__main__":
_main()
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com | |
3691f1fd4f857ea86bcecfaf74e4c30c1db3f4f9 | 71e214849d65fb119ac7d82b8bb852cb09953caa | /pin_tester.py | a2b0e7f4181ea684610902968bd2196fed705d72 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | nanuxbe/microbit-files | daabd2d0685786696f5dd4554b1370175669f3c1 | 70fd249fc98635ad8764d0a65b528f79648c6034 | refs/heads/master | 2021-01-17T20:22:27.575172 | 2016-10-20T09:24:26 | 2016-10-20T09:24:26 | 68,598,147 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | from microbit import *
PINS = [pin0, pin1, pin2, pin8, pin16]
while True:
if button_a.is_pressed():
display.show("1")
for pin in PINS:
pin.write_digital(1)
else:
display.show(Image.NO)
for pin in PINS:
pin.write_digital(0) | [
"emma@levit.be"
] | emma@levit.be |
63c498bd4d2d283e7d36c07193348b6caeb20f9d | ee1258111670dc0d12d93099a1bcc5ae5ac6b948 | /chainer/nn.py | 609878ea2b1cb0c007ba454ee4063ab6db3ecc36 | [] | no_license | cpple/mnistCUDNN | d5bf0a4680892b023d6875fe845d97a025ad1652 | 0f1a3395fd9c5b6e87a9903e94845a5833e46cee | refs/heads/master | 2021-09-23T23:24:21.871321 | 2018-09-29T01:56:44 | 2018-09-29T01:56:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | from chainer import Chain
import chainer.functions as F
import chainer.links as L
# ネットワーク定義
k = 16
fcl = 256
class NN(Chain):
def __init__(self):
super(NN, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(in_channels = 1, out_channels = k, ksize = 3, pad = 1)
self.conv2 = L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1)
self.conv3 = L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1)
self.l4 = L.Linear(7*7*k, fcl)
self.l5 = L.Linear(fcl, 10)
self.bn1 = L.BatchNormalization(k)
self.bn2 = L.BatchNormalization(k)
def __call__(self, x):
h = self.conv1(F.reshape(x, (len(x), 1, 28, 28)))
h = self.bn1(h)
h1 = F.relu(h)
# resnet block
h = self.conv2(h1)
h = self.bn2(h)
h = h + h1
h = F.max_pooling_2d(F.relu(h), 2)
h = self.conv3(h)
h = F.max_pooling_2d(F.relu(h), 2)
h = F.relu(self.l4(h))
h = F.dropout(h, ratio=0.4)
return self.l5(h) | [
"tadaoyamaoka@gmail.com"
] | tadaoyamaoka@gmail.com |
bd6477e67f2ed7e8fd503b734b36b9f9138ec3c2 | 21dbd7e2e91636a24b4482d4f49acd29bf87664e | /spell/__init__.py | 0afe6077a092df621805351c2215040e094ed674 | [
"MIT"
] | permissive | sbuvaneshkumar/open-tamil | 688f8826f0fc285c1cd4098ef8d448a10193fc61 | 903297c8fe6510077d8d65a41adaaeb7da9c5e97 | refs/heads/master | 2021-04-06T08:26:23.403276 | 2018-03-11T02:51:56 | 2018-03-11T02:51:56 | 124,749,654 | 0 | 0 | MIT | 2018-03-11T11:28:18 | 2018-03-11T11:28:18 | null | UTF-8 | Python | false | false | 130 | py | ## -*- coding: utf-8 -*-
## (C) 2016-17 Muthiah Annamalai,
from .spell import LoadDictionary, Mayangoli, OttruSplit, Speller
| [
"ezhillang@gmail.com"
] | ezhillang@gmail.com |
a631f571b4f65d85fb6322bd065ab83755349a3c | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/uniquePaths_20200728102526.py | 971197f817b1ca80f65931fe080d5b65235b275c | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | def uniquePath(arr):
# mark one as None or -1 thatway in the case where
# we have to only calculate the once where there is no none
m = len(arr)
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][j] == 1:
arr[i][j] = "None"
elif i == 0 or j == 0:
arr[i][j] = 1
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][j] == "None":
arr[i][j-1] = 1
arr[i-1][j] = 1
arr[i+1][j] = 1
arr[i][j+1] = 1
else:
if arr[i-1][j] != "None" and arr[i][j-1] != "None":
arr[i][j] = arr[i-1][j] + arr[i][j-1]
print(arr[])
uniquePath([
[0,0,0],
[0,1,0],
[0,0,0]
]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
8f5e5805d30d579a4a7c72aac4eced82ef28fdf3 | 58db5cdab90e34107018892b69c6b4ed63fc2604 | /findMissing.py | 8e48c7a6e0db6498a9ba5f40e0fc8b5c3462c4cf | [] | no_license | jamiejamiebobamie/pythonPlayground | 2b329d0a8741146f7a715b15b33c4e2f24a9a677 | b622eeb78b22760d2570cc085716f74b344072e8 | refs/heads/master | 2020-04-14T15:03:01.127800 | 2019-10-03T23:34:32 | 2019-10-03T23:34:32 | 163,914,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | # following along to this video: https://interviewing.io/recordings/Python-Airbnb-4
# write a fucntion that takes in two arrays
# and finds the one element that is missing from the second array
# a good question to ask is if the items are sorted and all unique?
def findMissing(A1, A2):
# n is the number of elements in A1
matches = set(A2) #O(n-1)
for item in A1: # less than O(n)
if not item in matches:
return item
return "wrong input"
A1 = [1,2,3,4,5,6,7,8,9,10]
A2 = [1,2,3,4,5,6,8,9,10]
print(findMissing(A1, A2)) # time complexity is less than O(2n)
def findMissingLowSpace(A1, A2):
# O(2n) time, O(1ish) space though you can't be sure of size of int
sum1 = sum2 = 0
for item in A1:
sum1 += item
for item in A2:
sum2 += item
return sum1 - sum2
print(findMissingLowSpace(A1,A2))
#interview O(1) space solution:
def find_missing_xor(A1, A2):
xor_sum = 0
for num in A1:
xor_sum ^= num
for num in A2:
xor_sum ^= num
return xor_sum
print(find_missing_xor(A1, A2))
| [
"jmccrory@vt.edu"
] | jmccrory@vt.edu |
c4eded530c35bf1c64cdcaa35076a21e179afae4 | e1d6de1fb5ce02907df8fa4d4e17e61d98e8727d | /pos_tagging/proper_names_tagger.py | eea564f21ab11b5909780f17c1e61d2d7df3f0be | [] | no_license | neuroph12/nlpy | 3f3d1a8653a832d6230cb565428ee0c77ef7451d | 095976d144dacf07414bf7ee42b811eaa67326c1 | refs/heads/master | 2020-09-16T08:24:37.381353 | 2016-09-10T19:24:05 | 2016-09-10T19:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | from taggers import NamesTagger
nt = NamesTagger()
print(nt.tag(['Anders', 'Bill', 'Candy', 'Somename']))
| [
"anderscui@gmail.com"
] | anderscui@gmail.com |
9b5cba1ad32b66f26e908a6d19097808590cc5b1 | 93db886848da0d584e022da861f8e4065978bf69 | /americancultures/lib/python3.7/site-packages/sqlalchemy/dialects/mssql/pyodbc.py | 5154f0de165a9727f34137685ca29171c97836c7 | [] | no_license | jiyoojeong/code_examples_Jan2020 | 91096d7b5b8ac97b49ddfd348f9b75422bec14c8 | 4f0331f87b595b66a0c17db8e8fb2c0c99eff60e | refs/heads/master | 2020-12-27T09:36:53.836823 | 2020-02-03T00:13:46 | 2020-02-03T00:13:46 | 237,853,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,286 | py | # mssql/pyodbc.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mssql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
Connecting to PyODBC
--------------------
The URL here is to be translated to PyODBC connection strings, as
detailed in `ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.
DSN Connections
^^^^^^^^^^^^^^^
A DSN-based connection is **preferred** overall when using ODBC. A
basic DSN-based connection looks like::
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
Which above, will pass the following connection string to PyODBC::
dsn=mydsn;UID=user;PWD=pass
If the username and password are omitted, the DSN form will also add
the ``Trusted_Connection=yes`` directive to the ODBC string.
Hostname Connections
^^^^^^^^^^^^^^^^^^^^
Hostname-based connections are **not preferred**, however are supported.
The ODBC driver name must be explicitly specified::
engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
.. versionchanged:: 1.0.0 Hostname-based PyODBC connections now require the
SQL Server driver name specified explicitly. SQLAlchemy cannot
choose an optimal default here as it varies based on platform
and installed drivers.
Other keywords interpreted by the Pyodbc dialect to be passed to
``pyodbc.connect()`` in both the DSN and hostname cases include:
``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``.
Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A PyODBC connection string can also be sent exactly as specified in
`ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_
into the driver using the parameter ``odbc_connect``. The delimeters must be
URL escaped, however, as illustrated below using ``urllib.parse.quote_plus``::
import urllib
params = urllib.parse.quote_plus("DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password")
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
Driver / Unicode Support
-------------------------
PyODBC works best with Microsoft ODBC drivers, particularly in the area
of Unicode support on both Python 2 and Python 3.
Using the FreeTDS ODBC drivers on Linux or OSX with PyODBC is **not**
recommended; there have been historically many Unicode-related issues
in this area, including before Microsoft offered ODBC drivers for Linux
and OSX. Now that Microsoft offers drivers for all platforms, for
PyODBC support these are recommended. FreeTDS remains relevant for
non-ODBC drivers such as pymssql where it works very well.
Rowcount Support
----------------
Pyodbc only has partial support for rowcount. See the notes at
:ref:`mssql_rowcount_versioning` for important notes when using ORM
versioning.
.. _mssql_pyodbc_fastexecutemany:
Fast Executemany Mode
---------------------
The Pyodbc driver has added support for a "fast executemany" mode of execution
which greatly reduces round trips for a DBAPI ``executemany()`` call when using
Microsoft ODBC drivers. The feature is enabled by setting the flag
``.fast_executemany`` on the DBAPI cursor when an executemany call is to be
used. The SQLAlchemy pyodbc SQL Server dialect supports setting this flag
automatically when the ``.fast_executemany`` flag is passed to
:func:`.create_engine`; note that the ODBC driver must be the Microsoft driver
in order to use this flag::
engine = create_engine(
"mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server",
fast_executemany=True)
.. versionadded:: 1.3
.. seealso::
`fast executemany <https://github.com/mkleehammer/pyodbc/wiki/Features-beyond-the-DB-API#fast_executemany>`_
- on github
""" # noqa
import decimal
import re
from .base import BINARY
from .base import MSDialect
from .base import MSExecutionContext
from .base import VARBINARY
from ... import exc
from ... import types as sqltypes
from ... import util
from ...connectors.pyodbc import PyODBCConnector
class _ms_numeric_pyodbc(object):
    """Turns Decimals with adjusted() < 0 or > 7 into strings.

    The routines here are needed for older pyodbc versions
    as well as current mxODBC versions.
    """

    def bind_processor(self, dialect):
        # Chain to the generic Numeric/Float bind processor first; the
        # string-conversion workaround below is layered on top of it.
        super_process = super(_ms_numeric_pyodbc, self).bind_processor(dialect)

        # Recent pyodbc versions handle out-of-range Decimals themselves,
        # so only install the workaround when the dialect flags it.
        if not dialect._need_decimal_fix:
            return super_process

        def process(value):
            if self.asdecimal and isinstance(value, decimal.Decimal):
                adjusted = value.adjusted()
                if adjusted < 0:
                    return self._small_dec_to_string(value)
                elif adjusted > 7:
                    return self._large_dec_to_string(value)

            if super_process:
                return super_process(value)
            else:
                return value

        return process

    # these routines needed for older versions of pyodbc.
    # as of 2.1.8 this logic is integrated.

    def _small_dec_to_string(self, value):
        # Render e.g. Decimal("1E-3") as "0.001": optional sign, zeros
        # derived from the (negative) adjusted exponent, then the digits.
        return "%s0.%s%s" % (
            (value < 0 and "-" or ""),
            "0" * (abs(value.adjusted()) - 1),
            "".join([str(nint) for nint in value.as_tuple()[1]]),
        )

    def _large_dec_to_string(self, value):
        _int = value.as_tuple()[1]
        if "E" in str(value):
            # Scientific notation: expand by padding with zeros out to the
            # adjusted exponent.
            result = "%s%s%s" % (
                (value < 0 and "-" or ""),
                "".join([str(s) for s in _int]),
                "0" * (value.adjusted() - (len(_int) - 1)),
            )
        else:
            if (len(_int) - 1) > value.adjusted():
                # Digits extend past the decimal point; split them there.
                result = "%s%s.%s" % (
                    (value < 0 and "-" or ""),
                    "".join([str(s) for s in _int][0 : value.adjusted() + 1]),
                    "".join([str(s) for s in _int][value.adjusted() + 1 :]),
                )
            else:
                # All digits fall before the decimal point.
                result = "%s%s" % (
                    (value < 0 and "-" or ""),
                    "".join([str(s) for s in _int][0 : value.adjusted() + 1]),
                )
        return result
class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
    """Numeric type with the older-pyodbc Decimal workaround mixed in."""

    pass
class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
    """Float type with the older-pyodbc Decimal workaround mixed in."""

    pass
class _ms_binary_pyodbc(object):
"""Wraps binary values in dialect-specific Binary wrapper.
If the value is null, return a pyodbc-specific BinaryNull
object to prevent pyODBC [and FreeTDS] from defaulting binary
NULL types to SQLWCHAR and causing implicit conversion errors.
"""
def bind_processor(self, dialect):
if dialect.dbapi is None:
return None
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
# pyodbc-specific
return dialect.dbapi.BinaryNull
return process
class _VARBINARY_pyodbc(_ms_binary_pyodbc, VARBINARY):
    """VARBINARY with the pyodbc BinaryNull wrapping applied."""

    pass
class _BINARY_pyodbc(_ms_binary_pyodbc, BINARY):
    """BINARY with the pyodbc BinaryNull wrapping applied."""

    pass
class MSExecutionContext_pyodbc(MSExecutionContext):
    # True when "; select scope_identity()" has been appended to the
    # current INSERT statement by pre_exec().
    _embedded_scope_identity = False

    def pre_exec(self):
        """where appropriate, issue "select scope_identity()" in the same
        statement.

        Background on why "scope_identity()" is preferable to "@@identity":
        http://msdn.microsoft.com/en-us/library/ms190315.aspx

        Background on why we attempt to embed "scope_identity()" into the same
        statement as the INSERT:
        http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?

        """
        super(MSExecutionContext_pyodbc, self).pre_exec()

        # don't embed the scope_identity select into an
        # "INSERT .. DEFAULT VALUES"
        if (
            self._select_lastrowid
            and self.dialect.use_scope_identity
            and len(self.parameters[0])
        ):
            self._embedded_scope_identity = True

            self.statement += "; select scope_identity()"

    def post_exec(self):
        if self._embedded_scope_identity:
            # Fetch the last inserted id from the manipulated statement
            # We may have to skip over a number of result sets with
            # no data (due to triggers, etc.)
            while True:
                try:
                    # fetchall() ensures the cursor is consumed
                    # without closing it (FreeTDS particularly)
                    row = self.cursor.fetchall()[0]
                    break
                except self.dialect.dbapi.Error:
                    # no way around this - nextset() consumes the previous set
                    # so we need to just keep flipping
                    self.cursor.nextset()

            self._lastrowid = int(row[0])
        else:
            super(MSExecutionContext_pyodbc, self).post_exec()
class MSDialect_pyodbc(PyODBCConnector, MSDialect):
    """SQL Server dialect built on top of the pyodbc connector."""

    execution_ctx_cls = MSExecutionContext_pyodbc

    colspecs = util.update_copy(
        MSDialect.colspecs,
        {
            sqltypes.Numeric: _MSNumeric_pyodbc,
            sqltypes.Float: _MSFloat_pyodbc,
            BINARY: _BINARY_pyodbc,
            # SQL Server dialect has a VARBINARY that is just to support
            # "deprecate_large_types" w/ VARBINARY(max), but also we must
            # handle the usual SQL standard VARBINARY
            VARBINARY: _VARBINARY_pyodbc,
            sqltypes.VARBINARY: _VARBINARY_pyodbc,
            sqltypes.LargeBinary: _VARBINARY_pyodbc,
        },
    )

    def __init__(
        self, description_encoding=None, fast_executemany=False, **params
    ):
        # NOTE(review): the explicit ``description_encoding`` parameter
        # captures the keyword before it can reach ``params``, so this
        # branch looks unreachable from here -- confirm intent upstream.
        if "description_encoding" in params:
            self.description_encoding = params.pop("description_encoding")
        super(MSDialect_pyodbc, self).__init__(**params)
        # Embedding "select scope_identity()" requires cursor.nextset().
        self.use_scope_identity = (
            self.use_scope_identity
            and self.dbapi
            and hasattr(self.dbapi.Cursor, "nextset")
        )
        # pyodbc < 2.1.8 needs out-of-range Decimals converted to strings.
        self._need_decimal_fix = self.dbapi and self._dbapi_version() < (
            2,
            1,
            8,
        )
        self.fast_executemany = fast_executemany

    def _get_server_version_info(self, connection):
        try:
            # "Version of the instance of SQL Server, in the form
            # of 'major.minor.build.revision'"
            raw = connection.scalar(
                "SELECT CAST(SERVERPROPERTY('ProductVersion') AS VARCHAR)"
            )
        except exc.DBAPIError:
            # SQL Server docs indicate this function isn't present prior to
            # 2008.  Before we had the VARCHAR cast above, pyodbc would also
            # fail on this query.
            return super(MSDialect_pyodbc, self)._get_server_version_info(
                connection, allow_chars=False
            )
        else:
            version = []
            r = re.compile(r"[.\-]")
            for n in r.split(raw):
                try:
                    version.append(int(n))
                except ValueError:
                    # skip non-numeric fragments of the version string
                    pass
            return tuple(version)

    def do_executemany(self, cursor, statement, parameters, context=None):
        # Opt in to pyodbc's bulk-parameter fast path when configured.
        if self.fast_executemany:
            cursor.fast_executemany = True
        super(MSDialect_pyodbc, self).do_executemany(
            cursor, statement, parameters, context=context
        )

    def is_disconnect(self, e, connection, cursor):
        """Return True when the error indicates a lost connection."""
        if isinstance(e, self.dbapi.Error):
            # ODBC SQLSTATE / native codes that signal a dead connection.
            for code in (
                "08S01",
                "01002",
                "08003",
                "08007",
                "08S02",
                "08001",
                "HYT00",
                "HY010",
                "10054",
            ):
                if code in str(e):
                    return True
        return super(MSDialect_pyodbc, self).is_disconnect(
            e, connection, cursor
        )
# Default dialect class exported by this module.
dialect = MSDialect_pyodbc
| [
"jiyooj@gmail.com"
] | jiyooj@gmail.com |
021797fb0a2fbaf4c5a693862c72aa1bd145dc71 | f96a40b46adf8820150ac26fcc62b477035ef799 | /python re/re2.py | 63bf2abcda2c169f8d6b256a973a343bef91c751 | [] | no_license | borkarfaiz/python | 0cacf85b262ea0b2040488d7ec1b83565f85d736 | a4f5d98af2455bd3176003ca43aeefaa31e85dc3 | refs/heads/master | 2020-05-23T05:23:39.628953 | 2017-07-13T15:02:34 | 2017-07-13T15:02:34 | 56,511,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | import re
# Read the whole file in one shot; the context manager guarantees the
# handle is closed even if reading fails (the original left it open).
with open('random.txt') as f:
    strToSearch = f.read()
print(strToSearch)

# One or more 'a' characters anchored at the end of the string
# (no MULTILINE flag, so only the final line can match).
patFinder1 = re.compile('a+$')
print(re.findall(patFinder1, strToSearch))
| [
"borkarfaiz@gmail.com"
] | borkarfaiz@gmail.com |
abcc5cdd9a7ab07f52f6ffd956775be41121d4e3 | 35b6013c1943f37d1428afd2663c8aba0a02628d | /trace/trace-python-sample-opentelemetry/main_test.py | 2e5b0e80eaaa1083b24f5cb64928ffba3491e7e6 | [
"Apache-2.0"
] | permissive | GoogleCloudPlatform/python-docs-samples | d2a251805fbeab15d76ed995cf200727f63f887d | 44e819e713c3885e38c99c16dc73b7d7478acfe8 | refs/heads/main | 2023-08-28T12:52:01.712293 | 2023-08-28T11:18:28 | 2023-08-28T11:18:28 | 35,065,876 | 7,035 | 7,593 | Apache-2.0 | 2023-09-14T20:20:56 | 2015-05-04T23:26:13 | Jupyter Notebook | UTF-8 | Python | false | false | 1,279 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import main
def test_index() -> None:
    """The index page should render with the tracing banner."""
    app = main.app
    app.testing = True
    app.config["TRACER"] = main.initialize_tracer(
        os.environ["GOOGLE_CLOUD_PROJECT"]
    )

    response = app.test_client().get("/index.html")

    assert response.status_code == 200
    assert "Tracing requests" in response.data.decode("utf-8")
def test_redirect() -> None:
    """The root URL should 302-redirect to /index.html."""
    app = main.app
    app.testing = True
    app.config["TRACER"] = main.initialize_tracer(
        os.environ["GOOGLE_CLOUD_PROJECT"]
    )

    response = app.test_client().get("/")

    assert response.status_code == 302
    assert "/index.html" in response.headers.get("location", "")
| [
"noreply@github.com"
] | GoogleCloudPlatform.noreply@github.com |
555bc475cdfcfef2a817eebd802725e0e3bcb899 | 059038f86df6c285cc9bfbd8e1924eea37906160 | /jobplus/handlers/front.py | fc45cb5d17b71d3ff01ad42ab99170dbffa4f8c1 | [] | no_license | LouPlus/jobplus11-1 | f6249279a625b8d151eb5e23e7ce16e63aade2af | 59dc0e2437f69dec3a4665a8074e3fd286e97f00 | refs/heads/master | 2023-02-17T19:54:15.335352 | 2019-05-18T11:00:15 | 2019-05-18T11:00:15 | 183,372,297 | 2 | 2 | null | 2023-02-02T06:25:14 | 2019-04-25T06:39:54 | Python | UTF-8 | Python | false | false | 1,390 | py | from flask import Blueprint,render_template
from flask import flash,redirect,url_for
from flask_login import login_user,logout_user,login_required
from jobplus.forms import LoginForm,RegisterForm
from jobplus.models import User
# Blueprint grouping the public-facing routes defined in this module.
front = Blueprint('front',__name__)
@front.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@front.route('/login',methods=['GET','POST'])
def login():
    """Show the login form and, on valid submit, log the user in.

    NOTE(review): no password check is visible here -- the user found by
    email is logged in directly, so credential validation is presumably
    performed inside LoginForm's validators; confirm against that form.
    """
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # Second argument controls the "remember me" cookie.
        login_user(user,form.remember_me.data)
        return redirect(url_for('.index'))
    return render_template('login.html',form=form)
@front.route('/company_register', methods=['GET', 'POST'])
def company_register():
    """Register a company account; redirect to the login page on success."""
    form = RegisterForm()
    if not form.validate_on_submit():
        # GET request or failed validation: (re)render the form.
        return render_template('company_register.html', form=form)
    form.create_company_user()
    flash('注册成功,请登录!','success')
    return redirect(url_for('.login'))
@front.route('/user_register', methods=['GET', 'POST'])
def user_register():
    """Register a regular user account; redirect to login on success."""
    form = RegisterForm()
    if not form.validate_on_submit():
        # GET request or failed validation: (re)render the form.
        return render_template('user_register.html', form=form)
    form.create_user()
    flash('注册成功,请登录!','success')
    return redirect(url_for('.login'))
@front.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the landing page."""
    logout_user()
    flash('您已经退出登录','success')
    home = url_for('.index')
    return redirect(home)
| [
"34020606+LouPlus@users.noreply.github.com"
] | 34020606+LouPlus@users.noreply.github.com |
b693d2d08b70e91e1f82b7f83dd5f455b55e0309 | fda3405523689654b35b8f1c62cb8ed2728470d1 | /stests/core/clx/stream.py | eb135ed7060f6399c39bc40e652d6ac28d57f122 | [] | no_license | piokuc/stests | f5d98e17b4883bf2bd19761be15485c484b1eabc | 2d1301e01576dde574c25609671daf1821edfa0c | refs/heads/master | 2021-03-18T10:25:13.457975 | 2020-03-18T17:37:09 | 2020-03-18T17:37:09 | 247,066,284 | 0 | 0 | null | 2020-03-13T12:28:15 | 2020-03-13T12:28:14 | null | UTF-8 | Python | false | false | 1,777 | py | import typing
from stests.core.clx.utils import get_client
from stests.core.clx.utils import clx_op
from stests.core.domain import NetworkIdentifier
from stests.core.domain import NodeIdentifier
from stests.core.utils import logger
@clx_op
def stream_events(
src: typing.Union[NodeIdentifier, NetworkIdentifier],
on_block_added: typing.Callable = None,
on_block_finalized: typing.Callable = None
):
"""Hooks upto network streaming events.
:param src: The source from which a network node will be derived.
:param on_block_added: Callback to invoke whenever a block is added to chain.
:param on_block_finalized: Callback to invoke whenever a block is finalized.
"""
for node, event in _yield_events(src, on_block_added, on_block_finalized):
if on_block_added and event.HasField("block_added"):
bhash = event.block_added.block.summary.block_hash.hex()
logger.log(f"PYCLX :: stream_events :: block added :: {bhash}")
on_block_added(node, bhash)
elif on_block_finalized and event.HasField("new_finalized_block"):
bhash = event.new_finalized_block.block_hash.hex()
logger.log(f"PYCLX :: stream_events :: block finalized :: {bhash}")
on_block_finalized(node, bhash)
def _yield_events(
src: typing.Union[NodeIdentifier, NetworkIdentifier],
on_block_added: typing.Optional[typing.Callable],
on_block_finalized: typing.Optional[typing.Callable]
):
"""Yields events from event source (i.e. a CLX chain).
"""
node, client = get_client(src)
for event in client.stream_events(
block_added=on_block_added is not None,
block_finalized=on_block_finalized is not None
):
yield node, event
| [
"mark@casperlabs.io"
] | mark@casperlabs.io |
8f957c13ca72094e0b2e99956969a9d18eb2eb1e | a3482e5b922bcc5b8d8fd3dc49a29a3073176191 | /source_py3/python_toolbox/introspection_tools.py | 1d884ef81347a915596dd9d03d1740f187712b53 | [
"MIT"
] | permissive | apddup/python_toolbox | 4d2079826218255240a27b9b977b3a4fc2045ee3 | 2d336f361122ad4216669b7a3e1d794fa2a76db1 | refs/heads/master | 2021-01-18T17:09:02.879773 | 2013-10-06T18:20:34 | 2013-10-06T18:20:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | # Copyright 2009-2013 Ram Rachum.
# This program is distributed under the MIT license.
'''Defines various introspection tools, similar to the stdlib's `inspect`.'''
from python_toolbox import cute_inspect
from python_toolbox.nifty_collections import OrderedDict
def get_default_args_dict(function):
'''
Get ordered dict from arguments which have a default to their default.
Example:
>>> def f(a, b, c=1, d='meow'): pass
>>> get_default_args_dict(f)
OrderedDict([('c', 1), ('d', 'meow')])
'''
arg_spec = cute_inspect.getargspec(function)
(s_args, s_star_args, s_star_kwargs, s_defaults) = arg_spec
# `getargspec` has a weird policy, when inspecting a function with no
# defaults, to give a `defaults` of `None` instead of the more consistent
# `()`. We fix that here:
if s_defaults is None:
s_defaults = ()
# The number of args which have default values:
n_defaultful_args = len(s_defaults)
defaultful_args = s_args[-n_defaultful_args:] if n_defaultful_args \
else []
return OrderedDict(zip(defaultful_args, s_defaults))
| [
"ram@rachum.com"
] | ram@rachum.com |
1e467b51e3c06ca572cb48209a76785c48c96df6 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2826/60659/260490.py | 971d4304f27404a9a57c1a1c15e77cfa27a8b55d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | n=int(input())
num=input().split(" ")
for i in range(n):
num[i]=int(num[i])
num.sort()
result=0
for i in range(1,n):
if num[i]<num[i-1]:
result+=num[i-1]-num[i]+1
num[i]=num[i-1]+1
elif num[i]==num[i-1]:
num[i]+=1
result+=1
print(result) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
4aaa3c796e97075709ceef44423a81d11391d236 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /problems/1658.0_Minimum_Operations_to_Reduce_X_to_Zero.py | 7d298d830bc7aa62f9dadacedb3fc628f5fc96c5 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,854 | py | '''
prefix sum + binary search
T: O(7 * N + NlogN) = O(NlogN)
S: O(2N)
Runtime: 2397 ms, faster than 8.98% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
Memory Usage: 28.4 MB, less than 44.81% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
'''
class Solution:
def minOperations(self, nums: List[int], x: int) -> int:
n = len(nums)
left = list(accumulate(nums))
if left[-1] < x or (nums[0] > x and nums[-1] > x):
return -1
if nums[0] == x or nums[-1] == x:
return 1
right = list(reversed(list(accumulate(reversed(nums)))))
ans = n + 1
for i in range(1, n):
if right[i] == x:
ans = min(ans, n - i)
elif left[i] == x:
ans = min(ans, i + 1)
else:
idx = bisect_left(left, x - right[i], 0, i - 1)
if left[idx] + right[i] == x:
# for right, i...n-1
ans = min(ans, idx + 1 + n - i)
return -1 if ans == n + 1 else ans
'''
nums.reverse()
Runtime: 1515 ms, faster than 60.15% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
Memory Usage: 28.1 MB, less than 50.24% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
'''
class Solution:
def minOperations(self, nums: List[int], x: int) -> int:
n = len(nums)
left = list(accumulate(nums))
if left[-1] < x or (nums[0] > x and nums[-1] > x):
return -1
if nums[0] == x or nums[-1] == x:
return 1
nums.reverse()
right = list(accumulate(nums))
right.reverse()
ans = n + 1
for i in range(1, n):
if right[i] == x:
ans = min(ans, n - i)
elif left[i] == x:
ans = min(ans, i + 1)
else:
idx = bisect_left(left, x - right[i], 0, i - 1)
if left[idx] + right[i] == x:
# for right, i...n-1
ans = min(ans, idx + 1 + n - i)
return -1 if ans == n + 1 else ans
'''
right[i] = right[i + 1] + nums[i]
Runtime: 2933 ms, faster than 5.20% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
Memory Usage: 28.2 MB, less than 50.24% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
'''
class Solution:
def minOperations(self, nums: List[int], x: int) -> int:
n = len(nums)
left = list(accumulate(nums))
if left[-1] < x or (nums[0] > x and nums[-1] > x):
return -1
if nums[0] == x or nums[-1] == x:
return 1
right = [nums[-1]] * n
for i in range(n - 2, -1, -1):
right[i] = right[i + 1] + nums[i]
ans = n + 1
for i in range(1, n):
if right[i] == x:
ans = min(ans, n - i)
elif left[i] == x:
ans = min(ans, i + 1)
else:
idx = bisect_left(left, x - right[i], 0, i - 1)
if left[idx] + right[i] == x:
# for right, i...n-1
ans = min(ans, idx + 1 + n - i)
return -1 if ans == n + 1 else ans
'''
sliding window / two pointers
T: O(N)
S: O(1)
Runtime: 1683 ms, faster than 46.23% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
Memory Usage: 27.9 MB, less than 96.23% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
'''
class Solution:
def minOperations(self, nums: List[int], x: int) -> int:
# to find complementary
n, s = len(nums), sum(nums)
if s < x or (nums[0] > x and nums[-1] > x):
return -1
if s == x:
return n
com = s - x
i = j = 0
subsum = 0
longest_sub = 0
while i < n and j <= n:
if subsum < com:
if j == n:
break
subsum += nums[j]
if subsum == com:
longest_sub = max(longest_sub, j - i + 1) # [i...j]
j += 1
else:
subsum -= nums[i]
i += 1
if subsum == com:
longest_sub = max(longest_sub, j - i) # [i...j-1], caused by `j += 1`
return -1 if longest_sub == 0 else n - longest_sub
'''
Input: [8828,9581,49,9818,9974,9869,9991,10000,10000,10000,9999,9993,9904,8819,1231,6309]
134365
Output: -1
Expected: 16
Input
[5,1,4,2,3]
6
Output
-1
Expected
2
'''
'''
sliding window / two pointers
T: O(N)
S: O(1)
Runtime: 1460 ms, faster than 64.63% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
Memory Usage: 28 MB, less than 62.03% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
'''
class Solution:
def minOperations(self, nums: List[int], x: int) -> int:
# to find complementary
n, s = len(nums), sum(nums)
if s < x or (nums[0] > x and nums[-1] > x):
return -1
if s == x:
return n
com = s - x
i = j = 0
subsum = 0
longest_sub = 0
while i < n and j <= n:
if subsum < com:
if j == n:
break
subsum += nums[j]
j += 1
elif subsum > com:
subsum -= nums[i]
i += 1
else:
longest_sub = max(longest_sub, j - i) # [i...j-1]
subsum -= nums[i]
if j == n:
break
subsum += nums[j]
i += 1
j += 1
return -1 if longest_sub == 0 else n - longest_sub
'''
hash table
T: O(2N) = O(N)
S: O(N)
Runtime: 2198 ms, faster than 13.69% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
Memory Usage: 35.8 MB, less than 29.25% of Python3 online submissions for Minimum Operations to Reduce X to Zero.
'''
class Solution:
def minOperations(self, nums: List[int], x: int) -> int:
# postsum -> idx
post, postsum = {}, 0
n = len(nums)
post[0] = n
for i in range(n - 1, -1, -1):
postsum += nums[i]
post[postsum] = i
presum = 0
ans = n + 1
if x in post:
ans = n - post[x]
for i in range(n):
presum += nums[i]
if x - presum in post and post[x - presum] > i:
idx = post[x - presum]
ans = min(ans, i + 1 + n - idx)
return -1 if ans == n + 1 else ans
| [
"laoxing201314@outlook.com"
] | laoxing201314@outlook.com |
50cb5ea287bdd4b7096a44d540b1becc6dbe37fc | 5e3ebc83bc3fe2f85c34563689b82b1fc8b93a04 | /google/ads/googleads/v5/errors/types/collection_size_error.py | 35f3e3097af069f358524da1dfe9179f0e3c4700 | [
"Apache-2.0"
] | permissive | pdsing/google-ads-python | 0ce70227cd6bb13a25cd13de0ca05c2636279ecd | ee2c059498d5679a0d1d9011f3795324439fad7c | refs/heads/master | 2023-05-04T18:39:57.412453 | 2021-05-21T16:38:17 | 2021-05-21T16:38:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v5.errors",
marshal="google.ads.googleads.v5",
manifest={"CollectionSizeErrorEnum",},
)
class CollectionSizeErrorEnum(proto.Message):
r"""Container for enum describing possible collection size
errors.
"""
class CollectionSizeError(proto.Enum):
r"""Enum describing possible collection size errors."""
UNSPECIFIED = 0
UNKNOWN = 1
TOO_FEW = 2
TOO_MANY = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | pdsing.noreply@github.com |
af447697d230967e564b8d43cc5849a419c7d3b1 | 0cf6cc52e2d8f230d4a4265e955dd3e0810653c0 | /my_prj/test_utils/factories/poll.py | bc027b22d8667a7ea48fd6f6c4c5fdef0d0218e7 | [] | no_license | miphreal/dev2dev-testing | c67c4fc2934d8ec4831e676e6d46071bca89fa25 | c09e64984f3e53d8f61e4b0b5236482eaa38b663 | refs/heads/master | 2021-01-11T00:06:03.503707 | 2016-10-25T15:02:42 | 2016-10-25T15:02:42 | 70,728,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | from my_prj.polls.models import Question, Choice
from my_prj.test_utils import *
class ChoiceFactory(factory.DjangoModelFactory):
class Meta:
model = Choice
choice_text = factory.Faker('text')
votes = 0
class QuestionFactory(factory.DjangoModelFactory):
class Meta:
model = Question
question_text = factory.Faker('text')
pub_date = FuzzyAttribute(lambda: timezone.now() + timedelta(days=7))
@factory.post_generation
def choices(self, create, extracted, **kwargs):
if create and not extracted:
ChoiceFactory.create_batch(5, question=self)
| [
"miphreal@gmail.com"
] | miphreal@gmail.com |
0d523cbadea58db5844fa5dd87a4eff41050d77b | aad164e4efe1d55cc189c35956bfd435b14a0f52 | /eve-8.21.494548/lib/carbonstdlib/contextlib.py | 9fd6320c1f1294c1aa557a4f38f9c87283acba88 | [] | no_license | Pluckyduck/eve | 61cc41fe8fd4dca4fbdcc4761a37bcfeb27ed84f | 9a277707ab1f162c6bd9618faf722c0be3ea93ad | refs/heads/master | 2020-12-28T23:35:29.992875 | 2013-05-06T14:24:33 | 2013-05-06T14:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py | #Embedded file name: c:\depot\games\branches\release\EVE-TRANQUILITY\carbon\common\stdlib\contextlib.py
import sys
from functools import wraps
from warnings import warn
__all__ = ['contextmanager', 'nested', 'closing']
class GeneratorContextManager(object):
def __init__(self, gen):
self.gen = gen
def __enter__(self):
try:
return self.gen.next()
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, traceback):
if type is None:
try:
self.gen.next()
except StopIteration:
return
raise RuntimeError("generator didn't stop")
else:
if value is None:
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
return exc is not value
except:
if sys.exc_info()[1] is not value:
raise
def contextmanager(func):
@wraps(func)
def helper(*args, **kwds):
return GeneratorContextManager(func(*args, **kwds))
return helper
@contextmanager
def nested(*managers):
warn('With-statements now directly support multiple context managers', DeprecationWarning, 3)
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
raise exc[0], exc[1], exc[2]
class closing(object):
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close() | [
"ferox2552@gmail.com"
] | ferox2552@gmail.com |
8d1f121a49b4b93f52bd2c8ac30dca5cf7ee5287 | 7d2dd1dd450d44f6e3cff9e527978853d1b4e7dc | /src/python/txtai/embeddings.py | 8588de9d4e741d25e85747bd1cea08891cd58be9 | [
"Apache-2.0"
] | permissive | zhongbin1/txtai | f2c1c6094ec199db0e7eee65d05dcba81c474d2d | 8544d2eb3610d4d6e1f4f283d1fddf40881efae7 | refs/heads/master | 2023-03-20T00:13:02.432098 | 2021-03-06T14:13:46 | 2021-03-06T14:13:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,885 | py | """
Embeddings module
"""
import pickle
import os
import shutil
import numpy as np
from sklearn.decomposition import TruncatedSVD
from .ann import ANNFactory
from .scoring import ScoringFactory
from .vectors import VectorsFactory
class Embeddings:
"""
Model that builds sentence embeddings from a list of tokens.
Optional scoring method can be created to weigh tokens when creating embeddings. Averaging used if no scoring method provided.
The model also applies principal component analysis using a LSA model. This reduces the noise of common but less
relevant terms.
"""
# pylint: disable = W0231
def __init__(self, config=None):
"""
Creates a new Embeddings model.
Args:
config: embeddings configuration
"""
# Configuration
self.config = config
# Embeddings model
self.embeddings = None
# Dimensionality reduction model
self.lsa = None
# Embedding scoring method - weighs each word in a sentence
self.scoring = ScoringFactory.create(self.config["scoring"]) if self.config and self.config.get("scoring") else None
# Sentence vectors model
self.model = self.loadVectors() if self.config else None
def loadVectors(self):
"""
Loads a vector model set in config.
Returns:
vector model
"""
return VectorsFactory.create(self.config, self.scoring)
def score(self, documents):
"""
Builds a scoring index.
Args:
documents: list of (id, text|tokens, tags)
"""
if self.scoring:
# Build scoring index over documents
self.scoring.index(documents)
def index(self, documents):
"""
Builds an embeddings index.
Args:
documents: list of (id, text|tokens, tags)
"""
# Transform documents to embeddings vectors
ids, dimensions, stream = self.model.index(documents)
# Load streamed embeddings back to memory
embeddings = np.empty((len(ids), dimensions), dtype=np.float32)
with open(stream, "rb") as queue:
for x in range(embeddings.shape[0]):
embeddings[x] = pickle.load(queue)
# Remove temporary file
os.remove(stream)
# Build LSA model (if enabled). Remove principal components from embeddings.
if self.config.get("pca"):
self.lsa = self.buildLSA(embeddings, self.config["pca"])
self.removePC(embeddings)
# Normalize embeddings
self.normalize(embeddings)
# Save embeddings metadata
self.config["ids"] = ids
self.config["dimensions"] = dimensions
# Create embeddings index
self.embeddings = ANNFactory.create(self.config)
# Build the index
self.embeddings.index(embeddings)
def buildLSA(self, embeddings, components):
"""
Builds a LSA model. This model is used to remove the principal component within embeddings. This helps to
smooth out noisy embeddings (common words with less value).
Args:
embeddings: input embeddings matrix
components: number of model components
Returns:
LSA model
"""
svd = TruncatedSVD(n_components=components, random_state=0)
svd.fit(embeddings)
return svd
def removePC(self, embeddings):
"""
Applies a LSA model to embeddings, removed the top n principal components. Operation applied
directly on array.
Args:
embeddings: input embeddings matrix
"""
pc = self.lsa.components_
factor = embeddings.dot(pc.transpose())
# Apply LSA model
# Calculation is different if n_components = 1
if pc.shape[0] == 1:
embeddings -= factor * pc
elif len(embeddings.shape) > 1:
# Apply model on a row-wise basis to limit memory usage
for x in range(embeddings.shape[0]):
embeddings[x] -= factor[x].dot(pc)
else:
# Single embedding
embeddings -= factor.dot(pc)
def normalize(self, embeddings):
"""
Normalizes embeddings using L2 normalization. Operation applied directly on array.
Args:
embeddings: input embeddings matrix
"""
# Calculation is different for matrices vs vectors
if len(embeddings.shape) > 1:
embeddings /= np.linalg.norm(embeddings, axis=1)[:, np.newaxis]
else:
embeddings /= np.linalg.norm(embeddings)
def transform(self, document):
"""
Transforms document into an embeddings vector. Document text will be tokenized if not pre-tokenized.
Args:
document: (id, text|tokens, tags)
Returns:
embeddings vector
"""
# Convert document into sentence embedding
embedding = self.model.transform(document)
# Reduce the dimensionality of the embeddings. Scale the embeddings using this
# model to reduce the noise of common but less relevant terms.
if self.lsa:
self.removePC(embedding)
# Normalize embeddings
self.normalize(embedding)
return embedding
def batchtransform(self, documents):
"""
Transforms documents into embeddings vectors. Document text will be tokenized if not pre-tokenized.
Args:
documents: list of (id, text|tokens, tags)
Returns:
embeddings vectors
"""
return [self.transform(document) for document in documents]
def search(self, query, limit=3):
"""
Finds documents in the embeddings model most similar to the input query. Returns
a list of (id, score) sorted by highest score, where id is the document id in
the embeddings model.
Args:
query: query text|tokens
limit: maximum results
Returns:
list of (id, score)
"""
return self.batchsearch([query], limit)[0]
def batchsearch(self, queries, limit=3):
"""
Finds documents in the embeddings model most similar to the input queries. Returns
a list of (id, score) sorted by highest score per query, where id is the document id
in the embeddings model.
Args:
queries: queries text|tokens
limit: maximum results
Returns:
list of (id, score) per query
"""
# Convert queries to embedding vectors
embeddings = np.array([self.transform((None, query, None)) for query in queries])
# Search embeddings index
results = self.embeddings.search(embeddings, limit)
# Map ids if id mapping available
lookup = self.config.get("ids")
if lookup:
results = [[(lookup[i], score) for i, score in r] for r in results]
return results
def similarity(self, query, texts):
"""
Computes the similarity between query and list of text. Returns a list of
(id, score) sorted by highest score, where id is the index in texts.
Args:
query: query text|tokens
texts: list of text|tokens
Returns:
list of (id, score)
"""
return self.batchsimilarity([query], texts)[0]
def batchsimilarity(self, queries, texts):
"""
Computes the similarity between list of queries and list of text. Returns a list
of (id, score) sorted by highest score per query, where id is the index in texts.
Args:
queries: queries text|tokens
texts: list of text|tokens
Returns:
list of (id, score) per query
"""
# Convert queries to embedding vectors
queries = np.array([self.transform((None, query, None)) for query in queries])
texts = np.array([self.transform((None, text, None)) for text in texts])
# Dot product on normalized vectors is equal to cosine similarity
scores = np.dot(queries, texts.T).tolist()
# Add index id and sort desc based on score
return [sorted(enumerate(score), key=lambda x: x[1], reverse=True) for score in scores]
def load(self, path):
"""
Loads a pre-trained model.
Models have the following files:
config - configuration
embeddings - sentence embeddings index
lsa - LSA model, used to remove the principal component(s)
scoring - scoring model used to weigh word vectors
vectors - vectors model
Args:
path: input directory path
"""
# Index configuration
with open("%s/config" % path, "rb") as handle:
self.config = pickle.load(handle)
# Build full path to embedding vectors file
if self.config.get("storevectors"):
self.config["path"] = os.path.join(path, self.config["path"])
# Sentence embeddings index
self.embeddings = ANNFactory.create(self.config)
self.embeddings.load("%s/embeddings" % path)
# Dimensionality reduction
if self.config.get("pca"):
with open("%s/lsa" % path, "rb") as handle:
self.lsa = pickle.load(handle)
# Embedding scoring
if self.config.get("scoring"):
self.scoring = ScoringFactory.create(self.config["scoring"])
self.scoring.load(path)
# Sentence vectors model - transforms text into sentence embeddings
self.model = self.loadVectors()
def save(self, path):
    """
    Persists this model to a directory.

    No-op when the model has no configuration loaded.

    Args:
        path: output directory path
    """
    if not self.config:
        return

    # Ensure the target directory exists
    os.makedirs(path, exist_ok=True)

    # When vectors are stored with the model, copy the vectors file into the
    # output directory and make the configured path relative to it
    if self.config.get("storevectors"):
        vectors = os.path.basename(self.config["path"])
        shutil.copyfile(self.config["path"], os.path.join(path, vectors))
        self.config["path"] = vectors

    # Persist index configuration
    with open("%s/config" % path, "wb") as outfile:
        pickle.dump(self.config, outfile, protocol=pickle.HIGHEST_PROTOCOL)

    # Persist the sentence embeddings index
    self.embeddings.save("%s/embeddings" % path)

    # Persist the LSA model, if dimensionality reduction is enabled
    if self.lsa:
        with open("%s/lsa" % path, "wb") as outfile:
            pickle.dump(self.lsa, outfile, protocol=pickle.HIGHEST_PROTOCOL)

    # Persist the embedding scoring model, if enabled
    if self.scoring:
        self.scoring.save(path)
| [
"561939+davidmezzetti@users.noreply.github.com"
] | 561939+davidmezzetti@users.noreply.github.com |
f3c2f424721aba59acfd050755659e84ff609db8 | 759653bf8bd290e023d8f71a0cd5faa95c1687b0 | /code/771.py | eba1126050396bc1573007cc45ac457e4b9518bf | [] | no_license | RuidongZ/LeetCode | 6032fc02d3f996155c4f6965f2ad2fc48de6c3c2 | ef8f9edd7857f4ef103924e21224dcd878c87196 | refs/heads/master | 2022-02-27T12:32:00.261851 | 2019-10-17T08:54:34 | 2019-10-17T08:54:34 | 115,314,228 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | # -*- Encoding:UTF-8 -*-
# 771. Jewels and Stones
# You're given strings J representing the types of stones that are jewels, and S representing the stones you have.
# Each character in S is a type of stone you have. You want to know how many of the stones you have are also jewels.
#
# The letters in J are guaranteed distinct, and all characters in J and S are letters.
# Letters are case sensitive, so "a" is considered a different type of stone from "A".
#
# Example 1:
#
# Input: J = "aA", S = "aAAbbbb"
# Output: 3
# Example 2:
#
# Input: J = "z", S = "ZZ"
# Output: 0
# Note:
#
# S and J will consist of letters and have length at most 50.
# The characters in J are distinct.
class Solution(object):
    def numJewelsInStones(self, J, S):
        """
        Count how many stones in S are jewels.

        :type J: str - distinct, case-sensitive jewel characters
        :type S: str - stones owned, one character per stone
        :rtype: int - number of characters of S that appear in J
        """
        # Build the jewel set once so each membership test is O(1) instead of
        # scanning J per stone; overall cost drops from O(len(J) * len(S))
        # to O(len(J) + len(S)).
        jewels = set(J)
        return sum(1 for stone in S if stone in jewels)
| [
"459597855@qq.com"
] | 459597855@qq.com |
cfea9560e5ec1f92438c4d1bfe85bfcbed0afd05 | 9d862dd68f8b4ea4e7de9397fef8592824c77449 | /app/top/api/rest/WlbOrderCancelRequest.py | 09660e9157ded1486da3eb9a863973538165a9b6 | [] | no_license | hi-noikiy/tmall-sku-outer_id | ffaca630dfb288ca33d962b8a050932d1047b9c8 | 1bcf29386a513bcb210bf5d91016e0dcb1ebc1ad | refs/heads/master | 2021-05-09T18:20:27.150316 | 2017-03-08T06:43:57 | 2017-03-08T06:43:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | '''
Created by auto_sdk on 2016.04.14
'''
from app.top.api.base import RestApi
class WlbOrderCancelRequest(RestApi):
    # Auto-generated TOP SDK request wrapper for the "taobao.wlb.order.cancel"
    # API method (warehouse logistics order cancellation). Generated code --
    # keep the executable statements identical to the generator's output.
    def __init__(self, domain='gw.api.taobao.com', port=80):
        # Initialize the base REST client against the given gateway host/port
        RestApi.__init__(self, domain, port)
        # Request parameter: the WLB order code; presumably identifies the
        # order to cancel -- confirm against the TOP API documentation
        self.wlb_order_code = None
    def getapiname(self):
        # TOP API method name used when dispatching this request
        return 'taobao.wlb.order.cancel'
| [
"1037096435@qq.com"
] | 1037096435@qq.com |
40ca69a149cf8e465343e98e58f19e00a9b53ce6 | 33faef69c6ceb5944edae07d76175dafe8c7ec17 | /web_robotframework/migrations/0014_auto_20181116_1501.py | df6c256eb6a1124c4a6d077221ffdc13cac5f18f | [] | no_license | waterfronter/AutoZone | 86edfe45f8d92839f31d1e4608f13d309017fc8d | dec180c4aaa88dc015ff7ca1d8e75f1967062a6b | refs/heads/master | 2020-06-11T07:25:59.439326 | 2019-06-06T09:02:42 | 2019-06-06T09:02:42 | 193,890,095 | 0 | 1 | null | 2019-06-26T11:19:39 | 2019-06-26T11:19:39 | null | UTF-8 | Python | false | false | 454 | py | # Generated by Django 2.1.1 on 2018-11-16 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: redefines the "webtestresult"
    # field of the add_web_steps model as CharField(max_length=50,
    # default=None). Keep executable content identical to Django's generator
    # output. verbose_name "测试结果" means "test result".

    dependencies = [
        ('web_robotframework', '0013_auto_20181116_1452'),
    ]

    operations = [
        migrations.AlterField(
            model_name='add_web_steps',
            name='webtestresult',
            field=models.CharField(default=None, max_length=50, verbose_name='测试结果'),
        ),
    ]
| [
"1633235633@qq.com"
] | 1633235633@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.