blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2f132d128561ac039be320bd5174c2847e302c7c | 659b66f248173fb99d6d1f43978064b576a9da6f | /check_mail.py | 3dfaf0909b488340ba23ccc586345ecdc3b716d0 | [] | no_license | tuanpx9184-cowell/learn-python | 25981ab0d27b4afa764a3990decc5911ba525d1f | aa0fbce15677e6d58b824ddb4d133e4e59b152db | refs/heads/master | 2022-04-05T17:08:07.225789 | 2019-12-09T07:59:38 | 2019-12-09T07:59:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | import re
def isValidEmail(email):
    """Return True if *email* looks like a syntactically valid address.

    Fixes over the original pattern:
    - raw string, so ``\\[`` is not an (invalid) string escape;
    - the dot before the top-level domain is escaped (``\\.``) -- the old
      unescaped ``.`` matched any character, so e.g. "ab@cdcom" passed.
    Addresses of 7 characters or fewer are rejected outright, as before.
    """
    if len(email) > 7:
        pattern = r"^.+@(\[?)[a-zA-Z0-9.-]+\.([a-zA-Z]{2,3}|[0-9]{1,3})(\]?)$"
        return re.match(pattern, email) is not None
    return False
# Demo: classify a sample address and report the verdict.
verdict = ("This is a valid email address"
           if isValidEmail("my.email@gmail.com")
           else "This is not a valid email address")
print(verdict)
| [
"tuanpx.dev@gmail.com"
] | tuanpx.dev@gmail.com |
c1b26cced7bf736c91ff5349abd7750a5eefa8d8 | e60487a8f5aad5aab16e671dcd00f0e64379961b | /python_stack/Algos/numPy/updateNumpy.py | 764b3af48fbc0778e1b980e0ca73c7c9f9fe3f14 | [] | no_license | reenadangi/python | 4fde31737e5745bc5650d015e3fa4354ce9e87a9 | 568221ba417dda3be7f2ef1d2f393a7dea6ccb74 | refs/heads/master | 2021-08-18T08:25:40.774877 | 2021-03-27T22:20:17 | 2021-03-27T22:20:17 | 247,536,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | import numpy as np
# Walk-through of basic NumPy array editing: access, delete, append,
# insert, and stacking.  Printed output is unchanged from the original;
# the only code change is that the element-doubling loop is vectorized.
x = np.array([12, 34, 56, 78, 99])
y = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(f"Orginal array{x}")
# access: first element, last element (two equivalent ways), second-to-last
print(x[0], x[len(x) - 1], x[-1], x[-2])
# Modify: double every element (vectorized -- replaces the per-index loop)
x = x * 2
# delete first and last element
x = np.delete(x, [0, 4])
print(x)
print(y)
# delete first row (x axis)
y = np.delete(y, [0], axis=0)
print(y)
# delete first col(y axis)
y = np.delete(y, [0], axis=1)
print(y)
# append: adding a float promotes the whole array from int to float64,
# which the surrounding dtype prints make visible
print(x.dtype)
x = np.append(x, [14.5, 243])
print(x)
print(x.dtype)
# insert single values at a given index (flat array) ...
x = np.insert(x, 1, 58)
print(x)
x = np.insert(x, 2, 3)
print(x)
# ... or a whole column of 34s at column index 1 (2-D array)
y = np.insert(y, 1, 34, axis=1)
print(y)
# stacking - vstack/hstack
# It's important that size of stacks are same
x = np.array([1, 2, 3])
y = np.array([30, 40, 50])
z = np.vstack((x, y))
print(z)
# hstack - Horizontal
z = np.hstack((x, y))
print(z)
| [
"reena.dangi@gmail.com"
] | reena.dangi@gmail.com |
d667cd2f95e0010147408d1bee42456d1f50ab10 | 8a155ccffb7d10c394bfff752d2c3c33892940ea | /KnowledgeProblem/models.py | af4605b6021e39add4382fcf23586b194cfad4c9 | [] | no_license | TONG404/WHUTravler | b5ba041db1d61878565c9553ffdae419074997ef | d57a2bd9a627af7a1a6c5373b95f489ca87bd2d3 | refs/heads/master | 2023-03-18T20:33:27.462230 | 2021-03-05T16:25:34 | 2021-03-05T16:25:34 | 344,848,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | from django.db import models
# Create your models here.
class Knowledge(models.Model):
    """A multiple-choice quiz question with four options (A-D)."""
    id = models.IntegerField(primary_key=True)
    # Question text.
    problem = models.CharField(max_length=320)
    # The four answer options shown to the user.
    selA = models.CharField(max_length=40)
    selB = models.CharField(max_length=40)
    selC = models.CharField(max_length=40)
    selD = models.CharField(max_length=40)
    # NOTE(review): integer codes -- their semantics (category? difficulty?)
    # are not visible in this file; confirm against the code that sets them.
    type = models.IntegerField()
    # NOTE(review): presumably the index of the correct option -- confirm.
    answer = models.IntegerField()
"15809477910@163.com"
] | 15809477910@163.com |
284ce95f34b4a10c66e71f2e3477dda5167fac94 | b6d2354b06732b42d3de49d3054cb02eb30298c4 | /finance/models/score.py | df2c1a647c8480e32ca35a6f81dc0cb04266d188 | [] | no_license | trivvet/finplanner | 52ad276839bfae67821b9684f7db549334ef0a59 | 1d82d1a09da6f04fced6f71b53aeb784af00f758 | refs/heads/master | 2020-03-17T23:24:25.071311 | 2018-10-28T10:12:07 | 2018-10-28T10:12:07 | 134,043,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class ScorePrototype(models.Model):
    """Abstract base for per-month money records (see Score, PlannedExpense)."""
    class Meta:
        abstract=True
    # Month this record belongs to; deleting the Month cascades here.
    month = models.ForeignKey(
        'Month',
        on_delete=models.CASCADE,
        blank=False,
        null=False,
        verbose_name="Month"
    )
    # Money amount, stored as an integer; required.
    amount = models.IntegerField(
        blank=False,
        null=False,
        verbose_name="Money Amount"
    )
    # Leftover amount; optional (may be left unset).
    remainder = models.IntegerField(
        blank=True,
        null=True,
        verbose_name="Money Remainder"
    )
class Score(ScorePrototype):
    """Balance of a single bank account for a given month."""
    class Meta:
        # Fixed typo in the admin-facing labels ("Belance" -> "Balance").
        verbose_name = "Balance"
        verbose_name_plural = "Balances"
    # Bank account the balance belongs to; deleting the Account cascades here.
    account = models.ForeignKey(
        'Account',
        on_delete=models.CASCADE,
        blank=False,
        null=False,
        verbose_name="Bank Account"
    )
    def __unicode__(self):
        # Python 2 display label: "Remainder for <month> on <account>".
        return u"Залишок за %s по %s" % (self.month.name, self.account)
class PlannedExpense(ScorePrototype):
    """An expense planned for a month, identified by a short title."""
    class Meta:
        verbose_name = "Planned Expense"
        verbose_name_plural = "Planned Expenses"
    # Short human-readable name of the planned expense.
    title = models.CharField(
        max_length=256,
        blank=False,
        null=False,
        verbose_name="Title"
    )
    def __unicode__(self):
        # Python 2 display label: "Planned expenses for <title> in <month>".
        return u"Заплановані витрати на %s за %s" % (self.title, self.month.name)
"trivvet@gmail.com"
] | trivvet@gmail.com |
b19de6f92104daeabffb2649ab270d6ee55856fd | d5023f951158275b97fc0794aa04427a5258858b | /main.py | 05841bce35b5db81442f021868d89804d347d80b | [] | no_license | AakashBudhera/Minemapper | a7342f33917ea61ddbaa020fba8cebe2e3aff008 | a61efffd3709d98308e38fb93d2bd454acf2c81d | refs/heads/master | 2022-11-04T21:59:12.292582 | 2020-06-18T03:17:22 | 2020-06-18T03:17:22 | 273,130,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | from ipywidgets.embed import embed_minimal_html
from PIL import ImageFilter
from PIL import Image
import gmaps

# Scan a map screenshot for bright-green pixels, mark them red on a copy of
# the image, and plot a thinned set of them as markers on a Google map.
#
# BUG FIX: the pixel-to-coordinate conversion below used the undefined name
# ``delta`` (a NameError as soon as a green pixel was found); it is now the
# ``deltax`` longitude span defined before the loop.

# NOTE(review): API key is committed in source -- rotate it and load it from
# the environment instead of hard-coding it here.
gmaps.configure(api_key="AIzaSyCo99awBRG0JvRCoJC8M12-3EiAoLfElSM")
fig = gmaps.figure()

# Screenshot named after its bounding coordinates: lat, lon_min, lon_max.
im = Image.open("/Users/Aakash/36.070403, 68.629560, 68.673354.png")

# Mode filter smooths speckle so isolated bright pixels do not register.
inputim = im.filter(ImageFilter.ModeFilter(8))
inputim.show()
pix = inputim.load()

imout = im.copy()
pixout = imout.load()

# Longitude span of the capture; scales pixel offsets to degrees.
deltax = 68.673354 - 68.629560

locations = []
xlist = []
ylist = []
for i in range(0, inputim.size[0], 8):  # x-axis search
    for j in range(0, inputim.size[1], 8):  # y-axis search
        if pix[i, j][1] > 140:  # strong green channel
            pixout[i, j] = (255, 0, 0)  # mark the hit on the output image
            xlist = xlist + [-j / 1000 * deltax + 36.070403]
            ylist = ylist + [i / 1000 * deltax + 68.629560]

# Thin the hits (keep every 5th) so the marker layer stays responsive.
for k in range(0, len(xlist), 5):
    locations = locations + [(xlist[k], ylist[k])]

imout.show()
marker = gmaps.marker_layer(locations)
fig.add_layer(marker)
embed_minimal_html('export3.html', views=[fig])
| [
"noreply@github.com"
] | noreply@github.com |
f284deeabab19ea1adbc370ff61a3d7bf21a0ee6 | 99052370591eadf44264dbe09022d4aa5cd9687d | /install/lib/python2.7/dist-packages/cartesian_planner/msg/_cart_moveGoal.py | f245dd6ac0ba90eb7cf6a28424f7787009277745 | [] | no_license | brucemingxinliu/ros_ws | 11b1a3e142132925d35b3adf929f1000392c5bdc | 45f7e553ea20b79e3e93af5f77a1b14b64184875 | refs/heads/master | 2021-01-24T03:36:47.043040 | 2018-02-26T00:53:37 | 2018-02-26T00:53:37 | 122,892,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,447 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from cartesian_planner/cart_moveGoal.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import std_msgs.msg
class cart_moveGoal(genpy.Message):
_md5sum = "5bd816596081b2b0fbcdf7dad29bf944"
_type = "cartesian_planner/cart_moveGoal"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
#cartesian-move action interface
#minimally, it may contain just a command code
#more generally, it may contain desired tool-frame pose, as well
# as gripper pose (gripper opening, or vacuum gripper on/off)
# and an arrival time for the move
# It is assumed that a move starts from the previous commanded pose, or from the current joint state
#return codes provide status info, e.g. if a proposed move is reachable
#define message constants:
uint8 ARM_TEST_MODE = 0
#queries
uint8 ARM_IS_SERVER_BUSY_QUERY = 1
uint8 ARM_QUERY_IS_PATH_VALID = 2
uint8 GET_TOOL_POSE = 5
uint8 GET_Q_DATA = 7
#requests for motion plans;
uint8 PLAN_PATH_CURRENT_TO_WAITING_POSE=20
#uint8 PLAN_PATH_CURRENT_TO_PRE_POSE=20 #synonym
uint8 PLAN_JSPACE_PATH_CURRENT_TO_CART_GRIPPER_POSE = 21 #plan a joint-space path from current arm pose to some IK soln of Cartesian goal
uint8 PLAN_PATH_CURRENT_TO_GOAL_GRIPPER_POSE=22 #plan cartesian path from current arm pose to goal gripper pose
uint8 PLAN_FINE_PATH_CURRENT_TO_GOAL_GRIPPER_POSE = 23 #plan path to specified gripper pose #as above, but hi-res
uint8 PLAN_PATH_CURRENT_TO_GOAL_DP_XYZ = 24 #rectilinear translation w/ fixed orientation
uint8 PLAN_JSPACE_PATH_CURRENT_TO_QGOAL = 25
uint8 TIME_RESCALE_PLANNED_TRAJECTORY = 40 #can make arm go slower/faster with provided time-stretch factor
uint8 REFINE_PLANNED_TRAJECTORY = 41 #if used approx IK soln, use this option to refine solns
uint8 SET_ARRIVAL_TIME_PLANNED_TRAJECTORY = 42 #used to set desired arrival time; put arrival time value in goal time_scale_stretch_factor
# request to preview plan:
#uint8 DISPLAY_TRAJECTORY = 50
#MOVE command!
uint8 EXECUTE_PLANNED_PATH = 100
#uint8 ARM_DESCEND_20CM=101
#uint8 ARM_DEPART_20CM=102
#goal:
int32 command_code
geometry_msgs/PoseStamped des_pose_gripper
float64[] arm_dp #to command a 3-D vector displacement relative to current pose, fixed orientation
float64[] q_goal
float64 time_scale_stretch_factor
================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of postion and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
# Pseudo-constants
ARM_TEST_MODE = 0
ARM_IS_SERVER_BUSY_QUERY = 1
ARM_QUERY_IS_PATH_VALID = 2
GET_TOOL_POSE = 5
GET_Q_DATA = 7
PLAN_PATH_CURRENT_TO_WAITING_POSE = 20
PLAN_JSPACE_PATH_CURRENT_TO_CART_GRIPPER_POSE = 21
PLAN_PATH_CURRENT_TO_GOAL_GRIPPER_POSE = 22
PLAN_FINE_PATH_CURRENT_TO_GOAL_GRIPPER_POSE = 23
PLAN_PATH_CURRENT_TO_GOAL_DP_XYZ = 24
PLAN_JSPACE_PATH_CURRENT_TO_QGOAL = 25
TIME_RESCALE_PLANNED_TRAJECTORY = 40
REFINE_PLANNED_TRAJECTORY = 41
SET_ARRIVAL_TIME_PLANNED_TRAJECTORY = 42
EXECUTE_PLANNED_PATH = 100
__slots__ = ['command_code','des_pose_gripper','arm_dp','q_goal','time_scale_stretch_factor']
_slot_types = ['int32','geometry_msgs/PoseStamped','float64[]','float64[]','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
command_code,des_pose_gripper,arm_dp,q_goal,time_scale_stretch_factor
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(cart_moveGoal, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.command_code is None:
self.command_code = 0
if self.des_pose_gripper is None:
self.des_pose_gripper = geometry_msgs.msg.PoseStamped()
if self.arm_dp is None:
self.arm_dp = []
if self.q_goal is None:
self.q_goal = []
if self.time_scale_stretch_factor is None:
self.time_scale_stretch_factor = 0.
else:
self.command_code = 0
self.des_pose_gripper = geometry_msgs.msg.PoseStamped()
self.arm_dp = []
self.q_goal = []
self.time_scale_stretch_factor = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_i3I.pack(_x.command_code, _x.des_pose_gripper.header.seq, _x.des_pose_gripper.header.stamp.secs, _x.des_pose_gripper.header.stamp.nsecs))
_x = self.des_pose_gripper.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_7d.pack(_x.des_pose_gripper.pose.position.x, _x.des_pose_gripper.pose.position.y, _x.des_pose_gripper.pose.position.z, _x.des_pose_gripper.pose.orientation.x, _x.des_pose_gripper.pose.orientation.y, _x.des_pose_gripper.pose.orientation.z, _x.des_pose_gripper.pose.orientation.w))
length = len(self.arm_dp)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *self.arm_dp))
length = len(self.q_goal)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *self.q_goal))
buff.write(_struct_d.pack(self.time_scale_stretch_factor))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.des_pose_gripper is None:
self.des_pose_gripper = geometry_msgs.msg.PoseStamped()
end = 0
_x = self
start = end
end += 16
(_x.command_code, _x.des_pose_gripper.header.seq, _x.des_pose_gripper.header.stamp.secs, _x.des_pose_gripper.header.stamp.nsecs,) = _struct_i3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.des_pose_gripper.header.frame_id = str[start:end].decode('utf-8')
else:
self.des_pose_gripper.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.des_pose_gripper.pose.position.x, _x.des_pose_gripper.pose.position.y, _x.des_pose_gripper.pose.position.z, _x.des_pose_gripper.pose.orientation.x, _x.des_pose_gripper.pose.orientation.y, _x.des_pose_gripper.pose.orientation.z, _x.des_pose_gripper.pose.orientation.w,) = _struct_7d.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.arm_dp = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.q_goal = struct.unpack(pattern, str[start:end])
start = end
end += 8
(self.time_scale_stretch_factor,) = _struct_d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_i3I.pack(_x.command_code, _x.des_pose_gripper.header.seq, _x.des_pose_gripper.header.stamp.secs, _x.des_pose_gripper.header.stamp.nsecs))
_x = self.des_pose_gripper.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_7d.pack(_x.des_pose_gripper.pose.position.x, _x.des_pose_gripper.pose.position.y, _x.des_pose_gripper.pose.position.z, _x.des_pose_gripper.pose.orientation.x, _x.des_pose_gripper.pose.orientation.y, _x.des_pose_gripper.pose.orientation.z, _x.des_pose_gripper.pose.orientation.w))
length = len(self.arm_dp)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(self.arm_dp.tostring())
length = len(self.q_goal)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(self.q_goal.tostring())
buff.write(_struct_d.pack(self.time_scale_stretch_factor))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.des_pose_gripper is None:
self.des_pose_gripper = geometry_msgs.msg.PoseStamped()
end = 0
_x = self
start = end
end += 16
(_x.command_code, _x.des_pose_gripper.header.seq, _x.des_pose_gripper.header.stamp.secs, _x.des_pose_gripper.header.stamp.nsecs,) = _struct_i3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.des_pose_gripper.header.frame_id = str[start:end].decode('utf-8')
else:
self.des_pose_gripper.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.des_pose_gripper.pose.position.x, _x.des_pose_gripper.pose.position.y, _x.des_pose_gripper.pose.position.z, _x.des_pose_gripper.pose.orientation.x, _x.des_pose_gripper.pose.orientation.y, _x.des_pose_gripper.pose.orientation.z, _x.des_pose_gripper.pose.orientation.w,) = _struct_7d.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.arm_dp = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.q_goal = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 8
(self.time_scale_stretch_factor,) = _struct_d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_i3I = struct.Struct("<i3I")
_struct_7d = struct.Struct("<7d")
_struct_d = struct.Struct("<d")
| [
"mxl592@case.edu"
] | mxl592@case.edu |
45fa2dbee61e221e1175c00b267661e423d7a2be | 757facc4046b40664b6b679f7e3a58c1da48845a | /ex19.py | 0072fad8ad8c91b1dc3d859d289b283a3763b5fa | [] | no_license | Samkelo-Kinetic/Kinetics | 2990e6740df8b1ce2768ce2243872f8407895c3c | 963b1df057559a395f7b61eea32552c8fd917c84 | refs/heads/master | 2020-03-27T23:53:33.330280 | 2018-10-12T12:28:19 | 2018-10-12T12:28:19 | 147,355,885 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | #Functions and Variables
def cheese_and_crackers(cheese_count, boxes_of_crackers):
    """Print a short party-supplies report for the given counts.

    Uses ``print(...)`` calls instead of the original Python-2-only print
    statements; with a single argument the output is identical under both
    Python 2 and Python 3.
    """
    print("You Have %d cheeses!" % cheese_count)
    print("You have %d boxes of crackers!" % boxes_of_crackers)
    print("Man that's not enough for party!")
    print("Get a blanket.\n")
print "we can just give the function numbers directly:"
cheese_and_crackers(20, 30)
print "OR, we can use variables from our script:"
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
print "We can even do math inside too:"
cheese_and_crackers(10 + 20, 5 + 6)
print "And we can combine the two, variables and math:"
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
| [
"samkelo@kineticskunk.com"
] | samkelo@kineticskunk.com |
256bb7942ddc5136f4fa22e73823cc34bb46d2c0 | 0156514d371c04da404b50994804ede8d264042a | /rest_batteries/exception_handlers.py | 15824e0dd41058bf34e5dd42c73220ed016ef552 | [
"MIT"
] | permissive | defineimpossible/django-rest-batteries | 68b074f18fcae304b9bac4a242f9a9eea98c6e9c | 951cc7ec153d1342a861d7f6468862000d5ea9f3 | refs/heads/master | 2023-07-21T10:45:18.133691 | 2023-07-11T02:52:45 | 2023-07-11T02:52:45 | 284,420,681 | 21 | 0 | MIT | 2023-07-11T02:34:06 | 2020-08-02T08:19:39 | Python | UTF-8 | Python | false | false | 397 | py | from rest_framework.views import exception_handler
from .errors_formatter import ErrorsFormatter
def errors_formatter_exception_handler(exc, context):
    """DRF exception handler that normalizes error payloads.

    Delegates to DRF's default ``exception_handler`` and, when it produced
    a response, rewrites ``response.data`` through ``ErrorsFormatter``.
    """
    response = exception_handler(exc, context)

    # No response means an unexpected error (server error, etc.); let the
    # framework's default handling take over.
    if response is None:
        return None

    response.data = ErrorsFormatter(exc)()
    return response
| [
"denis.orehovsky@gmail.com"
] | denis.orehovsky@gmail.com |
69bc2b87b4e297ce71f450a7c46c546972fa3449 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /PSg77AZJGACk4a7gt_6.py | 5bcd00492613d502e7d26232c6bfe6cf615fc660 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | """
For this challenge, forget how to add two numbers together. The best
explanation on what to do for this function is this meme:

### Examples
meme_sum(26, 39) ➞ 515
# 2+3 = 5, 6+9 = 15
# 26 + 39 = 515
meme_sum(122, 81) ➞ 1103
# 1+0 = 1, 2+8 = 10, 2+1 = 3
# 122 + 81 = 1103
meme_sum(1222, 30277) ➞ 31499
### Notes
N/A
"""
def meme_sum(a, b):
sum = ""
c=0
if b>a:
c=a
a=b
b=c
a = str(a)
b= str(b)
i=0
while i < (len(a)-len(b)):
sum = sum + a[i]
i += 1
i = 0
while i < len(b):
sum = sum + str((int(a[i+len(a)-len(b)])+ int(b[i])))
i += 1
return int(sum)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
71235e6cef2af20b7446f10bffc9e56fb288f23a | b1a93e393d94e493234e8c9ed00142cf692206a5 | /test_country_sorting.py | f6da434692d40d742476a3b57c132b02d8e7a0ce | [
"Apache-2.0"
] | permissive | mmarcikk/selenium_training | 2a0c811995c62793e623c61e569c247f853b6af9 | ff57a0498866a1aba0b484c79218bce494dfe218 | refs/heads/master | 2021-05-05T05:05:43.661873 | 2018-03-01T09:31:22 | 2018-03-01T09:31:22 | 118,660,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,060 | py | import pytest
from selenium import webdriver
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
wd.implicitly_wait(2)
request.addfinalizer(wd.quit)
return wd
def test_country_alphabetical_order(driver):
driver.get("http://localhost:8084/admin/?app=countries&doc=countries")
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_name("login").click()
countries = driver.find_elements_by_css_selector('#content .dataTable tr')
counter = len(countries)
country_list = []
zone_list = []
for country in range(2, counter):
country_name = driver.find_element_by_css_selector('.dataTable tr:nth-child(%d) a' % country).text
country_list.append(country_name)
zone_number = driver.find_element_by_css_selector('.dataTable tr:nth-child(%d) td:nth-child(6)' % country)\
.get_attribute('textContent')
if zone_number > '0':
driver.find_element_by_css_selector('.dataTable tr:nth-child(%d) a' % country).click()
element = driver.find_elements_by_css_selector('.dataTable tr td:nth-child(3)')
couter_zone = len(element) + 1
for element2 in range(2, couter_zone):
country_zone = driver.find_element_by_css_selector('.dataTable tr:nth-child(%d) td:nth-child(3)' % element2)\
.get_attribute('textContent')
zone_list.append(country_zone)
if zone_list == sorted(zone_list):
print('All country zones are sorted alphabetically')
else:
print('Country zones are not sorted alphabetically')
print(zone_list)
del zone_list[:]
driver.find_element_by_css_selector('li#app-:nth-child(3)').click()
else:
continue
if country_list == sorted(country_list):
print('All countries are sorted alphabetically')
else:
print('Countries are not sorted alphabetically')
| [
"marta.chimiak@stxnext.pl"
] | marta.chimiak@stxnext.pl |
1483f821c0627d6b8a82c9014eb08f848bc8b71b | 97a5c3db7ee6dbd42c3594311fc37ac9a933dc2b | /user/forms.py | 181a19a22ed17552ba2e214f089d86cb4243e70a | [] | no_license | abhi877/Instagram_Clone | 0f972d48b1e43173f7a005845d579c4f99800931 | 77817165003e2f3c67aeee009ed5cd0e3febace2 | refs/heads/main | 2023-08-25T08:40:49.507770 | 2021-09-23T15:29:14 | 2021-09-23T15:29:14 | 409,145,361 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,896 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import password_validation
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
from .models import Profile
from django.contrib.auth.forms import PasswordChangeForm
#using first_name as full_name
class SignUpForm(UserCreationForm):
username = forms.CharField(widget=(forms.TextInput(attrs={'class': 'signup-form', 'placeholder': 'Username'})),label='')
first_name = forms.CharField(widget=(forms.TextInput(attrs={'class': 'signup-form', 'placeholder': 'Full Name'})),label='', max_length=32)
email = forms.EmailField(widget=(forms.EmailInput(attrs={'class': 'signup-form', 'placeholder': 'Email'})),label='', max_length=64)
password1 = forms.CharField(widget=(forms.PasswordInput(attrs={'class': 'signup-form', 'placeholder': 'Password'})),label='')
password2 = forms.CharField(widget=(forms.PasswordInput(attrs={'class': 'signup-form', 'placeholder': 'Password Again'})),label='')
def clean(self):
email = self.cleaned_data.get('email')
username = self.cleaned_data.get('username')
if User.objects.filter(email=email).exists():
raise forms.ValidationError(f"Another account is using {email}")
if User.objects.filter(username=username).exists():
raise forms.ValidationError("This username isn't available. Please try another.")
return self.cleaned_data
class Meta:
model = User
fields = ('email', 'first_name','username', 'password1','password2')
class LoginForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
username = forms.CharField(widget=(forms.TextInput(attrs={'class': 'signup-form', 'placeholder': 'Username'})),label='')
password = forms.CharField(widget=(forms.PasswordInput(attrs={'class': 'signup-form', 'placeholder': 'Password'})),label='')
class UserEditForm(forms.ModelForm):
class Meta:
model = User
fields = ['first_name', 'username', 'email']
def __init__(self, *args, **kwargs):
super(UserEditForm, self).__init__(*args, **kwargs)
self.fields['first_name'].widget = forms.TextInput(attrs={'class': 'user-edit-form','placeholder': 'Name'},)
self.fields['first_name'].label = 'Name'
self.fields['username'].widget = forms.TextInput(attrs={'class': 'user-edit-form','placeholder': 'Username'},)
self.fields['username'].help_text = None
self.fields['email'].widget = forms.TextInput(attrs={'class': 'user-edit-form','placeholder': 'Email'},)
self.fields['email'].label = 'Email'
class ProfileEditForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['photo','website','bio', 'phone']
def __init__(self, *args, **kwargs):
super(ProfileEditForm, self).__init__(*args, **kwargs)
self.fields['website'].widget = forms.TextInput(attrs={'class': 'user-edit-form','placeholder': 'Website'},)
self.fields['bio'].widget = forms.TextInput(attrs={'class': 'user-edit-form','placeholder': 'Bio'},)
self.fields['phone'].widget = forms.TextInput(attrs={'class': 'user-edit-form','placeholder': 'Phone'},)
class passwordChangeForm(PasswordChangeForm):
def __init__(self, *args, **kwargs):
super(passwordChangeForm, self).__init__(*args, **kwargs)
self.fields['old_password'].widget = forms.PasswordInput(attrs={'class': 'user-edit-form','placeholder': 'current passowrd'},)
self.fields['new_password1'].widget = forms.PasswordInput(attrs={'class': 'user-edit-form','placeholder': 'new passowrd'},)
self.fields['new_password2'].widget = forms.PasswordInput(attrs={'class': 'user-edit-form','placeholder': 'confirm passowrd'},)
| [
"kumar.abhina5670@gmail.com"
] | kumar.abhina5670@gmail.com |
e1c6b945316596e701607f76ce9c27b844005759 | 83a4ff375458206a06e8e9bd029ed2a50eb3fa4f | /green/iMetricalCouch/src/imetricalcouch.py | 208722974e9a7822f59095436273ed7b811df1eb | [] | no_license | daneroo/snookr-gcode | fdb4977b2ea2ddb70a3e6cd085a0f7925aabd63c | a30f7910a08dfaa5812dbc8a2989c553da1ad353 | refs/heads/master | 2021-01-25T07:33:58.205388 | 2017-07-27T19:36:32 | 2017-07-27T19:36:32 | 32,338,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | # populate couch instance with imetrical data
#
#import simplejson as json
import json
import urllib2
import datetime
import couchdb
__author__="daniel"
__date__ ="$Oct 31, 2010 6:57:01 PM$"
JSONURL = "http://cantor/iMetrical/getJSON.php";
COUCHDBNAME = "imetricaltest"
couch = couchdb.Server()
#couch = couchdb.Server('http://example.com:5984/')
try:
db = couch.create(COUCHDBNAME)
print 'database (%s) created' % COUCHDBNAME
except:
del couch[COUCHDBNAME]
db = couch.create(COUCHDBNAME)
print 'database (%s) deleted and created' % COUCHDBNAME
def pretty(any):
print json.dumps(any, sort_keys=True, indent=4)
def loadJSON(url):
result = json.load(urllib2.urlopen(url))
#print result
#pretty(result)
return result;
if __name__ == "__main__":
print "Relax, iMetrical Couch!"
#pretty(['foo', {'bar': ('baz', None, 1.0, 2)}])
observations = loadJSON(JSONURL)
for obs in observations:
#print "saving obs:"
#pretty(obs)
db.save(obs)
#pretty(obs)
print "Saved %d observations." % (len(observations)) | [
"daneroo@users.noreply.github.com"
] | daneroo@users.noreply.github.com |
4bc6b2ded7a42b226ac3a04ee7c6be4878dd796e | 8ca4992e5c7f009147875549cee21c0efb7c03eb | /mmseg/models/decode_heads/nl_head.py | bbbe70b5fb7233fd840941678657950119fda43e | [
"Apache-2.0"
] | permissive | JiayuZou2020/DiffBEV | 0ada3f505fc5106d8b0068c319f0b80ed366b673 | 527acdb82ac028061893d9d1bbe69e589efae2a0 | refs/heads/main | 2023-05-23T07:25:39.465813 | 2023-04-04T02:53:05 | 2023-04-04T02:53:05 | 613,895,691 | 181 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import NonLocal2d
from ..builder import HEADS
from .fcn_head import FCNHead
@HEADS.register_module()
class NLHead(FCNHead):
"""Non-local Neural Networks.
This head is the implementation of `NLNet
<https://arxiv.org/abs/1711.07971>`_.
Args:
reduction (int): Reduction factor of projection transform. Default: 2.
use_scale (bool): Whether to scale pairwise_weight by
sqrt(1/inter_channels). Default: True.
mode (str): The nonlocal mode. Options are 'embedded_gaussian',
'dot_product'. Default: 'embedded_gaussian.'.
"""
def __init__(self,
reduction=2,
use_scale=True,
mode='embedded_gaussian',
**kwargs):
super(NLHead, self).__init__(num_convs=2, **kwargs)
self.reduction = reduction
self.use_scale = use_scale
self.mode = mode
self.nl_block = NonLocal2d(
in_channels=self.channels,
reduction=self.reduction,
use_scale=self.use_scale,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
mode=self.mode)
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
output = self.convs[0](x)
output = self.nl_block(output)
output = self.convs[1](output)
if self.concat_input:
output = self.conv_cat(torch.cat([x, output], dim=1))
output = self.cls_seg(output)
return output
| [
"noreply@github.com"
] | noreply@github.com |
dfde88d376bff11e0d3d19554790b4b6966a9654 | 7d1c9017a7744deb1681b2d646feb4212e65d0ec | /lib/build_pack_utils/runner.py | e01a3d634f79465b5be021da57008185de88dbff | [
"Apache-2.0"
] | permissive | puteulanus/cf-php-build-pack | db8fe5623f329b3c44006fd695e0214621f85cfe | 3e125a7819bcf36c5c71fd4458bb567b6eb327ff | refs/heads/master | 2020-05-23T11:16:11.797222 | 2014-10-15T04:13:52 | 2014-10-15T04:13:52 | 25,183,730 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,850 | py | import os
import os.path
import tempfile
import subprocess
import logging
# This and check_output are shims to support features of Python 2.7
# on Python 2.6.
#
# This code was borrowed from PyPy 2.7.
# bitbucket.org/pypy/pypy/src/9d88b4875d6e/lib-python/2.7/subprocess.py
#
# This can be removed when the CloudFoundry environment is upgraded
# to Python 2.7 or higher.
#
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (
self.cmd, self.returncode)
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=subprocess.STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
def stream_output(*popenargs, **kwargs):
r"""Run command with arguments and stream its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute.
The first argument should be the file like object where the output
should be written. The remainder of the arguments are the same as
for the Popen constructor.
Example:
>>> fp = open('cmd-output.txt', 'wb')
>>> stream_output(fp, ["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> fp = open('cmd-output.txt', 'wb')
>>> stream_output(fp, ["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=subprocess.STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if hasattr(popenargs[0], 'fileno'):
process = subprocess.Popen(stdout=popenargs[0],
*popenargs[1:], **kwargs)
retcode = process.wait()
else:
process = subprocess.Popen(stdout=subprocess.PIPE,
*popenargs[1:], **kwargs)
for c in iter(lambda: process.stdout.read(1024), ''):
popenargs[0].write(c)
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
class BuildPack(object):
def __init__(self, ctx, url, branch=None):
self._ctx = ctx
self._url = url
self._branch = branch
self.bp_dir = tempfile.mkdtemp(prefix='buildpack')
self._log = logging.getLogger('runner')
def run(self):
if self._url:
self._clone()
self.framework = self._detect()
print self._compile()
self.start_yml = self._release()
def _clone(self):
self._log.debug("Clongin [%s] to [%s]", self._url, self.bp_dir)
subprocess.call(['git', 'clone', self._url, self.bp_dir])
if self._branch:
self._log.debug("Branching to [%s]", self._branch)
subprocess.call(['git', 'checkout', self._branch])
def _detect(self):
self._log.debug("Running detect script")
cmd = [os.path.join(self.bp_dir, 'bin', 'detect'),
self._ctx['BUILD_DIR']]
return check_output(" ".join(cmd),
stderr=subprocess.STDOUT,
shell=True).strip()
def _compile(self):
self._log.debug("Running compile script with build dir [%s] "
"and cache dir [%s]",
self._ctx['BUILD_DIR'],
self._ctx['CACHE_DIR'])
cmd = [os.path.join(self.bp_dir, 'bin', 'compile'),
self._ctx['BUILD_DIR'],
self._ctx['CACHE_DIR']]
return check_output(" ".join(cmd),
stderr=subprocess.STDOUT,
shell=True).strip()
def _release(self):
self._log.debug("Running release script")
cmd = [os.path.join(self.bp_dir, 'bin', 'release'),
self._ctx['BUILD_DIR']]
return check_output(" ".join(cmd),
stderr=subprocess.STDOUT,
shell=True).strip()
| [
"dmikusa@gopivotal.com"
] | dmikusa@gopivotal.com |
0a9e46657142adcba4376c195fd5c89f84670629 | 85ed63e6ae798cc26bb7db989912405c2675dbb2 | /simpleTest04Client_.py | 0c4cdf64475499e51798185a532224a138493103 | [
"MIT"
] | permissive | LaplaceKorea/APIClient | 3608011f81e432237ed8c047b28e6323db3121fa | e772482c3d9cbedee98f46a3529dca5acc254f3c | refs/heads/main | 2023-07-20T06:33:37.141537 | 2021-08-26T15:48:42 | 2021-08-26T15:48:42 | 380,518,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | from LaplaceWSAPIClient import *
from MarkowitzSerde import *
from TargetSerde import *
from Operators import *
from TargetOperators import *
from RLStructure import *
from ClientConfig import client_config
query = RLQuery("default", datetime(2021,1,1), datetime(2021,1,21), {
"BankAccount": 100000.0,
"MMM":1.0,
"AA":1.0,
"AXP":1.0,
"BA":1.0,
"BAC":1.0,
"C":1.0,
"CAT":1.0,
"CVX":1.0,
"DD":1.0,
"DIS":1.0,
"GE":1.0,
"GM":1.0,
"HD":1.0,
"HPQ":1.0,
"IBM":1.0,
"JNJ":1.0,
"JPM":1.0,
"KO":1.0,
"MCD":1.0,
"MRK":1.0,
"PFE":1.0,
"PG":1.0,
"T":1.0,
"UTX":1.0,
"VZ":1.0,
"WMT":1.0,
"XOM":1.0
}, UserTokenSerde(client_config["user"],client_config["token"]))
performQueryRLQuery(client_config["wss"], query, lambda x: print("yahoo: ", x.Steps[0][0], x.Steps[0][1], x.Steps[0][203]))
| [
"renoir42@yahoo.com"
] | renoir42@yahoo.com |
2f2a1743222841ff34512aa1889a1587bd61b5ce | c759ca98768dd8fd47621e3aeda9069d4e0726c6 | /codewof/users/forms.py | 211e37a2e4797d3fa23af236d3215009c7f787c4 | [
"MIT"
] | permissive | lucyturn3r/codewof | 50fc504c3a539c376b3d19906e92839cadabb012 | acb2860c4b216013ffbba5476d5fac1616c78454 | refs/heads/develop | 2020-06-24T08:25:28.788099 | 2019-08-12T02:50:35 | 2019-08-12T02:50:35 | 198,912,987 | 0 | 0 | MIT | 2019-08-07T03:22:21 | 2019-07-25T23:17:17 | Python | UTF-8 | Python | false | false | 1,132 | py | """Forms for user application."""
from django.forms import ModelForm
from django.contrib.auth import get_user_model, forms
User = get_user_model()
class SignupForm(ModelForm):
"""Sign up for user registration."""
class Meta:
"""Metadata for SignupForm class."""
model = get_user_model()
fields = ['first_name', 'last_name']
def signup(self, request, user):
"""Extra logic when a user signs up.
Required by django-allauth.
"""
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.save()
class UserChangeForm(forms.UserChangeForm):
"""Form class for changing user."""
class Meta(forms.UserChangeForm.Meta):
"""Metadata for UserChangeForm class."""
model = User
fields = ('email', 'last_name')
class UserCreationForm(forms.UserCreationForm):
"""Form class for creating user."""
class Meta(forms.UserCreationForm.Meta):
"""Metadata for UserCreationForm class."""
model = User
fields = ('email', 'first_name', 'last_name')
| [
"jackmorgannz@gmail.com"
] | jackmorgannz@gmail.com |
943610c3817b120f676b87e3cbd7e064f2385257 | e5bf65908b04192d4ea540f8bf49f887d06c1195 | /Python_BootCamp_CE/ForLoop.py | 59dfba69479b57cd052bbd80dda1c6d59669a6c8 | [] | no_license | druplall/Python_BootCamp_CE | 06ac2b2721fbd21446b82d315f2e0c0438119d53 | 56fa82567ba364302ed350c15e30e5652e8deab2 | refs/heads/master | 2022-02-21T12:09:55.987315 | 2019-08-28T17:55:42 | 2019-08-28T17:55:42 | 197,800,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # For Loop
mylist = [1,2,3,4,5,6,7,8,9,10]
for i in mylist:
if (i % 2 == 0):
print(i)
x = 0
while x < 5:
print(f'The Current value of x is {x}')
x = x+1
else:
print('X is not less than 5')
# What is the 'continue' statement ?
# Goes to the top of the closest enclosing loop
mystring = 'Deodat'
for i in mystring:
if i == 'E':
continue
print(i)
| [
"RUPLALLD@coned.com"
] | RUPLALLD@coned.com |
2049346c2d9e4a956951a4fa5b7244e5b807fbb8 | aff98325082a912c84471b7a505ab565175b0289 | /tests/test_progress.py | 2d3bd9a9fdaeb394b7f7af6b7cb5d68b7695ec3e | [
"MIT"
] | permissive | dbazhal/kopf | a10dde232095eaf7104f1623a1f6bfc33cb80363 | ac772f2d7ce1272f47f10ebff784f54a6ec8dcfa | refs/heads/master | 2020-06-15T03:51:57.055815 | 2020-01-13T14:23:29 | 2020-01-13T14:23:29 | 195,031,097 | 0 | 0 | MIT | 2019-07-03T10:21:51 | 2019-07-03T10:21:50 | null | UTF-8 | Python | false | false | 15,676 | py | import copy
import datetime
from unittest.mock import Mock
import freezegun
import pytest
from kopf.structs.status import (
is_started,
is_sleeping,
is_awakened,
is_finished,
get_start_time,
get_awake_time,
get_retry_count,
set_start_time,
set_awake_time,
set_retry_time,
store_failure,
store_success,
store_result,
purge_progress,
)
# Timestamps: time zero (0), before (B), after (A), and time zero+1s (1).
TSB = datetime.datetime(2020, 12, 31, 23, 59, 59, 000000)
TS0 = datetime.datetime(2020, 12, 31, 23, 59, 59, 123456)
TS1 = datetime.datetime(2021, 1, 1, 00, 00, 00, 123456)
TSA = datetime.datetime(2020, 12, 31, 23, 59, 59, 999999)
TSB_ISO = '2020-12-31T23:59:59.000000'
TS0_ISO = '2020-12-31T23:59:59.123456'
TS1_ISO = '2021-01-01T00:00:00.123456'
TSA_ISO = '2020-12-31T23:59:59.999999'
@pytest.fixture()
def handler():
return Mock(id='some-id', spec_set=['id'])
@pytest.mark.parametrize('expected, body', [
(False, {}),
(False, {'status': {}}),
(False, {'status': {'kopf': {}}}),
(False, {'status': {'kopf': {'progress': {}}}}),
(False, {'status': {'kopf': {'progress': {'etc-id': {}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {}}}}}),
])
def test_is_started(handler, expected, body):
origbody = copy.deepcopy(body)
result = is_started(body=body, handler=handler)
assert result == expected
assert body == origbody # not modified
@pytest.mark.parametrize('expected, body', [
(False, {}),
(False, {'status': {}}),
(False, {'status': {'kopf': {}}}),
(False, {'status': {'kopf': {'progress': {}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'success': False}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'failure': False}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'success': None}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'failure': None}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'success': True}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'failure': True}}}}}),
])
def test_is_finished(handler, expected, body):
origbody = copy.deepcopy(body)
result = is_finished(body=body, handler=handler)
assert result == expected
assert body == origbody # not modified
@pytest.mark.parametrize('expected, body', [
# Everything that is finished is not sleeping, no matter the sleep/awake field.
(False, {'status': {'kopf': {'progress': {'some-id': {'success': True}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'failure': True}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'success': True, 'delayed': TS0_ISO}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'failure': True, 'delayed': TS0_ISO}}}}}),
# Everything with no sleep/awake field set is not sleeping either.
(False, {'status': {'kopf': {'progress': {'some-id': {}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'delayed': None}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'success': None}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'failure': None}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'success': None, 'delayed': None}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'failure': None, 'delayed': None}}}}}),
# When not finished and has awake time, the output depends on the relation to "now".
(False, {'status': {'kopf': {'progress': {'some-id': {'delayed': TS0_ISO}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'delayed': TS0_ISO, 'success': None}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'delayed': TS0_ISO, 'failure': None}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'delayed': TSB_ISO}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'delayed': TSB_ISO, 'success': None}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'delayed': TSB_ISO, 'failure': None}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'delayed': TSA_ISO}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'delayed': TSA_ISO, 'success': None}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'delayed': TSA_ISO, 'failure': None}}}}}),
])
@freezegun.freeze_time(TS0)
def test_is_sleeping(handler, expected, body):
origbody = copy.deepcopy(body)
result = is_sleeping(body=body, handler=handler)
assert result == expected
assert body == origbody # not modified
@pytest.mark.parametrize('expected, body', [
# Everything that is finished never awakens, no matter the sleep/awake field.
(False, {'status': {'kopf': {'progress': {'some-id': {'success': True}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'failure': True}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'success': True, 'delayed': TS0_ISO}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'failure': True, 'delayed': TS0_ISO}}}}}),
# Everything with no sleep/awake field is not sleeping, thus by definition is awake.
(True , {'status': {'kopf': {'progress': {'some-id': {}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'delayed': None}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'success': None}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'failure': None}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'success': None, 'delayed': None}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'failure': None, 'delayed': None}}}}}),
# When not finished and has awake time, the output depends on the relation to "now".
(True , {'status': {'kopf': {'progress': {'some-id': {'delayed': TS0_ISO}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'delayed': TS0_ISO, 'success': None}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'delayed': TS0_ISO, 'failure': None}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'delayed': TSB_ISO}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'delayed': TSB_ISO, 'success': None}}}}}),
(True , {'status': {'kopf': {'progress': {'some-id': {'delayed': TSB_ISO, 'failure': None}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'delayed': TSA_ISO}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'delayed': TSA_ISO, 'success': None}}}}}),
(False, {'status': {'kopf': {'progress': {'some-id': {'delayed': TSA_ISO, 'failure': None}}}}}),
])
@freezegun.freeze_time(TS0)
def test_is_awakened(handler, expected, body):
origbody = copy.deepcopy(body)
result = is_awakened(body=body, handler=handler)
assert result == expected
assert body == origbody # not modified
@pytest.mark.parametrize('expected, body', [
(None, {}),
(None, {'status': {}}),
(None, {'status': {'kopf': {}}}),
(None, {'status': {'kopf': {'progress': {}}}}),
(None, {'status': {'kopf': {'progress': {'some-id': {}}}}}),
(None, {'status': {'kopf': {'progress': {'some-id': {'delayed': None}}}}}),
(TS0, {'status': {'kopf': {'progress': {'some-id': {'delayed': TS0_ISO}}}}}),
])
def test_get_awake_time(handler, expected, body):
origbody = copy.deepcopy(body)
result = get_awake_time(body=body, handler=handler)
assert result == expected
assert body == origbody # not modified
@pytest.mark.parametrize('expected, body, patch', [
(None, {}, {}),
(None, {'status': {}}, {}),
(None, {'status': {'kopf': {}}}, {}),
(None, {'status': {'kopf': {'progress': {}}}}, {}),
(None, {'status': {'kopf': {'progress': {'some-id': {}}}}}, {}),
(None, {'status': {'kopf': {'progress': {'some-id': {'started': None}}}}}, {}),
(TS0, {'status': {'kopf': {'progress': {'some-id': {'started': TS0_ISO}}}}}, {}),
(None, {}, {'status': {}}),
(None, {}, {'status': {'kopf': {}}}),
(None, {}, {'status': {'kopf': {'progress': {}}}}),
(None, {}, {'status': {'kopf': {'progress': {'some-id': {}}}}}),
(None, {}, {'status': {'kopf': {'progress': {'some-id': {'started': None}}}}}),
(TS0, {}, {'status': {'kopf': {'progress': {'some-id': {'started': TS0_ISO}}}}}),
(TSB, # the patch has priority
{'status': {'kopf': {'progress': {'some-id': {'started': TSA_ISO}}}}},
{'status': {'kopf': {'progress': {'some-id': {'started': TSB_ISO}}}}}),
])
def test_get_start_time(handler, expected, body, patch):
origbody = copy.deepcopy(body)
origpatch = copy.deepcopy(patch)
result = get_start_time(body=body, patch=patch, handler=handler)
assert result == expected
assert body == origbody # not modified
assert patch == origpatch # not modified
@pytest.mark.parametrize('expected, body', [
(0, {}),
(0, {'status': {}}),
(0, {'status': {'kopf': {'progress': {}}}}),
(0, {'status': {'kopf': {'progress': {'some-id': {}}}}}),
(0, {'status': {'kopf': {'progress': {'some-id': {'retries': None}}}}}),
(6, {'status': {'kopf': {'progress': {'some-id': {'retries': 6}}}}}),
])
def test_get_retry_count(handler, expected, body):
origbody = copy.deepcopy(body)
result = get_retry_count(body=body, handler=handler)
assert result == expected
assert body == origbody # not modified
@pytest.mark.parametrize('body, expected', [
({}, {'status': {'kopf': {'progress': {'some-id': {'started': TS0_ISO}}}}}),
])
@freezegun.freeze_time(TS0)
def test_set_start_time(handler, expected, body):
origbody = copy.deepcopy(body)
patch = {}
set_start_time(body=body, patch=patch, handler=handler)
assert patch == expected
assert body == origbody # not modified
@pytest.mark.parametrize('body, delay, expected', [
({}, None, {'status': {'kopf': {'progress': {'some-id': {'delayed': None}}}}}),
({}, 0, {'status': {'kopf': {'progress': {'some-id': {'delayed': TS0_ISO}}}}}),
({}, 1, {'status': {'kopf': {'progress': {'some-id': {'delayed': TS1_ISO}}}}}),
])
@freezegun.freeze_time(TS0)
def test_set_awake_time(handler, expected, body, delay):
origbody = copy.deepcopy(body)
patch = {}
set_awake_time(body=body, patch=patch, handler=handler, delay=delay)
assert patch == expected
assert body == origbody # not modified
@pytest.mark.parametrize('body, delay, expected', [
({}, None,
{'status': {'kopf': {'progress': {'some-id': {'retries': 1, 'delayed': None}}}}}),
({}, 0,
{'status': {'kopf': {'progress': {'some-id': {'retries': 1, 'delayed': TS0_ISO}}}}}),
({}, 1,
{'status': {'kopf': {'progress': {'some-id': {'retries': 1, 'delayed': TS1_ISO}}}}}),
({'status': {'kopf': {'progress': {'some-id': {'retries': None}}}}}, None,
{'status': {'kopf': {'progress': {'some-id': {'retries': 1, 'delayed': None}}}}}),
({'status': {'kopf': {'progress': {'some-id': {'retries': None}}}}}, 0,
{'status': {'kopf': {'progress': {'some-id': {'retries': 1, 'delayed': TS0_ISO}}}}}),
({'status': {'kopf': {'progress': {'some-id': {'retries': None}}}}}, 1,
{'status': {'kopf': {'progress': {'some-id': {'retries': 1, 'delayed': TS1_ISO}}}}}),
({'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}}, None,
{'status': {'kopf': {'progress': {'some-id': {'retries': 6, 'delayed': None}}}}}),
({'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}}, 0,
{'status': {'kopf': {'progress': {'some-id': {'retries': 6, 'delayed': TS0_ISO}}}}}),
({'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}}, 1,
{'status': {'kopf': {'progress': {'some-id': {'retries': 6, 'delayed': TS1_ISO}}}}}),
])
@freezegun.freeze_time(TS0)
def test_set_retry_time(handler, expected, body, delay):
origbody = copy.deepcopy(body)
patch = {}
set_retry_time(body=body, patch=patch, handler=handler, delay=delay)
assert patch == expected
assert body == origbody # not modified
@pytest.mark.parametrize('body, expected', [
({},
{'status': {'kopf': {'progress': {'some-id': {'stopped': TS0_ISO,
'failure': True,
'retries': 1,
'message': 'some-error'}}}}}),
({'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}},
{'status': {'kopf': {'progress': {'some-id': {'stopped': TS0_ISO,
'failure': True,
'retries': 6,
'message': 'some-error'}}}}}),
])
@freezegun.freeze_time(TS0)
def test_store_failure(handler, expected, body):
origbody = copy.deepcopy(body)
patch = {}
store_failure(body=body, patch=patch, handler=handler, exc=Exception("some-error"))
assert patch == expected
assert body == origbody # not modified
@pytest.mark.parametrize('result, body, expected', [
# With no result, it updates only the progress.
(None,
{},
{'status': {'kopf': {'progress': {'some-id': {'stopped': TS0_ISO,
'success': True,
'retries': 1,
'message': None}}}}}),
(None,
{'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}},
{'status': {'kopf': {'progress': {'some-id': {'stopped': TS0_ISO,
'success': True,
'retries': 6,
'message': None}}}}}),
# With the result, it updates also the status.
({'field': 'value'},
{},
{'status': {'kopf': {'progress': {'some-id': {'stopped': TS0_ISO,
'success': True,
'retries': 1,
'message': None}}},
'some-id': {'field': 'value'}}}),
({'field': 'value'},
{'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}},
{'status': {'kopf': {'progress': {'some-id': {'stopped': TS0_ISO,
'success': True,
'retries': 6,
'message': None}}},
'some-id': {'field': 'value'}}}),
])
@freezegun.freeze_time(TS0)
def test_store_success(handler, expected, body, result):
origbody = copy.deepcopy(body)
patch = {}
store_success(body=body, patch=patch, handler=handler, result=result)
assert patch == expected
assert body == origbody # not modified
@pytest.mark.parametrize('result, expected', [
(None,
{}),
({'field': 'value'},
{'status': {'some-id': {'field': 'value'}}}),
('string',
{'status': {'some-id': 'string'}}),
])
def test_store_result(handler, expected, result):
patch = {}
store_result(patch=patch, handler=handler, result=result)
assert patch == expected
@pytest.mark.parametrize('body', [
({}),
({'status': {'kopf': {'progress': {'some-id': {'retries': 5}}}}}),
])
def test_purge_progress(body):
origbody = copy.deepcopy(body)
patch = {}
purge_progress(body=body, patch=patch)
assert patch == {'status': {'kopf': {'progress': None}}}
assert body == origbody # not modified
| [
"sergey.vasilyev@zalando.de"
] | sergey.vasilyev@zalando.de |
3b08fd39d483dd905b289f19b47dcc45bbbd0504 | c35eaed6e8d6a6e7b3e1194c79f77c04540c6fc6 | /Clone_14/Evaluation/toy_problem.py | 03f32b4db9d8a56623b97613f059000d09498075 | [] | no_license | vishwesh5/OpenMLCPython_v-0.0.5 | 847a48da1f4140398d285cd809590411905260db | 1e690133e2dc70358d89927d95d6ccf81df0af3b | refs/heads/master | 2021-01-21T11:01:01.717518 | 2017-08-24T15:38:10 | 2017-08-24T15:38:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,774 | py | # -*- coding: utf-8 -*-
# MLC (Machine Learning Control): A genetic algorithm library to solve chaotic problems
# Copyright (C) 2015-2017, Thomas Duriez (thomas.duriez@gmail.com)
# Copyright (C) 2015, Adrian Durán (adrianmdu@gmail.com)
# Copyright (C) 2015-2017, Ezequiel Torres Feyuk (ezequiel.torresfeyuk@gmail.com)
# Copyright (C) 2016-2017, Marco Germano Zbrun (marco.germano@intraway.com)
# Copyright (C) 2016-2017, Raúl Lopez Skuba (raulopez0@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
# -*- coding: utf-8 -*-
import numpy as np
import MLC.Log.log as lg
import matplotlib.pyplot as plt
import random
import sys
import time
import csv
# import pandas as pd
from MLC.arduino.protocol import ArduinoUserInterface
from MLC.mlc_parameters.mlc_parameters import Config
from PyQt5.QtCore import Qt
def individual_data(indiv):
global g_data
# ==============================================================================
# SAMPLES = 201
# x = np.linspace(-10.0, 10.0, num=SAMPLES)
# y = np.tanh(4*x)
# ==============================================================================
# ==============================================================================
# My Code to import features from the building data
# dataset = pd.read_csv('/home/etorres/harsh.csv', delimiter='\t')
try:
if g_data is None:
pass
except NameError:
g_data = None
with open('/home/htomar/Half_Dataset.csv', 'r') as f:
csv_reader = csv.reader(f, delimiter='\t')
for row in csv_reader:
if g_data is None:
g_data = [[] for x in xrange(len(row))]
for index in xrange(len(row)):
g_data[index].append(float(row[index]))
x0 = np.array(g_data[0]) # Time
x1 = np.array(g_data[1]) # Temperature
x2 = np.array(g_data[2]) # Wind
x3 = np.array(g_data[3]) # Solar
x4 = np.array(g_data[4]) # Humidity
x5 = np.array(g_data[9]) # IsHoliday
x6 = np.array(g_data[8]) # Day of the Week
x7 = np.array(g_data[10])
y = np.array(g_data[5]) # Whole Building Energy
# print "x0: {}".format(type(x0))
# print "x1: {}".format(type(x1))
# print "x2: {}".format(type(x2))
# print "x3: {}".format(type(x3))
# print "x4: {}".format(type(x4))
# print "x5: {}".format(type(x5))
# print "x6: {}".format(type(x6))
# print "x0: {}".format(x0)
# print "x1: {}".format(x1)
# print "x2: {}".format(x2)
# print "x3: {}".format(x3)
# print "x4: {}".format(x4)
# print "x5: {}".format(x5)
# print "x6: {}".format(x6)
# print "x6: {}".format(x6)
# print "y: {}".format(y)
# ==============================================================================
# ==============================================================================
# DON'T NEED TO ADD NOISE
# config = Config.get_instance()
# artificial_noise = config.getint('EVALUATOR', 'artificialnoise')
# y_with_noise = y + [random.random() / 2 - 0.25 for _ in xrange(SAMPLES)] + artificial_noise * 500
#
# ==============================================================================
# ==============================================================================
# if isinstance(indiv.get_formal(), str):
# formal = indiv.get_formal().replace('S0', 'x')
# else:
# # toy problem support for multiple controls
# formal = indiv.get_formal()[0].replace('S0', 'x')
# ==============================================================================
# ==============================================================================
# My definition for formal
# TODO: This could be wrong. Check this line first
# formal: matlab interpretable expression of the individual
if isinstance(indiv.get_formal(), str):
formal = indiv.get_formal().replace('S0',
'x0') # Replacing S0 with x after obtaining the interpretable expression
formal = formal.replace('S1', 'x1')
formal = formal.replace('S2', 'x2')
formal = formal.replace('S3', 'x3')
formal = formal.replace('S4', 'x4')
formal = formal.replace('S5', 'x5')
formal = formal.replace('S6', 'x6')
formal = formal.replace('S7', 'x7')
else:
# toy problem support for multiple controls
formal = indiv.get_formal()[0].replace('S0', 'x0') # Should all of them be [0]? Mostly not. And this can be compressed of course
formal = formal.replace('S1', 'x1')
formal = formal.replace('S2', 'x2')
formal = formal.replace('S3', 'x3')
formal = formal.replace('S4', 'x4')
formal = formal.replace('S5', 'x5')
formal = formal.replace('S7', 'x7')
formal = formal.replace('S6', 'x6')
# ==============================================================================
# Calculate J like the sum of the square difference of the
# functions in every point
lg.logger_.debug('[POP][TOY_PROBLEM] Individual Formal: ' + formal)
b = indiv.get_tree().calculate_expression([x0, x1, x2, x3, x4, x5, x6, x7])
# print b
# If the expression doesn't have the term 'x',
# the eval returns a value (float) instead of an array.
# In that case transform it to an array
# ==============================================================================
# if type(b) == float:
# b = np.repeat(b, SAMPLES)
#
# return x, y, y_with_noise, b
# ==============================================================================
return x0, x1, x2, x3, x4, x5, x6,x7, y, b
def cost(indiv):
# x, y, y_with_noise, b = individual_data(indiv)
x0, x1, x2, x3, x4, x5, x6,x7, y, b = individual_data(indiv)
# Deactivate the numpy warnings, because this sum could raise an overflow
# Runtime warning from time to time
np.seterr(all='ignore')
# print "b: {}".format(b)
# print "y: {}".format(y)
# print "b: {}".format(type(b))
# print "y: {}".format(type(y))
array_size = 1
try:
array_size = b.size
except AttributeError:
pass
cost_value = float(np.sum((b - y) ** 2)) / array_size
np.seterr(all='warn')
return cost_value
# ==============================================================================
def show_best(index, generation, indiv, cost, block=True):
# #x, y, y_with_noise, b = individual_data(indiv)
x0, x1, x2, x3, x4, x5, x6,x7, y, b = individual_data(indiv)
#
x = np.linspace(0, y.size-1, num=y.size)
#mean_squared_error = np.sqrt((y - b)**2 / (1 + np.absolute(x**2)))
mean_squared_error = y - b # This is just mean error
# Put figure window on top of all other windows
fig = plt.figure()
fig.canvas.manager.window.setWindowModality(Qt.ApplicationModal)
fig.canvas.manager.window.setWindowTitle("Best Individual")
formal = None
if type(indiv.get_formal()) == list:
formal = indiv.get_formal()[0]
else:
formal = indiv.get_formal()
plt.rc('font', family='serif')
plt.suptitle("Generation N#{0} - Individual N#{1}\n"
"Cost: {2}\n Formal: {3}".format(generation,
index,
cost,
formal),
fontsize=12)
plt.subplot(2, 1, 1)
line1, = plt.plot(x, y, color='r', linewidth=2, label='Curve without noise')
line3, = plt.plot(x, b, color='k', linewidth=2, label='Control Law (Individual)')
plt.ylabel('Functions', fontsize=12, fontweight='bold')
plt.xlabel('Samples', fontsize=12, fontweight='bold')
plt.legend(handles=[line1, line3], loc=2)
plt.grid(True)
plt.subplot(2, 1, 2)
plt.plot(x, mean_squared_error, '*r')
plt.ylabel('Mean Squared Error', fontsize=12, fontweight='bold')
plt.xlabel('Samples', fontsize=12, fontweight='bold')
plt.grid(True)
plt.yscale('log')
plt.show(block=block)
| [
"tomar.gatech@gmail.com"
] | tomar.gatech@gmail.com |
bf4a47174593f4b7bd04435e3ac6c004011aa50c | e0a0385f142ca60cc931395736fcf1674163a79a | /Validation/Validate_MNIST.py | 9c1ec82caea8bd9e32dbcc2bb84987e184ea7ffd | [] | no_license | Erwin-rella/DeepDetector | 6e5d7055d6056f9bfb1b5a6479ba4454012421d8 | 3f427037b1a78c99231e776a3b6c8758101ede48 | refs/heads/master | 2023-03-16T06:27:04.057765 | 2018-06-03T16:12:28 | 2018-06-03T16:12:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,039 | py |
# coding: utf-8
# In[1]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import keras
from keras import backend
import tensorflow as tf
from tensorflow.python.platform import flags
from cleverhans.utils_mnist import data_mnist
from cleverhans.utils_tf import model_train, model_eval, model_argmax
from cleverhans.attacks import FastGradientMethod
from cleverhans.utils import AccuracyReport
from cleverhans.utils_keras import cnn_model
from cleverhans.utils_keras import KerasModelWrapper
import time
import matplotlib.pyplot as plt
import math
FLAGS = flags.FLAGS
# In[2]:
def normalization(image_data):
image_data[image_data<0] = 0
image_data[image_data>1.0] = 1.0
def scalarQuantization(inputDigit, interval, left=True):
retDigit = inputDigit*255
retDigit//=interval
retDigit*=interval
if not left:
halfInterval = interval//2
retDigit+=(halfInterval)
retDigit/=255.0
return retDigit
def oneDEntropy(inputDigit):
expandDigit = np.array(inputDigit*255,dtype=np.int16)
f = np.zeros(256)
for i in range(28):
for j in range(28):
f[expandDigit[i][j]]+=1
f/=784.0
H = 0
for i in range(256):
if f[i] > 0:
H+=f[i]*math.log(f[i],2)
return -H
def crossMeanFilterOperations(inputDigit, start, end, coefficient):
retDigit = np.array(inputDigit, dtype=np.float32)
for row in xrange(start, end):
for col in xrange(start, end):
temp0 = inputDigit[row][col]
for i in range(1,start+1):
temp0+=inputDigit[0][row-i][col]
temp0+=inputDigit[0][row+i][col]
temp0+=inputDigit[0][row][col-i]
temp0+=inputDigit[0][row][col+i]
retDigit[row][col] = temp0/coefficient
return retDigit
def chooseCloserFilter(original_data,filter_data1,filter_data2):
result_data=np.zeros_like(original_data)
for j in range(28):
for k in range(28):
a=abs(filter_data1[j][k]-original_data[j][k])
b=abs(filter_data2[j][k]-original_data[j][k])
if(a<b):
result_data[j][k]=filter_data1[j][k]
else:
result_data[j][k]=filter_data2[j][k]
return result_data
def my_model_argmax(sess, x, predictions, samples):
feed_dict = {x: samples}
probabilities = sess.run(predictions, feed_dict)
return np.reshape(probabilities,10)
# if samples.shape[0] == 1:
# return np.argmax(probabilities)
# else:
# return np.argmax(probabilities, axis=1)
# In[3]:
#validation
def mnist_tutorial(train_start=0, train_end=60000, test_start=0,
test_end=10000, nb_epochs=6, batch_size=128,
learning_rate=0.001, train_dir="/tmp",
filename="mnist.ckpt", load_model=False,
testing=False):
keras.layers.core.K.set_learning_phase(0)
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
if not hasattr(backend, "tf"):
raise RuntimeError("This tutorial requires keras to be configured"
" to use the TensorFlow backend.")
if keras.backend.image_dim_ordering() != 'tf':
keras.backend.set_image_dim_ordering('tf')
print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
"'th', temporarily setting to 'tf'")
# Create TF session and set as Keras backend session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# Get MNIST test data
X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end)
# Use label smoothing
assert Y_train.shape[1] == 10
label_smooth = .1
Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
y = tf.placeholder(tf.float32, shape=(None, 10))
# Define TF model graph
model = cnn_model()
predictions = model(x)
print("Defined TensorFlow model graph.")
def evaluate():
# Evaluate the accuracy of the MNIST model on legitimate test examples
eval_params = {'batch_size': batch_size}
acc = model_eval(sess, x, y, predictions, X_test, Y_test, args=eval_params)
report.clean_train_clean_eval = acc
assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate examples: %0.4f' % acc)
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'train_dir': train_dir,
'filename': filename
}
# Train an MNIST model
ckpt = tf.train.get_checkpoint_state(train_dir)
ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
rng = np.random.RandomState([2017, 8, 30])
if load_model and ckpt_path:
saver = tf.train.Saver()
saver.restore(sess, ckpt_path)
print("Model loaded from: {}".format(ckpt_path))
else:
print("Model was not loaded, training from scratch.")
model_train(sess, x, y, predictions, X_train, Y_train, evaluate=evaluate,
args=train_params, save=True, rng=rng)
advGenTimeStart = time.time()
wrap = KerasModelWrapper(model)
advGenTimeStart = time.time()
fgsm = FastGradientMethod(wrap, sess=sess)
fgsm_params = {'eps': 0.2,
'clip_min': 0.,
'clip_max': 1.}
adv_x = fgsm.generate(x, **fgsm_params)
adv_x = sess.run(adv_x, feed_dict={x: X_test[4500:5500]})
advGenTimeEnd = time.time()
advGenTime = advGenTimeEnd-advGenTimeStart
for i in xrange(1000):
normalization(adv_x[i:(i+1)])
original_classified_wrong_number = 0
disturbed_failure_number = 0
NbLowEntropy = 0
NbMidEntropy = 0
NbHighEntropy = 0
lowTP = 0
lowFN = 0
lowFP = 0
midTP = 0
midFN = 0
midFP = 0
highTP = 0
highFN = 0
highFP = 0
for i in range(len(adv_x)):
current_class = int(np.argmax(Y_test[4500+i]))
oriPreTimeStart = time.time()
currentXLabel = model_argmax(sess,x,predictions,X_test[i+4500:(i+4501)])
currentXProbList = my_model_argmax(sess,x,predictions,X_test[i+4500:(i+4501)])
oriPreTimeEnd = time.time()
oriPreTime = oriPreTimeEnd-oriPreTimeStart
if currentXLabel != current_class:
original_classified_wrong_number+=1
continue
advPreTimeStart = time.time()
currentAdvXLabel = model_argmax(sess,x,predictions,adv_x[i:(i+1)])
currentAdvXProbList = my_model_argmax(sess,x,predictions,adv_x[i:(i+1)])
advPreTimeEnd = time.time()
advPreTime = advPreTimeEnd-advPreTimeStart
if currentAdvXLabel == currentXLabel:
disturbed_failure_number+=1
continue
tempX = np.reshape(X_test[i+4500:(i+4501)], (28,28))
test_x = np.array(tempX)
oriFilteredPreTimeStart = time.time()
currentX = np.reshape(X_test[i+4500:(i+4501)], (28,28))
imageEntropy = oneDEntropy(test_x)
if imageEntropy < 4:
NbLowEntropy+=1
current_x_res = scalarQuantization(currentX,128)
current_x_res = np.reshape(current_x_res, X_test[0:1].shape)
current_x_res_label = model_argmax(sess,x,predictions,current_x_res)
if current_x_res_label != current_class:
lowFP+=1
elif imageEntropy < 5:
NbMidEntropy+=1
current_x_res = scalarQuantization(currentX,64)
current_x_res = np.reshape(current_x_res, X_test[0:1].shape)
current_x_res_label = model_argmax(sess,x,predictions,current_x_res)
if current_x_res_label != current_class:
midFP+=1
else:
NbHighEntropy+=1
current_x_res = scalarQuantization(currentX,43)
current_x_res = np.reshape(current_x_res, X_test[0:1].shape)
current_x_res_label = model_argmax(sess,x,predictions,current_x_res)
if current_x_res_label != current_class:
highFP+=1
tempX2 = np.reshape(adv_x[i:(i+1)], (28,28))
test_adv_x = np.array(tempX2)
currentAdvX = np.reshape(adv_x[i:(i+1)], (28,28))
imageEntropy2 = oneDEntropy(test_adv_x)
print('%d: %.2f------%.2f' % (i, imageEntropy,imageEntropy2))
if imageEntropy2 < 4:
NbLowEntropy+=1
current_adv_x_res = scalarQuantization(currentAdvX,128)
current_adv_x_res = np.reshape(current_adv_x_res, X_test[0:1].shape)
current_adv_x_res_label = model_argmax(sess,x,predictions,current_adv_x_res)
if current_adv_x_res_label != currentAdvXLabel:
lowTP+=1
else:
lowFN+=1
elif imageEntropy2 < 5:
NbMidEntropy+=1
current_adv_x_res = scalarQuantization(currentAdvX,64)
current_adv_x_res = np.reshape(current_adv_x_res, X_test[0:1].shape)
current_adv_x_res_label = model_argmax(sess,x,predictions,current_adv_x_res)
if current_adv_x_res_label != currentAdvXLabel:
midTP+=1
else:
highFN+=1
else:
NbHighEntropy+=1
current_adv_x_res = scalarQuantization(currentAdvX,43)
current_adv_x_res = np.reshape(current_adv_x_res, X_test[0:1].shape)
current_adv_x_res_label = model_argmax(sess,x,predictions,current_adv_x_res)
if current_adv_x_res_label != currentAdvXLabel:
highTP+=1
else:
highFN+=1
str1 = '%d-%d' % (original_classified_wrong_number,disturbed_failure_number)
lowstr = '%d : lowTP = %d; lowFN = %d; lowFP = %d' % (NbLowEntropy,lowTP,lowFN,lowFP)
midstr = '%d : midTP = %d; midFN = %d; midFP = %d' % (NbMidEntropy,midTP,midFN,midFP)
highstr = '%d : highTP = %d; highFN = %d; highFP = %d' % (NbHighEntropy,highTP,highFN,highFP)
print(str1)
print(lowstr)
print(midstr)
print(highstr)
lowRecall=lowTP*1.0/(lowTP+lowFN)
lowPrecision=lowTP*1.0/(lowTP+lowFP)
midRecall=midTP*1.0/(midTP+midFN)
midPrecision=midTP*1.0/(midTP+midFP)
highRecall=highTP*1.0/(highTP+highFN)
highPrecision=highTP*1.0/(highTP+highFP)
print ("lowRecall: ",lowRecall)
print ("lowPrecision: ",lowPrecision)
print ("midRecall: ",midRecall)
print ("midPrecision: ",midPrecision)
print ("highRecall: ",highRecall)
print ("highPrecision: ",highPrecision)
# In[4]:
def main(argv=None):
mnist_tutorial(nb_epochs=FLAGS.nb_epochs,
batch_size=FLAGS.batch_size,
learning_rate=FLAGS.learning_rate,
train_dir=FLAGS.train_dir,
filename=FLAGS.filename,
load_model=FLAGS.load_model)
if __name__ == '__main__':
flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
flags.DEFINE_string('train_dir', '/tmp', 'Directory where to save model.')
flags.DEFINE_string('filename', 'mnist.ckpt', 'Checkpoint filename.')
flags.DEFINE_boolean('load_model', True, 'Load saved model or train.')
tf.app.run()
| [
"noreply@github.com"
] | noreply@github.com |
61b289a049925e33c432f9de32552768370f1765 | 0db42f51da16217e1436e8b2a25c2105e6807370 | /scrapy/enterprise/utils/__init__.py | d0861ddfc140ae92f26d18ff74941ca17b75dc99 | [
"Apache-2.0"
] | permissive | parker-pu/enterprise | b863e1d4625dd6cdc6d555bedd47ce7511b345b8 | 386f4f05948e17589e84263b5451b0f3b958135f | refs/heads/master | 2023-01-13T15:09:28.944845 | 2019-02-02T10:23:40 | 2019-02-02T10:23:40 | 167,884,746 | 1 | 0 | Apache-2.0 | 2023-01-03T16:17:19 | 2019-01-28T02:15:45 | Python | UTF-8 | Python | false | false | 89 | py | # encoding: utf-8
"""
@version: v1.0
@author: pu_yongjun
@license: Apache Licence
"""
| [
"i54605@outlook.com"
] | i54605@outlook.com |
2aca7809500d412cbb00feadb849529834481c51 | 3beaaf786f8cda63a120c9e9c1591fd5a77a087e | /4-matplotlib/py_chart.py | f0bc32734ebe81b46dad8896cd969d302b8de434 | [] | no_license | masun77/02-DataVis-5Ways | f9859626eea735305409e313fe5b857dfd4b8708 | e01df27782f2a7c5c69132a03b8c4b57367a5f80 | refs/heads/main | 2023-03-01T00:10:02.257639 | 2021-02-13T04:39:21 | 2021-02-13T04:39:21 | 338,385,284 | 0 | 0 | null | 2021-02-13T04:40:52 | 2021-02-12T17:22:58 | HTML | UTF-8 | Python | false | false | 579 | py | import pandas as pd
import matplotlib.pyplot as plt
cars = pd.read_csv('cars-py.csv', sep=',')
colors = {'bmw': 'red', 'ford':'green','honda':'brown','mercedes':'blue','toyota':'purple'}
fig, ax = plt.subplots()
plt.xticks([2000,3000,4000,5000],['2000','3000','4000','5000'])
plt.yticks([10,20,30,40],['10','20','30','40'])
ax.scatter(cars.Weight, cars.MPG, c=cars.Manufacturer.map(colors), s=cars.WeightSq * .00001, alpha=0.5)
ax.set_xlabel("Weight", fontsize=15)
ax.set_ylabel("MPG", fontsize=15)
ax.set_title('Matplotlib Chart')
ax.grid(True)
fig.tight_layout()
plt.show() | [
"masun@wpi.edu"
] | masun@wpi.edu |
ef20bd88e4759476dcc75c3f6b7922cfabf9032f | 1e7ce1c56f3030aa6df1e928bab559f50c59bad5 | /helper/bot_manager.py | e3e802244de0a4ed2bbaeaf01f2ee440f75652ae | [] | no_license | AIRob/WxRobot | f7fe37331c399a9d7fb467c7e913f10cc981f8eb | b27a48edb44694d4faa349d68d9b753fe4063276 | refs/heads/master | 2020-06-05T04:53:11.310909 | 2019-05-17T06:46:30 | 2019-05-17T06:46:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,571 | py | import json
import base64
from threading import Thread
from multiprocessing import Process
import time
from wxpy import *
from databases import models
from . import plugs_manager
import re
import requests
from multiprocessing import Pipe
import os
import threading
from helper.channels_manager import cm
from .plugs_manager import plugs_management as pm
from .debug import debug
# from wordcloud import WordCloud
# import jieba
# import matplotlib.pyplot as plt
# import numpy as np
# from PIL import Image
class Data_analysis(Thread):
def __init__(self, Bot ,callback_analysis_result,username):
super().__init__()
self.Bot = Bot
self.puid = Bot.user_details(Bot.self).puid
self.Bot.result = None
self.callback_analysis_result = callback_analysis_result
self.username = username
def run(self):
# 获取所有好友
friends = self.Bot.friends(update=True)
# 获取好友的数量
friends_count = len(friends[1:])
# 获取群聊数量
# 一些不活跃的群可能无法被获取到,可通过在群内发言,或修改群名称的方式来激活
groups_count = len(self.Bot.groups(update=True))
# 获取公众号数量
msp_count = len(self.Bot.mps(update=True))
# 获取所有人的性别
gender_statistics = {'male': len(friends.search(sex=MALE)), 'female': len(
friends.search(sex=FEMALE)), 'secrecy': len(friends.search(sex=None))}
# 获取所有人的个性签名
signatures = {i.name: i.signature for i in friends}
# 创建词云
# world_cloud = self.create_world_cloud(signatures,'/home/tarena/WxRobot/homepage/static/img/color_mask.jpg')
# 获取所有人的所在城市
region = {f.name: f.province for f in friends}
result_data = {
'friends_count': friends_count,
'groups_count': groups_count,
'msp_count': msp_count,
# 'world_cloud':world_cloud,
'gender_statistics': gender_statistics,
'region': region
}
# print(result_data)
self.callback_analysis_result(result_data,self.username)
def create_world_cloud(self, text, img_path):
text = text
color_mask_path = img_path
cut_text = " ".join(jieba.cut(" ".join(text)))
color_mask = np.array(Image.open(color_mask_path))
cloud = WordCloud(
# 设置字体,不指定就会出现乱码
# font_path=" C:\\Windows\\Fonts\\STXINGKA.TTF",
# 设置背景色
background_color='white',
# 词云形状
mask=color_mask,
# 允许最大词汇
max_words=2000,
# 最大号字体
max_font_size=40,
)
wCloud = cloud.generate(cut_text)
# 返回生成好词云对象
world_cloud = wCloud.to_image().tobytes()
return world_cloud
# return base64.b64encode(world_cloud).decode()
class Create_world_cloud(Thread):
def __init__(self, text, img_path):
"""
功能:将text按照img的形状做呈现出词云
:param text 需要呈现的文字,词组
:param color_mask_path 参照图路径地址
:return 制作完成的bytes格式图片
"""
super().__init__()
self.text = text
self.img_path = img_path
self.world_cloud = None
def run():
text = self.text
color_mask_path = self.img_path
cut_text = " ".join(jieba.cut(" ".join(text)))
color_mask = np.array(Image.open(color_mask_path))
cloud = WordCloud(
# 设置字体,不指定就会出现乱码
# font_path=" C:\\Windows\\Fonts\\STXINGKA.TTF",
# 设置背景色
background_color='white',
# 词云形状
mask=color_mask,
# 允许最大词汇
max_words=2000,
# 最大号字体
max_font_size=40,
)
wCloud = cloud.generate(cut_text)
# 返回生成好词云对象
self.world_cloud = wCloud.to_image()
def get_bytes_cloud(self):
'''
:return bytest格式的词云图片
'''
if self.world_cloud:
return self.world_cloud.tobytes()
else:
return None
def get_str_cloud(self):
'''
:return str格式的词云图片
'''
if self.world_cloud:
image = self.world_cloud.tobytes()
return self.imageToStr(image)
else:
return None
def imageToStr(self, image):
# 先将图片转换位byte类型,然后再转换为str
image_str = base64.b64encode(image).decode('ascii')
return image_str
class Robot_management():
def __init__(self):
self.robots = {}
def get_basic_data(self, puid , username):
"""
初始化登陆者的详细信息
:param bot_uuid 机器人的uuid标识符
:return 名称、头像,微信ID
"""
bot = self.get_bot(puid)
if not bot:
return None
try:
print("get_basic_data:-----------------------------",bot)
# 获取登陆者的详细信息
user_details = bot.user_details(bot.self)
user = models.UserInfo.objects.get(username = username)
# 获取插件用户所拥有插件信息
plug_querys = user.userplugs_set.all()
user_plugs = [plug_query for plug_query in plug_querys if plug_query.plug.isActive]
plug_all = models.Plugs.objects.filter(isActive = True).all()
plug_shops = [plug for plug in plug_all]
print("plug_shops",plug_shops)
print("plugs",user_plugs)
# 获取用户的定时发送信息
regularlysend_info = {
'timer':user.timer,
'text':user.text,
'repetition':user.repetition,
'timer_send_isActive':user.timer_send_isActive
}
details={
# 微信名称
'user_name':user_details.name,
# 微信头像
'avatar':base64.b64encode(user_details.get_avatar()).decode() ,
# 微信ID号
'status':'正常',
# 性别
'sex' : user_details.sex,
# 省份
'province' : user_details.province,
# 城市
'city' : user_details.city,
# 个性签名
'signature' : user_details.signature,
# 用户的插件
'user_plugs':user_plugs,
# 插件商店
'plug_shops':plug_shops,
# 当前登录的用户名
'username':username,
# 消息提示语
'clues':user.clues,
# 当前用户的定时发送信息
'regularlysend_info':regularlysend_info,
}
# print("登录这的基本信息如下:",details)
return details
except:
return None
def start_data_analysis(self,puid,username):
"""
数据分析入口函数
"""
print("开始进行数据分析")
bot = self.get_bot(puid)
data_analysis = Data_analysis(bot,self.callback_analysis_result,username = username)
data_analysis.start()
def callback_analysis_result(self,data,username):
"""
数据分析完成后的回调函数
"""
for _ in range(3):
cm.reply_channel_send(username,{
'analysis_result':data
}
)
time.sleep(2)
def get_data_intelligent(self,puid,username,data_intelligent=None):
"""
同步的方式获取好友和群组信息
"""
#已被选中的好友
select_friends =[f.fid for f in models.SelectedFriends.objects.all()]
#已被选中的群组
select_groups = [g.gid for g in models.SelectedGroups.objects.all()]
# print(dir(select_friends),select_groups,sep="\n")
print("正在:同步的方式获取好友和群组信息")
bot = self.get_bot(puid)
# 获取登陆者的好友和群组的详细信息
groups = bot.groups(update = True)
group_infos = []
for group in groups:
group.update_group(True)
gname = group.name
# print("群名称:",gname)
gowner = group.owner.name #群主
# print("群主:",gowner)
#所有群成员
members = group.members
# print("群内成员:",group.members)
# 统计性别
mtfratio = {'male':len(members.search(sex=MALE)),'female':len(members.search(sex=FEMALE)),'secrecy':len(members.search(sex=None))}
# print(mtfratio)
selected = True if group.puid in select_groups else False
# print("group_selected:",selected)
pcount = len(members) #群成员数量
group_infos.append({'gname':gname,'gowner':gowner,'pcount':pcount,'mtfratio':mtfratio,'puid':group.puid,'selected':selected})
# group_infos.append({'gname':gname,'gowner':gowner,'pcount':pcount,'puid':group.puid})
friends = bot.friends(update=True)[1:]
user_infos = []
sex_dict = {0:'保密',1:'男',2:'女'}
for friend in friends:
uname = friend.name
usex = sex_dict[friend.sex]
puid = friend.puid
selected = True if friend.puid in select_friends else False
# print("friend_selected",selected)
user_infos.append({'uname':uname,'usex':usex,'puid':friend.puid,'selected':selected})
ug_detail_info={'user_info':user_infos,'group_info':group_infos}
# 如果回调函数不为空,则调用回调函数
if data_intelligent:
print("调用回调函数返回:data_intelligent")
data_intelligent(ug_detail_info,username)
#直接返回
else:
print("直接返回:data_intelligent")
return ug_detail_info
def start_data_intelligent(self,puid,username):
"""
异步的方式获取好友和群组数据
return : 通过回调函数"callback_data_intelligent"反馈结果,参数为:data,username
"""
# 创建线程
data_intelligent = Thread(
target=self.get_data_intelligent,
args=(
puid,username,
self.callback_data_intelligent
))
data_intelligent.start()
print('启动:start_data_intelligent')
def callback_data_intelligent(self,data,username):
"""
数据分析完成后的回调函数
"""
# print(data,username)
# channel = lc.get_channels(username=username)
# while not channel:
# channel = lc.get_channels(username=username)
# time.sleep(1)
for _ in range(3):
cm.reply_channel_send(username,{
'intelligent_result':data
}
)
time.sleep(2)
def callback_analysis_result(self,data,username):
"""
智能聊天模块加载完成后的回调函数
"""
# channel = lc.get_channels(username=username)
# while not channel:
# channel = lc.get_channels(username=username)
# time.sleep(1)
# channel.reply_channel.send({
# 'text': json.dumps({
# 'analysis_result':data
# })
# })
for _ in range(3):
cm.reply_channel_send(username,{
'analysis_result':data
}
)
time.sleep(2)
# 增加需要被管理的机器人
def add_bot(self, puid, bot ,username):
"""
用于将需要被管理的机器人线程加入进来
:param bot_uuid
* 机器人的uuid号
:param bot
"""
print("添加时pid",os.getpid())
fs = Functional_scheduler(bot,username)
self.robots[puid] =[bot,fs]
# fs.setDaemon(True)
fs.start()
def get_bot(self, puid):
print('get_bot')
try:
# def func():
# print('子进程PID:%s'%os.getpid())
# bot = self.robots.get(puid)
# if bot:
# print(bot[0])
# return bot[0]
# for _ in range(10):
# p = Process(target=func)
# p.start()
# pass
# pid = os.fork()
# if pid < 0:
# print('创建进程失败')
# elif pid == 0:
# print('子进程PID:%s'%os.getpid())
# bot = self.robots.get(puid)
# if bot:
# return bot[0]
# else:
# sleep(0.5)
# print('父进程PID:%s'%os.getpid())
print("获取时pid",os.getpid())
return self.robots[puid][0]
# for i in range(1,10):
# bot = self.robots.get(puid)
# if bot:
# print("get_bot------------------------", bot)
# return bot[0]
# else:
# print('没有获取到,尝试下一次获取...')
# time.sleep(0.1) #0.1,0.2...
# else:
# return None
except:
return None
def get_fs(self,puid):
try:
return self.robots[puid][1]
except:
return None
def del_bot(self,puid):
bot = self.get_bot(puid)
bot.registered.disable()
del self.robots[puid]
def select_obj(self,puid):
# 获取Functional_scheduler对象
fs = self.get_fs(puid)
# 获取所有的好友和群组信息
friends_all = fs.friends_all
groups_all = fs.groups_all
# 从数据库中获取所有已经被选中的好友和群组puid
m_friends = models.SelectedFriends.objects.all()
m_groups = models.SelectedGroups.objects.all()
select_friends = []
select_groups = []
for f in m_friends:
friend = friends_all.search(puid =f.fid)
if friend:
select_friends.append(friend[0])
for g in m_groups:
group = groups_all.search(puid =g.gid)
if group:
select_groups.append(group[0])
return {'select_friends':select_friends,'select_groups':select_groups}
robot_management = Robot_management()
class Functional_scheduler(Thread):
def __init__(self,bot,username):
super().__init__()
self.bot = bot
self.username = username
self.friends = []
self.groups = []
self.select_function = {}
self.regularly_send_flag =True
# 获取所有的好友和群组对象
self.friends_all = bot.friends() #获取更新好友列表
self.groups_all = bot.groups()
def run(self):
self.functional_scheduler()
def functional_scheduler(self):
bot = self.bot
friends = self.friends
groups = self.groups
tuling = Tuling(api_key='91bfe84c2b2e437fac1cdb0c571cac91')
def get_plug(msg):
"""
获取插件方法和插件所在路径
"""
try:
msg_type = msg.type
print('消息类型:',msg_type)
print("select_function:",self.select_function[msg_type])
# 用已注册除all外的所有插件去匹配消息内容
for keyword in self.select_function[msg_type]:
if msg_type != "Text":
continue
res = re.search(keyword,msg.text)
if res:
print("匹配结果:",res.group())
print(keyword)
function_name = self.select_function[msg.type][keyword].get('attr_name')
plug_dir = self.select_function[msg.type][keyword].get('plug_dir')
break
# 如果用没有匹配到任何内容,则使用all来匹配
else:
function = self.select_function[msg.type]
print(function)
if function.get("None"):
function_name = function["None"].get('attr_name')
plug_dir = function["None"].get('plug_dir')
else:
print("没有匹配到function")
return None ,None
print("匹配到的function_name为:",function_name)
return pm.register_plugs[function_name].main,plug_dir
except Exception as e:
print('获取方法出错',e)
return None ,None
def message_parser(msg):
"""
解析接受到的消息并进行解析
选择合适的插件进行处理
:params 接收到的消息对象
:return plug
"""
fd1,fd2 = Pipe(duplex=False)
function,plug_dir = get_plug(msg)
print(function)
if function:
# 创建一个用于自动回复的进程
p = Process(target=function,args=(msg,plug_dir,fd2))
p.start()
# msg.reply("消息处理中...")
# # 阻塞等待消息处理完毕
p.join()
result = fd1.recv()
# 关闭管道
fd1.close()
try:
if type(result) == list:
for line in result:
# print(line)
yield line
else:
yield result
except Exception as e:
print('获取插件返回结果出现错误:',e)
return ("执行插件失败"+e)
print(ret)
return ret
# 图灵回复
@bot.register(self.friends)
def friends_message(msg):
print('[接收来自好友:]' + str(msg))
# 对接受到的消息进行解析
# 并根据消息类型选择插件进行处理
# 获取消息的解析结果
# ret = message_parser(msg)
# 图片
# msg.reply('@img@/home/tarena/WxRobot/static/upload/Plugs/Web_Image2/timg.jpg')
# 视频
# msg.reply("@vid@/home/tarena/WxRobot/static/upload/Plugs/Auto_Ai/f66ee8c095d1e3e448bc4e69958cda9e.mp4")
# 文件
# msg.repl("@fil@/home/tarena/WxRobot/wxRobot/urls.py")
for info in message_parser(msg):
print(info)
content_type = info.get('type')
if content_type== "@msg@" or not content_type:
ret = info['content']
else:
ret = content_type +info['content']
print(type(ret))
print('发送消息:',ret)
msg.reply(ret)
@bot.register(self.groups)
def group_message(msg):
print('[接收来自群聊:]' + str(msg))
if msg.is_at:
# 对接受到的消息进行解析
# 并根据消息类型选择插件进行处理
# 获取消息的解析结果
ret = message_parser(msg)
print('[发送]' + str(ret))
return ret
def refresh_listening_obj(self,puid):
print('================----------------================')
bot = robot_management.get_bot(puid)
print(puid,bot,sep='\n')
# 获取所有的好友和群组对象
friends = self.friends_all
groups = self.groups_all
# 从数据库中获取所有已经被选中的好友和群组puid
m_friends = models.SelectedFriends.objects.all()
m_groups = models.SelectedGroups.objects.all()
# 用从数据库中查找出已被选中的好友或者群组Puid获取对应的对象
select_friends = []
select_groups = []
# 清空上一次的选中的内容
self.friends.clear()
self.groups.clear()
# 两种方法,列表生成式和普通遍历
# self.friends = [friends.search(puid == f.puid) for f in m_friends if friends.search(puid == f.puid)]
for f in m_friends:
friend = friends.search(puid =f.fid)
if friend and friend[0] not in self.friends:
# print("添加好友:",friend[0])
self.friends.append(friend[0])
# self.groups = [groups.search(puid == g.puid) for g in m_groups if groups.search(puid == g.puid)]
for g in m_groups:
group = groups.search(puid =g.gid)
if group and groups[0] not in self.groups:
# print("添加群聊:",group[0])
self.groups.append(group[0])
# print(self.friends,self.groups,sep="\n")
def refresh_function(self):
# 获取插件用户所拥有插件信息
plug_querys = models.UserInfo.objects.filter(username = self.username).first().userplugs_set.filter(isActive=True)
# 清空所有原先功能状态
self.select_function.clear()
self.select_function = {"Text":{},"Map":{},"Card":{},"Note":{},"Sharing":{},"Picture":{},
"Recording":{},
"Attachment":{},
"Video":{},
"Friends":{},
"System":{},
}
for plug_query in plug_querys:
# 如果插件没有激活
if not plug_query.plug.isActive:
continue
# 获取插件属性
plug = plug_query.plug
# 获取插件存储路径
file_path = plug.plug.path
# 获取调用方法名
l = file_path.split("/")
attr_name = l[-1:][0][:-4]
# 将包名的首字母转换为大写,然后作为文件夹名称
dir_name = l[-1][:-4].title()
# 将路径和文件名成拼接,组成新的路径
plug_dir = "/".join( l[:-1] )+"/"+dir_name
print(plug_dir)
self.select_function[plug.msg_type][str(plug.wake_word)] = {
'title':plug.ptitle,
'pdescribe':plug.pdescribe,
'attr_name':attr_name,
'plug_dir':plug_dir,
}
print("select_function",self.select_function)
def refresh_regularly_send(self):
user= models.UserInfo.objects.filter(username=self.username).first()
print('dir',dir(user.timer))
timer = user.timer.strftime('%H:%M')
# 将时间字符串转换为时间戳
h,m = timer.strip().split(':')
seconds = int(h)*3600+int(m)*60
print("{0}被转换成时间戳后为:{1}".format(user.timer,seconds))
res_dict = {
"seconds" : seconds,
"repetition" : user.repetition,
"text":user.text,
"timer_send_isActive" : user.timer_send_isActive,
}
return res_dict
def stop_regularly_send(self):
# self.regularly_send_flag = False
try:
#终止定时发送线程
debug.kill_thread(self.regularly_send_thread)
except Exception as e:
print('终止定时发送线程失败!!!',e)
return False
return True
def start_regularly_send(self,seconds,text,repetition):
# 获取puid身份标识符
puid = self.bot.user_details(self.bot.self).puid
select_obj = robot_management.select_obj(puid)
print(seconds,text,repetition)
def run():
while True:
print('正在等待....')
time.sleep(seconds)
# 给所有被关注的好友或者群聊发送提示信息
for item in select_obj:
for friend in select_obj[item]:
friend.send(text)
print('发送给:',friend)
# 为了防止发送消息频率过快导致意想不到的后果
# 这里每发送一条消息,休息0.5秒
time.sleep(0.5)
if repetition == "once":
user= models.UserInfo.objects.filter(username=self.username).first()
user.timer_send_isActive = False
user.save()
print('发送完毕')
break
self.regularly_send_thread = Thread(target=run)
self.regularly_send_thread.start()
| [
"1194681498@qq.com"
] | 1194681498@qq.com |
e914fd2dad05b2f9beae9516cd3e87c48c168c6f | 2eb8659df54a83f3c0f7ade1ca12fa23d289cdf2 | /musixmatch/test.py | c890a48e172cadb48dcca2d3a807e2ae24a89207 | [] | no_license | zmbush/mp-complete | 910c67c8e1ca83c549fb88a9b52e65ff8feba8bd | a590a03aa3afbd80d960e54581f6fd704d31d232 | refs/heads/master | 2023-05-10T21:52:30.568201 | 2012-08-27T18:06:54 | 2012-08-27T18:06:54 | 3,475,577 | 0 | 0 | null | 2023-05-01T20:13:49 | 2012-02-18T02:03:42 | JavaScript | UTF-8 | Python | false | false | 3,238 | py | #!/usr/bin/env python
"""
test.py
by Amelie Anglade and Thierry Bertin-Mahieux
amelie.anglade@gmail.com & tb2332@columbia.edu
Testing code for the pyMusixMatch wrapper.
Can serve as a demo.
(c) 2011, A. Anglade and T. Bertin-Mahieux
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import track as TRACK
import artist as ARTIST
import tracking as TRACKING
def die_with_usage():
""" HELP MENU """
print 'test.py'
print ' by A. Anglade and T. Bertin-Mahieux'
print ' New York Music Hack Day, February 2011'
print ''
print 'This code test the MusixMatch Python API'
print 'It also serves as a demo.'
print "CAREFUL: it uses API requests, you're probably limited."
print ''
print 'USAGE'
print ' python test.py -go'
sys.exit(0)
if __name__ == '__main__':
# help menu
if len(sys.argv) < 2 or sys.argv[1] in ('help','-help','--help'):
die_with_usage()
# create a track
track = TRACK.Track(4110618)
print '*********** TRACK 4110618 ACQUIRED ************'
print track
# get a list of tracks from a search
tracks = TRACK.search(q='Rick Astley Never Gonna Give You Up')
print '********** LIST OF TRACKS ACQUIRED ************'
# for k in range(min(3,len(tracks))):
# print tracks[k]
print tracks[0]
handle = open("track.txt", 'w')
handle.write(str(tracks[0]))
# get a list of tracks from charts
tracks = TRACK.chart()
print '****** LIST OF TRACKS FROM CHART ACQUIRED ******'
for k in range(min(3,len(tracks))):
print tracks[k]
# lyrics
lyrics_dict = track.lyrics()
print '************* LYRICS ACQUIRED ************'
print lyrics_dict
# artist
artist = ARTIST.Artist(10832)
print '*********** ARTIST 10832 ACQUIRED ************'
print artist
# get a list of artists from a search
artists = ARTIST.search(q='Jean Leloup')
print '********** LIST OF ARTISTS ACQUIRED ************'
for k in range(min(3,len(artists))):
print artists[k]
# get a list of artists from charts
artists = ARTIST.chart()
print '**** LIST OF ARTISTS FROM CHART ACQUIRED *******'
for k in range(min(3,len(artists))):
print artists[k]
# get a base url for my domain
base_url = TRACKING.get_tracking_url('http://myawesomewebsite.com')
print '********** TRACKING URL BUILT ************'
print base_url
# get clearance rights for my song
song_rights_url = TRACKING.rights_clearance(base_url,'Bon Jovi','Leaving on a Prayer')
print '********** SONG CLEARANCE RIGHTS ACQUIRED ************'
print song_rights_url
| [
"crabb.andre@gmail.com"
] | crabb.andre@gmail.com |
0474fa45198217580e510aa508fb250d4bbf4922 | 9d346f2981c8cf4cfc21abb73e14278faa9b0e12 | /leetcode/leetcode2.py | 95eff1f96f94b26dd6f2fabad86d7a9b64b86ad3 | [] | no_license | wwwzhangshenzecn/code | c13caf013f780642d47b23a3ed67f58cfcb13f95 | acac4ed98a14c4d2f3636c766ba5fda17896a676 | refs/heads/master | 2020-11-29T09:11:49.555287 | 2020-05-20T14:39:24 | 2020-05-20T14:39:24 | 230,077,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113,626 | py | # Definition for a binary tree node.
# -*- coding: utf-8 -*-
# @Time : 2018/11/11 14:55
# @Author : zz
# @Project : workspace2
# @FileName: leetcode2.py
# 著作权归作者所有(随便拷)。
'''
.-~~~~~~~~~-._ _.-~~~~~~~~~-.
__.' ~. .~ `.__
.'// \./ \\`.
.'// | \\`.
.'// .-~"""""""~~~~-._ | _,-~~~~"""""""~-. \\`.
.'//.-" `-. | .-' "-.\\`.
.'//______.============-.. \ | / ..-============.______\\`.
.'______________________________\|/______________________________`.
'''
import this
from decimal import Decimal, getcontext
import copy
from collections import defaultdict
import json
import time
from functools import wraps
import collections
import functools
import math
import heapq
import numpy as np
import random
class NumMatrix:
    """Region-sum queries over an immutable 2-D matrix (LeetCode 304),
    backed by a NumPy array and sliced per query."""

    def __init__(self, matrix: list):
        self.matrix = np.asarray(matrix)

    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        # inclusive rectangle [row1..row2] x [col1..col2]
        region = self.matrix[row1:row2 + 1, col1:col2 + 1]
        return np.sum(region)
class MaxHeap(object):
    """Array-backed binary max-heap.

    Fixes from the original: __repr__ must return a string (it returned a
    list, raising TypeError on print) and must not show elements that were
    already popped; pop() now also trims the stale tail slot it used to
    leave behind in self._data.
    """

    def __init__(self):
        self._data = []
        self._count = len(self._data)

    def size(self):
        return self._count

    def isEmpty(self):
        return self._count == 0

    def add(self, item):
        # append the item and sift it up to its place
        self._data.append(item)
        self._count += 1
        self._shiftup(self._count - 1)

    def pop(self):
        # pop and return the maximum; None when empty
        if self._count > 0:
            ret = self._data[0]
            self._data[0] = self._data[self._count - 1]
            self._count -= 1
            self._data.pop()  # drop the now-duplicated tail slot (was leaked before)
            self._shiftDown(0)
            return ret

    def getTOP(self):
        if self._count > 0:
            return self._data[0]

    def _shiftup(self, index):
        # move self._data[index] up until its parent is not smaller
        parent = (index - 1) >> 1
        while index > 0 and self._data[parent] < self._data[index]:
            self._data[parent], self._data[index] = self._data[index], self._data[parent]
            index = parent
            parent = (index - 1) >> 1

    def _shiftDown(self, index):
        # move self._data[index] down until no child is larger
        j = (index << 1) + 1
        while j < self._count:
            if j + 1 < self._count and self._data[j + 1] > self._data[j]:
                # right child exists and is the larger one
                j += 1
            if self._data[index] >= self._data[j]:
                break
            self._data[index], self._data[j] = self._data[j], self._data[index]
            index = j
            j = (index << 1) + 1

    def __repr__(self):
        # live elements only, largest first, as a string
        return repr(sorted(self._data, reverse=True))
class MyQueue:
    """FIFO queue over a Python list (LeetCode 232 scaffolding).

    Fixes from the original: pop()/peek() had their emptiness guard
    inverted (they returned None exactly when the queue was NON-empty,
    and crashed on an empty one), and empty() called the non-existent
    list.empty().
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.queue = []

    def push(self, x: int) -> None:
        """
        Push element x to the back of queue.
        """
        self.queue.append(x)

    def pop(self) -> int:
        """
        Removes the element from in front of queue and returns that element.
        Returns None when the queue is empty.
        """
        if not self.queue:
            return None
        return self.queue.pop(0)

    def peek(self) -> int:
        """
        Get the front element, or None when the queue is empty.
        """
        if not self.queue:
            return None
        return self.queue[0]

    def empty(self) -> bool:
        """
        Returns whether the queue is empty.
        """
        return not self.queue
class MyStack:
    """LIFO stack (LeetCode 225 scaffolding): a list plus an explicit size counter."""

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.queue = []
        self.len = 0

    def push(self, x: int) -> None:
        """
        Push element x onto stack.
        """
        # appending is equivalent to inserting at index self.len
        self.queue.append(x)
        self.len += 1

    def pop(self) -> int:
        """
        Removes the element on top of the stack and returns that element.
        """
        self.len -= 1
        return self.queue.pop()

    def top(self) -> int:
        """
        Get the top element.
        """
        return self.queue[-1]

    def empty(self) -> bool:
        """
        Returns whether the stack is empty.
        """
        return self.len == 0
class WordDictionary(object):
    '''
    Add-and-search word dictionary (LeetCode 211).  Words are bucketed by
    their first letter (plus a '.' bucket holding every word); '.' in a
    query matches any single letter.  Search results are memoized until
    the next insertion.
    '''

    def __init__(self):
        """
        Initialize your data structure here.
        """
        # first letter (or '.') -> set of stored words
        self.dictionary = collections.defaultdict(set)
        # query string -> cached outcome of a completed search
        self.search_d = collections.defaultdict(bool)

    def addWord(self, word):
        """
        Adds a word into the data structure.
        :type word: str
        :rtype: None
        """
        if not word:
            return
        # store under both the first letter and the wildcard bucket
        indexs = [word[0], '.']
        for index in indexs:
            self.dictionary[index].add(word)
        # any cached answer may now be stale
        self.search_d.clear()

    def check(self, key, word):
        # Linear scan of bucket `key` for a word matching `word`
        # ('.' is a one-character wildcard); caches the outcome.
        if len(self.dictionary[key]) == 0:
            return False
        for w in self.dictionary[key]:
            if len(w) != len(word):
                continue
            flag = 0
            for i in range(len(w)):
                if word[i] == '.':
                    continue
                else:
                    if w[i] != word[i]:
                        flag = 1
                        break
            if flag == 0:
                self.search_d[word] = True
                return True
        self.search_d[word] = False
        return False

    def cache(self, search_word):
        '''
        Return the cached result for a previously completed search,
        or None when this query has not been answered yet.
        '''
        if search_word in self.search_d.keys():
            return self.search_d[search_word]
        return None

    def search(self, word):
        """
        Returns if the word is in the data structure.
        A word could contain the dot character '.' to represent any one letter.
        :type word: str
        :rtype: bool
        """
        # NOTE(review): an empty query returns None, not False
        if not word:
            return
        if self.cache(word) is not None:
            print('已进行搜索:', word)
            return self.search_d[word]
        if word.startswith('.'):
            return self.check('.', word)
        else:
            return self.check(word[0], word)
class Trie(object):
    """Prefix 'tree' (LeetCode 208) stored flat: every prefix maps to the
    set of inserted words that start with it."""

    def __init__(self):
        self.trie = collections.defaultdict(set)

    def insert(self, word):
        """Insert `word`, registering it under every one of its prefixes."""
        for end in range(1, len(word) + 1):
            self.trie[word[:end]].add(word)

    def search(self, word):
        """Return True iff `word` itself was inserted."""
        return word in self.trie[word]

    def startsWith(self, prefix):
        """Return True iff some inserted word starts with `prefix`."""
        return len(self.trie[prefix]) > 0
class LRUCache:
    """LRU cache (LeetCode 146): a recency list (front = least recent)
    plus a plain key->value dict."""

    def __init__(self, capacity: int):
        self.LRUkeyslist = list()   # keys ordered by recency
        self.LRUdict = dict()
        self.capacity = capacity

    def get(self, key: int) -> int:
        """Return the value for `key` (marking it most-recent), or -1."""
        if key not in self.LRUdict:
            return -1
        # touch: move the key to the most-recent end
        self.LRUkeyslist.remove(key)
        self.LRUkeyslist.append(key)
        return self.LRUdict[key]

    def put(self, key: int, value: int) -> None:
        """Insert or update `key`; evict the least-recent entry when full."""
        if key in self.LRUdict:
            # existing key: just refresh value and recency
            self.LRUkeyslist.remove(key)
        elif len(self.LRUkeyslist) == self.capacity:
            # full: drop the least-recently used key
            oldest = self.LRUkeyslist[0]
            del self.LRUkeyslist[0]
            self.LRUdict.pop(oldest)
        self.LRUdict[key] = value
        self.LRUkeyslist.append(key)
class LRUCache:
    """LRU cache (LeetCode 146) built on collections.OrderedDict, whose
    insertion order doubles as the recency order."""

    def __init__(self, capacity: int):
        self.LRUdict = collections.OrderedDict()
        self.capacity = capacity

    def get(self, key: int) -> int:
        """Return the value for `key` (refreshing its recency), or -1."""
        try:
            value = self.LRUdict.pop(key)
        except KeyError:
            return -1
        # re-insert so the key becomes most-recent
        self.LRUdict[key] = value
        return value

    def put(self, key: int, value: int) -> None:
        """Insert or update `key`; evict the oldest entry when full."""
        if key in self.LRUdict:
            # drop and re-add so the key moves to the most-recent end
            del self.LRUdict[key]
        elif len(self.LRUdict) >= self.capacity:
            # evict the least-recently used item (front of the dict)
            self.LRUdict.popitem(last=False)
        self.LRUdict[key] = value
# Definition for singly-linked list.
class ListNode(object):
    """Singly linked list node: a value and a pointer to the next node."""

    def __init__(self, x=0):
        self.val, self.next = x, None
def Isprime(n):
    """Return True iff n is a prime number.

    Fixes the original behavior where n <= 1 (including 0, 1 and every
    negative number) was reported as prime.
    """
    if n < 2:
        return False
    if n <= 3:   # 2 and 3 are prime
        return True
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True
def buildLink(l: list) -> ListNode:
    """Build a singly linked list from the values in `l`; return its head
    (None for an empty list)."""
    if not l:
        return None
    head = ListNode(l[0])
    tail = head
    for value in l[1:]:
        tail.next = ListNode(value)
        tail = tail.next
    return head
def printLint(l: ListNode):
    """Print the list's values on one line, space-separated, then a newline."""
    while l:
        print(l.val, end=' ')
        l = l.next
    print()
def sort(num: list):
    """Sort `num` in place, assuming it is a permutation of 0..len(num)-1.

    Walks targets i from high to low; whenever slot i does not hold i,
    the element 0 is used as a swap buffer to route value i into slot i.
    NOTE(review): correctness relies on `num` containing each of
    0..len-1 exactly once — confirm against callers.  Prints the result.
    """
    i = len(num) - 1
    while i >= 0:
        if i != num[i]:
            index_i = num.index(i)
            index0 = num.index(0)
            # park num[i] where 0 was, then swap value i into slot i
            num[index0], num[i] = num[i], num[index0]
            num[i], num[index_i] = num[index_i], num[i]
        i -= 1
    print(num)
class TreeNode:
    """Binary tree node.

    Now accepts optional left/right children: call sites such as
    BuildTree(...) pass three positional arguments, which crashed with the
    original one-argument constructor.  Plain TreeNode(x) calls behave
    exactly as before.
    """

    def __init__(self, x=0, left=None, right=None):
        self.val = x
        self.left = left
        self.right = right
def printTree(root):
    """Print the tree's values in pre-order on one line (trailing space kept)."""
    if not root:
        return
    print(root.val, end=' ')
    printTree(root.left)
    printTree(root.right)
class BSTIterator:
    """In-order iterator over a BST (LeetCode 173).

    NOTE(review): this implementation is destructive — next() physically
    unlinks the minimum node from the tree rather than keeping a stack,
    and hasNext() only checks whether any tree remains.
    """

    def __init__(self, root: TreeNode):
        self.root = root

    def next(self) -> int:
        """
        @return the next smallest number
        """
        # walk to the leftmost (minimum) node, remembering its parent
        pre = None
        ptr = self.root
        while ptr.left:
            pre = ptr
            ptr = ptr.left
        value = ptr.val
        if not pre:
            # the root itself is the minimum: drop it, keep its right side
            self.root = self.root.right
            return value
        if not ptr.left and ptr.right:
            # splice the minimum's right subtree into its parent
            pre.left = ptr.right
        else:
            pre.left = None
        ptr = None
        return value

    def hasNext(self) -> bool:
        """
        @return whether we have a next smallest number
        """
        pass
        return True if self.root else False
def BuildTree(layer: list, mid: list):
    """Rebuild a binary tree from its level-order (`layer`) and in-order
    (`mid`) traversals.

    The first element of `layer` that occurs in `mid` is the subtree root;
    `layer` is consumed destructively as the recursion proceeds, so the
    left subtree MUST be built before the right one.

    Fix: the original passed the children straight into TreeNode(...),
    but TreeNode.__init__ only takes the value — assign .left/.right
    explicitly (this also works with a 1-argument TreeNode).
    """
    if mid == []:
        return None
    i = 0
    while layer[i] not in mid:
        i += 1
    root_val = layer[i]
    layer.remove(root_val)
    root_index = mid.index(root_val)
    root = TreeNode(root_val)
    root.left = BuildTree(layer, mid[0:root_index])
    root.right = BuildTree(layer, mid[root_index + 1:])
    return root
# Definition for a Node.
class Node:
    """Linked-list node with an extra `random` pointer (LeetCode 138)."""

    def __init__(self, val, next=None, random=None):
        self.val, self.next, self.random = val, next, random
def stringToTreeNode(input):
    """Parse a LeetCode-style level-order string such as "[1,2,null,3]"
    into a TreeNode tree; return None for an empty list.

    NOTE(review): the parameter shadows the builtin `input`.
    """
    input = input.strip()
    input = input[1:-1]   # strip the surrounding brackets
    if not input:
        return None

    inputValues = [s.strip() for s in input.split(',')]
    root = TreeNode(int(inputValues[0]))
    nodeQueue = [root]   # nodes still waiting to receive children
    front = 0            # head index into nodeQueue (avoids list.pop(0))
    index = 1            # next value to consume
    while index < len(inputValues):
        node = nodeQueue[front]
        front = front + 1

        item = inputValues[index]
        index = index + 1
        if item != "null":
            leftNumber = int(item)
            node.left = TreeNode(leftNumber)
            nodeQueue.append(node.left)

        if index >= len(inputValues):
            break

        item = inputValues[index]
        index = index + 1
        if item != "null":
            rightNumber = int(item)
            node.right = TreeNode(rightNumber)
            nodeQueue.append(node.right)
    return root
def integerListToString(nums, len_of_list=None):
    """Serialize the first `len_of_list` ints of `nums` as a JSON array
    string; a falsy `len_of_list` means the whole list (as before)."""
    limit = len_of_list or len(nums)
    return json.dumps(nums[:limit])
def Time(name='FUNC', n=1):
    """Decorator factory: run the wrapped function max(n, 1) times, print a
    timing report, and return the last call's result.

    `name` is accepted for backward compatibility but never used (as in
    the original).
    """
    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            start = time.time()
            result = None
            # the original ran n-1 warm-up calls plus one final call,
            # i.e. max(n, 1) invocations in total
            for _ in range(max(n, 1)):
                result = func(*args, **kwargs)
            print('\n函数: {name:>10}\nargs: {args}\n运行 {n}次\n需要时间: {time}\n'.format(
                name=func.__name__, n=n, args=args, time=time.time() - start))
            return result

        return wrapper

    return decorate
class MinStack:
    """Stack with O(1) getMin (LeetCode 155).

    Fix: the original tracked only the *index* of the minimum, which went
    stale as soon as pop() removed elements at or after it; a parallel
    stack of running minimums stays correct across pops.
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.stack = []
        self._mins = []   # _mins[i] == min(stack[:i + 1])

    def push(self, x: int) -> None:
        self.stack.append(x)
        self._mins.append(x if not self._mins or x < self._mins[-1] else self._mins[-1])

    def pop(self) -> None:
        # returns the popped value (the original did too, despite -> None)
        self._mins.pop()
        return self.stack.pop()

    def top(self) -> int:
        return self.stack[-1]

    def getMin(self) -> int:
        return self._mins[-1]
class Solution:
    def maxPathSum(self, root: TreeNode) -> int:
        """LeetCode 124: maximum sum over all node-to-node paths.

        dfs returns the best *downward* path ending at a node (at least the
        node's own value); `nonroot` accumulates the best full path seen
        anywhere (including paths bending through a node).
        NOTE(review): assumes root is not None (root.val is read up front).
        """
        nonroot = root.val

        # 124.
        def dfs(root: TreeNode) -> int:
            nonlocal nonroot
            if not root:
                return float('-inf')
            else:
                left = dfs(root.left)
                right = dfs(root.right)
                mid = root.val
                # best path bending through this node, or confined to a subtree
                nonroot = max(nonroot, left + right + mid, left, right)
                return max(mid, left + mid, right + mid)

        return max(dfs(root), nonroot)
def metricword(self, word1, word2):
count = 0
for i in range(len(word1)):
if word1[i] != word2[i]:
count += 1
if count > 1:
return 2
return count
    @Time()
    def metric(self, word):
        """Build the adjacency map of the word list: two words are
        neighbours when they differ in exactly one position (metricword == 1)."""
        preboard = defaultdict(list)
        for j in range(len(word)):
            for i in range(j):
                if self.metricword(word[i], word[j]) == 1:
                    preboard[word[j]].append(word[i])
                    preboard[word[i]].append(word[j])
        return preboard
    @Time()
    def bfs(self, w, word, preboard, beginWord):
        """Breadth-first search from `w` back to `beginWord` over the
        `preboard` adjacency map; returns `count` at the moment beginWord
        is dequeued, or 0 when unreachable.

        `stack`/`p` are the current and next frontier; `word` is unused.
        NOTE(review): `count` increments once per dequeued word, not per
        level — the caller compensates by adding 1 to form its bound.
        """
        stack = [w]  # current frontier
        p = []
        count = 0
        while stack:
            w = stack[0]
            del stack[0]
            if w == beginWord:
                return count
            for i in preboard[w]:
                if i not in p:
                    p.append(i)
            if len(stack) == 0:
                p, stack = stack, p
            count += 1
        return 0
    @Time(n=1)
    def findLadders(self, beginWord: str, endWord: str, wordList: list) -> list:
        """LeetCode 126 attempt: all shortest transformation sequences."""
        # Idea: turn the word list into a graph, then search for paths:
        # bfs (front to back) gives a shortest-length bound,
        # dfs (back to front) enumerates the actual paths within it.
        results = []
        if beginWord not in wordList:
            word = [beginWord] + wordList
        else:
            word = wordList
        preboard = self.metric(word)
        minlen = self.bfs(endWord, word, preboard, beginWord) + 1

        def dfs(w=endWord, result=[endWord]):
            if len(result) > minlen:
                pass
            elif w == beginWord:
                results.append(result[::-1])
            else:
                for t in preboard[w]:
                    if t not in result:
                        dfs(t, result + [t])

        dfs()
        return results
def findLadders2(self, beginWord: str, endWord: str, wordList: list) -> list:
import collections
import string
if endWord not in wordList or not endWord or not beginWord:
return []
wordList = set(wordList)
s, e = {beginWord}, {endWord}
d = 1
par = collections.defaultdict(set)
ls = set(string.ascii_lowercase)
while s and e:
if len(s) > len(e):
s, e = e, s
d *= -1
temp = set()
new = collections.defasultdict(set)
wordList -= s
for word in s:
for i in range(len(word)):
first, second = word[:i], word[i + 1:]
for ch in ls:
combined_word = first + ch + second
if combined_word in wordList:
temp.add(combined_word)
if d == 1:
new[combined_word].add(word)
else:
new[word].add(combined_word)
s = temp
par.update(new)
if temp & e:
res = [[endWord]]
while res[0][0] != beginWord:
res = [[p] + w for w in res for p in par[w[0]]]
return res
return []
    @Time(n=1)
    def findLadders3(self, beginWord: str, endWord: str, wordList: list):
        """Word-ladder length via BFS, recording each word's parent and its
        distance from beginWord as the search expands.

        NOTE(review): `pathNode` is built but never returned or read by
        callers visible here.
        """
        # record path lengths while the graph is being explored
        if endWord not in wordList:
            return 0
        wordList = set(wordList) - set([beginWord])
        path = collections.defaultdict(int)  # word -> distance from beginWord
        path[beginWord] = 0
        father = collections.defaultdict(str)  # word -> parent one edit away
        father[beginWord] = beginWord
        pathNode = collections.defaultdict(list)
        p = [beginWord]
        q = []
        while len(p) > 0:
            top = p.pop()
            pathNode[top] = pathNode[father[top]] + [top]
            path[top] = path[father[top]] + 1
            if top == endWord:
                break
            for j in range(len(top)):
                for i in 'abcdefghijklmnopqrstuvwxyz':
                    w = top[:j] + i + top[j + 1:]
                    if w in wordList:
                        father[w] = top
                        q.append(w)
                        wordList = wordList - set([w])
            if p == []:
                p, q = q, p
        return path[endWord]
def ladderLength(self, beginWord, endWord, wordList):
queue = [(beginWord, 1)]
visited = set()
while queue:
word, dist = queue.pop(0)
if word == endWord:
return dist
for i in range(len(word)):
for j in 'abcdefghijklmnopqrstuvwxyz':
tmp = word[:i] + j + word[i + 1:]
if tmp not in visited and tmp in wordList:
queue.append((tmp, dist + 1))
visited.add(tmp)
return 0
    @Time(n=1)
    def findLadders4(self, beginWord: str, endWord: str, wordList: list):
        """Same BFS ladder-length search as findLadders3, without the
        unused pathNode bookkeeping."""
        # record path lengths while the graph is being explored
        if endWord not in wordList:
            return 0
        wordList = set(wordList) - set([beginWord])
        path = collections.defaultdict(int)  # word -> distance from beginWord
        father = collections.defaultdict(str)  # word -> parent one edit away
        father[beginWord] = beginWord
        p = [beginWord]
        q = []
        while len(p) > 0:
            top = p.pop()
            path[top] = path[father[top]] + 1
            if top == endWord:
                break
            for j in range(len(top)):
                for i in 'abcdefghijklmnopqrstuvwxyz':
                    w = top[:j] + i + top[j + 1:]
                    if w in wordList:
                        father[w] = top
                        q.append(w)
                        wordList = wordList - set([w])
            if p == []:
                p, q = q, p
        return path[endWord]
@Time(n=10000)
def longestConsecutive(self, nums: list) -> int:
nums = list(set(nums))
maxlen = 0
start, end = 0, 0
while start < len(nums):
end = start
while end < len(nums) - 1 and nums[end + 1] - nums[end] <= 1:
end += 1
maxlen = max(maxlen, end - start + 1)
start = end + 1
return maxlen
def SumPath(self, result):
s = 0
for k, v in enumerate(result[::-1]):
s += pow(10, k) * v
return s
def sumNumbers(self, root: TreeNode) -> int:
s = 0
def dfs(root=root, result=[]):
if not root:
pass
elif root and not root.left and not root.right:
nonlocal s
s += self.SumsPath(result + [root.val])
else:
dfs(root.left, result + [root.val])
dfs(root.right, result + [root.val])
dfs()
return s
    def solve(self, board: list) -> None:
        """LeetCode 130: capture 'O' regions surrounded by 'X', in place.

        Builds an adjacency map of 'O' cells, grows the set reachable from
        border 'O's, then rewrites the board keeping only those.
        Boards with <= 2 rows are untouched — every 'O' there touches the
        border anyway.
        """
        # build a graph of O-cells and grow it from the border seeds
        if len(board) > 2:
            Otable = collections.defaultdict(list)
            start = []    # border 'O' seeds; doubles as the BFS work list below
            grow = set()  # every 'O' reachable from the border
            for i in range(len(board)):
                for j in range(len(board[0])):
                    if board[i][j] == 'O':
                        if i == 0 or i == len(board) - 1 or j == 0 or j == len(board[0]) - 1:
                            start.append((i, j))
                        if i > 0 and board[i - 1][j] == 'O':
                            Otable[(i, j)].append((i - 1, j))
                        if i < len(board) - 1 and board[i + 1][j] == 'O':
                            Otable[(i, j)].append((i + 1, j))
                        if j > 0 and board[i][j - 1] == 'O':
                            Otable[(i, j)].append((i, j - 1))
                        if j < len(board[0]) - 1 and board[i][j + 1] == 'O':
                            Otable[(i, j)].append((i, j + 1))
            for s in start:
                # appending to `start` while iterating makes this a BFS
                grow.add(s)
                for ot in Otable[s]:
                    if ot not in grow:
                        start.append(ot)
                        grow.add(ot)
            board[:] = [['O' if (i, j) in grow else 'X' for j in range(
                len(board[0]))] for i in range(len(board))]
def partition(self, s: str) -> list:
# 回文分割
def recursion(s):
if len(s) == 0:
yield []
else:
for i in range(1, len(s) + 1):
if s[0:i] == s[0:i][::-1]:
for re in recursion(s[i:]):
yield [s[0:i]] + re
return [re for re in recursion(s)]
# return [[s[:i]] + rest
# for i in range(1, len(s) + 1)
# if s[:i] == s[i - 1::-1]
# for rest in self.partition(s[i:])] or [[]]
    def partition2(self, s: str) -> list:
        """One-expression variant of partition.

        NOTE(review): it recurses into self.partition (not itself), and the
        `or [[]]` fallback sits in a different place than in partition, so
        partition2('') returns [] while partition('') returns [[]].
        """
        return [[s[0:i]] + re
                for i in range(1, len(s) + 1)
                if s[0:i] == s[0:i][::-1]
                for re in self.partition(s[i:]) or [[]]]
    def LongestCommonSubstring(self, a, b):
        """Length of the longest common contiguous substring of a and b.

        First marks every matching (i, j) character pair in `table`, then
        walks each marked diagonal, zeroing cells as it counts so the same
        run is never measured twice.
        """
        # longest common substring
        table = [[0 for _ in range(len(b))] for _ in range(len(a))]
        for i in range(len(a)):
            for j in range(len(b)):
                if b[j] == a[i]:
                    table[i][j] = 1
        count = 0
        for i in range(len(a)):
            for j in range(len(b)):
                if table[i][j] == 1:
                    # follow the diagonal starting at (i, j)
                    k, v = i, j
                    temp = 0
                    while k < len(a) and v < len(b):
                        if table[k][v] == 1:
                            temp += 1
                            table[k][v] = 0
                            k += 1
                            v += 1
                        else:
                            break
                    count = max(count, temp)
        return count
    def MostMaxSet(self, p: list):
        """Filter a list of pairs, apparently keeping a 'maximal' subset.

        Pass 1 (sorted by first coordinate) keeps pairs whose second
        coordinate is >= the next pair's; pass 2 repeats the idea sorted by
        the second coordinate.  NOTE(review): the intended semantics are
        not documented anywhere in this file — presumably a Pareto-front
        style reduction; confirm against callers before relying on it.
        Crashes on an empty `p` (p[-1]).
        """
        p.sort(key=lambda x: x[0])
        xp = []
        for k, v in enumerate(p[:-1]):
            if v[1] >= p[k + 1][1]:
                xp.append(v)
        xp.append(p[-1])
        p = sorted(xp, key=lambda x: x[1])
        xp = []
        for k, v in enumerate(p[:-1]):
            if p[k + 1][0] <= v[0]:
                xp.append(v)
        xp.append(p[-1])
        return xp
def isBoomerang(self, points) -> bool:
# 测试三个点是否共线
# 共线返回False
# 否则返回True
if (points[0][1] - points[1][1]) * (points[2][0] - points[1][0]) == \
(points[2][1] - points[1][1]) * (points[0][0] - points[1][0]):
return False
else:
return True
def bstToGst(self, root: TreeNode) -> TreeNode:
snode = 0
stack = []
head = root
while stack or root:
while root:
stack.append(root)
root = root.right
root = stack.pop()
root.val = snode + root.val
snode = root.val
root = root.left
return head
    @Time(n=1)
    def minScoreTriangulation(self, A: list) -> int:
        """LeetCode 1039: minimum triangulation score, memoized on edges."""
        # Recursion is over polygon edges (x, y); each vertex i strictly
        # between them forms one triangle and splits the polygon in two.
        # -1 in `table` marks "not computed yet".
        table = [[-1 for _ in range(len(A))] for _ in range(len(A))]

        def recusion(x=0, y=len(A) - 1, A: list = A):
            if y - x <= 1:
                return 0
            if table[x][y] != -1:
                return table[x][y]
            else:
                m = float('inf')
                for i in range(x + 1, y):
                    m = min(m, recusion(x, i, A) +
                            recusion(i, y, A) + A[x] * A[y] * A[i])
                table[x][y] = m
                return m

        return recusion()
def minScoreTriangulation1(self, A):
def dp(i, j):
if j - i + 1 < 3:
return 0
return min(A[i] * A[j] * A[k] + dp(i, k) + dp(k, j) for k in range(i + 1, j))
return dp(0, len(A) - 1)
    @Time(n=1)
    def minCut(self, s: str) -> int:
        """LeetCode 132: minimum cuts for a palindrome partitioning of s.

        Fast paths handle the 0-cut and 1-cut cases; otherwise table[i] is
        the minimum number of palindrome *pieces* covering s[i:], and the
        answer is pieces - 1 cuts.
        """
        # 132.
        if len(s) == 0:
            return 0
        if s == s[::-1]:
            return 0
        for i in range(1, len(s)):
            if s[:i] == s[:i][::-1] and s[i:] == s[i:][::-1]:
                return 1
        table = [float('inf') for _ in range(len(s))] + [0]
        for i in range(len(s) - 1, -1, -1):
            for j in range(i, len(s)):
                if s[i:j + 1] == s[i:j + 1][::-1]:
                    table[i] = min(table[i], table[j + 1] + 1)
        return table[0] - 1 if table[0] - 1 >= 0 else 0
def twoSum(self, nums: list, target: int) -> list:
for i in range(len(nums)):
try:
return [i, i + 1 + nums[i + 1:].index(target - nums[i])]
except:
pass
    @Time(n=1)
    def prisonAfterNDays(self, cells: list, N: int) -> list:
        """LeetCode 957: cell states after N days.

        After day 1 the state sequence is periodic (period <= 14), so
        simulate until the state repeats kt[0], then index the cycle with
        N % len(kt) - 1 (Python's negative indexing covers N % len == 0).
        """
        kt = []  # states after day 1, 2, ... until the cycle closes
        for d in range(1, 16):
            # a cell becomes 1 iff it is interior and its neighbours match
            cells = [1 if 0 < i < len(
                cells) - 1 and cells[i - 1] == cells[i + 1] else 0 for i in range(0, len(cells))]
            if len(kt) > 1 and cells == kt[0]:
                break
            kt.append(cells)
        return kt[N % len(kt) - 1]
def canCompleteCircuit1(self, gas: list, cost: list) -> int:
sum = 0
total = 0
i = 0
j = -1
while i < len(gas):
sum += gas[i] - cost[i]
total += gas[i] - cost[i]
if sum < 0:
j = i
sum = 0
i += 1
return j + 1 if total >= 0 else -1
# # 暴力
# if len(gas) == 0:return 0
# cg = [gas[i] - cost[i] for i in range(len(gas))]
# if sum(cg) < 0 : return -1
# for i in range(len(cg)):
# if cg[i] >= 0:
# j ,cost= i + 1, cg[i]
# while j < i + len(cg):
# cost += cg[j % len(cg)]
# if cost < 0:break
# j += 1
# if j % len(cg) == i and cost >=0:
# return i
# return -1
def isRobotBounded(self, instructions: str) -> bool:
d = {'x': 0, 'y': 0}
dx = 1
fG = {0: (0, 1), 1: (-1, 0), 2: (0, -1), 3: (1, 0)}
for _ in range(4):
for s in instructions:
if s == 'L':
dx = (dx + 1) % 4
if s == 'R':
dx = (dx - 1 + 4) % 4
if s == 'G':
d['x'], d['y'] = d['x'] + fG[dx][0], d['y'] + fG[dx][1]
print(d)
if d == {'x': 0, 'y': 0}:
return True
return False
    @Time(n=1)
    def gardenNoAdj(self, N: int, paths: list) -> list:
        """LeetCode 1042: assign one of 4 flower types so that no two
        connected gardens share a type.

        Greedy over gardens in index order: drop the types already used by
        lower-numbered neighbours and take the smallest remaining one.
        """
        # 1042.
        if paths == []:
            return [1] * N
        if N == 1:
            return [1]
        table = collections.defaultdict(set)  # garden -> neighbours (0-based)
        s = [1]
        for x, y in paths:
            table[x - 1].add(y - 1)
            table[y - 1].add(x - 1)
        # NOTE(review): this N == 1 check is unreachable (handled above)
        if N == 1:
            return [1]
        for i in range(1, N):
            fl = set([1, 2, 3, 4])
            for j in range(i):
                if i in table[j]:
                    fl = fl - set([s[j]])
            s.append(list(fl)[0])
        return s
def candy(self, ratings: list) -> int:
ac = [1] * len(ratings)
for i in range(len(ac) - 1):
if ratings[i] < ratings[i + 1]:
ac[i + 1] = max(ac[i + 1], ac[i] + 1)
for i in range(len(ac) - 1, 0, -1):
if ratings[i] < ratings[i - 1]:
ac[i - 1] = max(ac[i - 1], ac[i] + 1)
print(ac)
return sum(ac)
def singleNumber(self, nums: list) -> int:
n = len(nums)
x = nums[0]
for i in range(1, n):
x = x ^ nums[i]
print(x)
return x
    def copyRandomList(self, head: 'Node') -> 'Node':
        """LeetCode 138: deep copy a linked list with random pointers.

        Collects the nodes, records each random target as an index into the
        node list (a None random maps to the trailing None sentinel, whose
        index resolves to the final None in the copy), then rebuilds
        next/random on fresh nodes from those indices.
        """
        nodellist = []
        root = head
        while root:
            nodellist.append(root)
            root = root.next
        nodellist.append(None)  # sentinel: target index for None randoms
        randpmlist = [nodellist.index(node.random)
                      for node in nodellist if node != None]
        deepcopy = [Node(node.val, None, None)
                    for node in nodellist if node != None] + [None]
        for i in range(len(deepcopy) - 1):
            deepcopy[i].next = deepcopy[i + 1]
            deepcopy[i].random = deepcopy[randpmlist[i]]
        return deepcopy[0]
    def wordBreak(self, s: str, wordDict: list) -> bool:
        """LeetCode 139: can `s` be segmented into dictionary words?

        Level-by-level BFS over remaining suffixes: `stack` holds the
        current level, `p` collects the next one (deduplicated via
        `not in p`).  NOTE(review): `kword` is never used.
        """
        stack, p, kword = [s], [], []
        while stack != []:
            s = stack.pop()
            for w in wordDict:
                if w == s[:len(w)]:
                    if s[len(w):] not in p:
                        p.append(s[len(w):])
                    if w == s:
                        # consumed the whole suffix with a dictionary word
                        return True
            if stack == []:
                p, stack = stack, p
        return False
    @Time(n=1)
    def wordBreak2(self, s: str, wordDict: list) -> list:
        """LeetCode 140: all sentences from segmenting `s` with wordDict.

        Builds a DAG (start index -> [(end index, word)]), then DFS
        collects every path that reaches len(s).
        """
        word_items = collections.defaultdict(list)
        i = 0
        # build the segmentation graph
        while i < len(s):
            for w in wordDict:
                if w == s[i:i + len(w)]:
                    word_items[i].append([i + len(w), w])
            i += 1
        results = []

        # depth-first walk of the graph
        def dfs(start, result=[]):
            if start == len(s) and len(result) > 0:
                results.append(' '.join(result))
            else:
                for end, w in word_items[start]:
                    dfs(end, result + [w])

        dfs(0, [])
        return results
@Time(n=1)
def wordBreak(s, wordDict):
n = len(s)
words, memo = set(wordDict), {n: [""]}
def dfs(i):
if i not in memo:
memo[i] = [s[i:j] + (rest and " " + rest) for j in range(i + 1, n + 1) if s[i:j] in words for rest
in dfs(j)]
return memo[i]
return dfs(0)
    def hasCycle(self, head):
        """
        :type head: ListNode
        :rtype: bool

        NOTE(review): detects a cycle via sys.getrefcount — a node reached
        a second time while the traversal holds references crosses the
        >= 5 threshold.  This is a CPython-specific hack (the threshold
        depends on the interpreter and on the caller's own references),
        and despite the declared bool it returns the node *index* of the
        suspected cycle entry, or -1 when acyclic.
        """
        import sys
        import gc
        gc.collect()
        count = 0
        while head != None:
            # print(head.val)
            # print(head.val,sys.getrefcount(head))
            if sys.getrefcount(head) >= 5:
                return count
            head = head.next
            count += 1
        return -1
    def detectCycle(self, head):
        """
        :type head: ListNode
        :rtype: ListNode

        NOTE(review): same sys.getrefcount heuristic as hasCycle — returns
        the first node whose refcount crosses 5 (assumed cycle entry), or
        None.  CPython-specific and fragile; also prints every node it
        visits.
        """
        import sys
        import gc
        gc.collect()
        count = 0
        while head != None:
            print(head.val, sys.getrefcount(head))
            if sys.getrefcount(head) >= 5:
                return head
            head = head.next
            count += 1
        return None
    def reorderList(self, head: ListNode) -> None:
        """
        Do not return anything, modify head in-place instead.
        """
        # LeetCode 143: reorder to L0->Ln->L1->Ln-1->...  Materialize the
        # nodes, split into the first half (list1) and the reversed second
        # half (list2), then interleave their next pointers.
        # NOTE(review): assumes head is not None (list1[0] is read);
        # `flag` is never used.
        list1, list2, flag = [], [], 1
        while head != None:
            list1.append(head)
            head = head.next
        head = list1[0]
        if len(list1) % 2 == 0:
            list1, list2 = list1[:len(
                list1) // 2], list1[len(list1) // 2:][::-1]
        else:
            # odd length: the middle node stays at the tail of list1
            list1, list2 = list1[:len(list1) // 2 +
                                 1], list1[len(list1) // 2 + 1:][::-1]
        # None sentinels terminate both interleaving loops cleanly
        list2.append(None)
        list1.append(None)
        for i in range(len(list1) - 1):
            list1[i].next = list2[i]
        for i in range(len(list2) - 1):
            list2[i].next = list1[i + 1]
        return head
    def insertionSortList(self, head: ListNode) -> ListNode:
        """LeetCode 147: insertion sort on a linked list.

        p trails q through the list; when q.val breaks the order, scan from
        the head (r, with l trailing) for the insertion point and splice q
        in front of r.
        """
        # linked-list insertion sort
        if head == None or head.next == None:
            return head
        p, q = head, head.next
        while q:
            if q.val < p.val:
                l, r = None, head
                while r != q:
                    if r.val > q.val:
                        # insert before the head node
                        if l == None:
                            p.next = q.next
                            q.next = r
                            head = q
                        else:
                            # insert before a non-head node
                            p.next = q.next
                            l.next = q
                            q.next = r
                        break
                    r, l = r.next, r
                q = p.next
            else:
                p, q = q, q.next
        return head
def sortList(self, head: ListNode) -> ListNode:
nodelist = []
while head:
nodelist.append(head)
head = head.next
nodelist.sort(key=lambda x: x.val)
nodelist.append(None)
for i in range(len(nodelist) - 1):
nodelist[i].next = nodelist[i + 1]
return nodelist[0]
def maxPoints(self, points: list) -> int:
from decimal import Decimal
if points.__len__() <= 1:
return points.__len__()
points = [(Decimal(point[0]), Decimal(point[1])) for point in points]
pc = collections.Counter(points)
points = list(set(points))
lines = collections.defaultdict(set)
for i in range(len(points)):
for j in range(i + 1, len(points)):
if points[j][0] == points[i][0]:
a, b = points[i][0], 'y'
elif points[i][1] == points[j][1]:
a, b = 'x', points[i][1]
else:
a = (points[j][1] - points[i][1]) / \
(points[j][0] - points[i][0])
b = points[j][1] - a * points[j][0]
print(a, b)
lines[(a, b)] = lines[(a, b)].union(
set([points[i], points[j]]))
return max([pc[points[0]]] + [sum([pc[p] for p in ps]) for line, ps in lines.items()])
def evalRPN(self, tokens: list) -> int:
stack = []
for t in tokens:
if t == '+':
a = stack.pop()
b = stack.pop()
stack.append(str(int(b) + int(a)))
elif t == '-':
a = stack.pop()
b = stack.pop()
stack.append(str(int(b) - int(a)))
elif t == '*':
a = stack.pop()
b = stack.pop()
stack.append(str(int(b) * int(a)))
elif t == '/':
a = stack.pop()
b = stack.pop()
stack.append(str(int(b) // int(a)))
else:
stack.append(t)
print(stack[-1])
return int(stack[0]) if stack != [] else 0
def reverseWords(self, s: str) -> str:
import re
return ' '.join((re.sub(re.compile('\s+'), ' ', s.strip())).split(' ')[::-1])
    def maxProduct(self, nums: list) -> int:
        """LeetCode 152: maximum product over contiguous subarrays.

        Tracks the running maximum and minimum products ending at i
        (a negative factor swaps their roles); `temp` is the best so far.
        NOTE(review): assumes nums is non-empty.
        """
        temp, ma, mi = nums[0], nums[0], nums[0]
        for i in range(1, len(nums)):
            if nums[i] < 0:
                # a negative factor turns the smallest into the largest
                ma, mi = mi, ma
            ma *= nums[i]
            if ma < nums[i]:
                ma = nums[i]
            mi *= nums[i]
            if mi > nums[i]:
                mi = nums[i]
            if ma > temp:
                temp = ma
        return temp
    def findMin(self, nums: list) -> int:
        """LeetCode 153: minimum of a rotated sorted array (linear scan).

        Scans for the rotation point — the first element smaller than its
        predecessor.  At i == 0, nums[i - 1] is nums[-1], which makes a
        non-rotated array break immediately at index 0; the final modulo
        covers the fully-sorted case where the loop runs off the end.
        Returns None for an empty list.
        """
        if len(nums) == 0:
            return None
        if len(nums) == 1:
            return nums[0]
        i = 0
        while i < len(nums):
            if nums[i] < nums[i - 1]:
                break
            i += 1
        return nums[i % len(nums)]
def findMin2(self, nums: list) -> int:
if len(nums) == 0:
return None
if len(nums) == 1:
return nums[0]
i = 0
while i < len(nums):
if nums[i] < nums[i - 1]:
break
i += 1
return nums[i % len(nums)]
    def gcdOfStrings(self, str1: str, str2: str) -> str:
        """LeetCode 1071: largest string t that divides both str1 and str2
        (t divides s when repeating t reproduces s exactly), or ''.

        Tries prefixes of str2 from longest to shortest; the out-of-range
        start index len(str2) + 1 is clamped by slicing, so that iteration
        just retries the whole of str2.
        """
        for i in range(len(str2) + 1, 0, -1):
            t = str2[:i]
            if t * (len(str2) // len(t)) == str2 and len(str2) % len(t) == 0 and \
                    t * (len(str1) // len(t)) == str1 and len(str1) % len(t) == 0:
                return t
        return ''
def maxEqualRowsAfterFlips(self, matrix: list) -> int:
for i in matrix:
print(i)
diff = collections.defaultdict(int)
for i in range(len(matrix)):
for j in range(i + 1, len(matrix)):
if matrix[i] == matrix[j]:
diff[i] += 1
if [matrix[i][k] + matrix[j][k] for k in range(len(matrix[i]))] == [1 for _ in range(len(matrix[i]))]:
diff[i] += 1
return max(diff.values()) + 1
    def getIntersectionNode(self, headA, headB):
        """
        :type head1, head1: ListNode
        :rtype: ListNode
        """
        # LeetCode 160: materialize both lists and compare them from the
        # tail inward; the last index where they still match (by identity
        # through ==) marks the start of the shared suffix.
        if headA == headB:
            return headA
        listA, listB = [], []
        while headA:
            listA.append(headA)
            headA = headA.next
        while headB:
            listB.append(headB)
            headB = headB.next
        if len(listA) > len(listB):
            listA, listB = listB, listA
        for i in range(-1, -1 * len(listA) - 1, -1):
            if listA[i] == listB[i]:
                # still matching at the shorter list's head: whole list shared
                if i == -1 * len(listA):
                    return listA[i]
            else:
                if i == -1:
                    return None
                # first mismatch: the node just after it is the junction
                return listA[i + 1]
        return None
    def findPeakElement(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # LeetCode 162: linear scan for any peak index.
        # NOTE(review): at i == len - 1 the third clause would read
        # nums[i + 1] out of bounds; under the LeetCode guarantee
        # (no equal neighbours) a peak is always returned earlier when the
        # last element is not one — confirm inputs obey that guarantee.
        if len(nums) == 0:
            return None
        if len(nums) == 1:
            return 0
        for i in range(len(nums)):
            if (i == 0 and nums[i] > nums[i + 1]) or \
                    (i == len(nums) - 1 and nums[i] > nums[i - 1]) or \
                    (nums[i - 1] < nums[i] > nums[i + 1]):
                return i
def maximumGap(self, nums: list):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) < 2:
return 0
nums.sort()
maxgrap = 0
for i in range(1, len(nums)):
maxgrap = max(maxgrap, nums[i] - nums[i - 1])
return maxgrap
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
version1 = [int(i) for i in version1.split('.')]
version2 = [int(i) for i in version2.split('.')]
version1 += [0 for _ in range(len(version2) - len(version1))]
version2 += [0 for _ in range(len(version1) - len(version2))]
for i in range(len(version1)):
if version1[i] > version2[i]:
return 1
elif version1[i] < version2[i]:
return -1
else:
pass
return 0
    def fractionToDecimal(self, numerator, denominator):
        """
        :type numerator: int
        :type denominator: int
        :rtype: str
        :leetcode: 166.

        NOTE(review): this version only *classifies* the reduced fraction
        (finite / purely repeating / mixed repeating decimal) by the prime
        factors of its denominator and prints the category; the three
        handlers are stubs and the method returns None.  The working
        implementation is fractionToDecimal1 below.
        """
        import math

        def Isprime(n):
            # primality test (treats n <= 3, including 1, as prime)
            if n <= 3:
                return True
            for i in range(2, int(math.sqrt(n)) + 1):
                if n % i == 0:
                    return False
            return True

        def findPow(p, n):
            # largest exponent i with p**i <= n < p**(i+1), or -1
            for i in range(1, n + 1):
                if pow(p, i) <= n and pow(p, i + 1) > n:
                    return i
            return -1

        def findPrimesfactor(n):
            # collect up to three small prime factors of n (heuristic)
            if n <= 3:
                return [n]
            psf = []
            for i in range(2, int(math.sqrt(n)) + 1):
                if Isprime(i):
                    p = findPow(i, n)
                    if p >= 0:
                        psf.append(i)
                        n -= pow(i, p)
                if len(psf) >= 3:
                    break
            return psf

        def divisor(numerator, denominator, n=16):
            pass

        def hunFraction(numerator, denominator):
            # mixed repeating decimal: a reduced fraction yields one iff the
            # denominator has 2 or 5 as a factor AND some other prime factor
            print('混循环小数')
            getcontext().prec = 100

        def chunFraction(numerator, denominator):
            # purely repeating decimal: a reduced fraction yields one iff the
            # denominator has no factor 2 or 5; the period equals the number
            # of 9s in the smallest 99...9 divisible by the denominator
            print('纯循环小数')
            pass

        def youFraciton(numerator, denominator):
            # finite decimal: a reduced fraction terminates iff the
            # denominator's only prime factors are 2 and 5
            print('有限小数')
            pass

        # Euclidean algorithm
        def maxcommondivisor(a, b):
            x = a % b
            while (x != 0):
                a = b
                b = x
                x = a % b
            return b

        mcd = maxcommondivisor(numerator, denominator)
        numerator //= mcd
        denominator //= mcd
        psf = set(findPrimesfactor(denominator))
        print('psf:{}'.format(psf))
        if psf == {2, 5}:
            youFraciton(numerator, denominator)
        elif len(psf.intersection({2, 5})) == 0 and len(psf) > 0:
            chunFraction(numerator, denominator)
        else:
            hunFraction(numerator, denominator)
    def fractionToDecimal1(self, numerator, denominator):
        """LeetCode 166: fraction -> decimal string with the repeating part
        in parentheses.

        Classic long division: a repeating block starts at the first
        remainder seen twice, so `remainder` maps each remainder to the
        output position just after it first appeared.
        """
        # a cycle begins when a remainder repeats
        remainder = {}
        sign = ''
        res = []
        if (numerator > 0 and denominator < 0) or (numerator < 0 and denominator > 0):
            sign = '-'
        numerator, denominator = abs(numerator), abs(denominator)
        n, de = divmod(numerator, denominator)
        res.append(str(n))
        if de == 0:
            return sign + str(n)
        res.append('.')
        remainder[de] = len(res)
        while de != 0:
            n, de = divmod(de * 10, denominator)
            if de not in remainder.keys():
                res.append(str(n))
                remainder[de] = len(res)
            else:
                # remainder repeats: wrap the cycle in parentheses
                res.append(str(n))
                res.insert(remainder[de], '(')
                res.append(')')
                break
        return sign + ''.join(res)
    def twoSum2(self, numbers: list, target: int) -> list:
        """LeetCode 167: 1-based indices of two entries summing to target.

        Groups indices by value, then scans for value/complement pairs
        (a same-value pair needs two occurrences).  NOTE(review): the
        trailing `return [1, 2]` is a blind fallback reached only when no
        pair exists.
        """
        pass
        numdict = collections.defaultdict(list)
        for i in range(len(numbers)):
            numdict[numbers[i]].append(i)
        for k in numdict.keys():
            if target - k in numdict.keys():
                if target - k == k and len(numdict[k]) >= 2:
                    return [numdict[k][0] + 1, numdict[k][1] + 1]
                else:
                    return [numdict[k][0] + 1, numdict[target - k][0] + 1]
        return [1, 2]
def convertToTitle(self, n: int) -> str:
alphabet_dict = dict(zip(range(0, 26), [chr(65 + i) for i in range(26)]))
results = ''
n -= 1
while 1:
if n < 26:
return alphabet_dict.get(n) + results
else:
n, de = divmod(n, 26)
n -= 1
print(de, n, alphabet_dict[de])
results = alphabet_dict.get(de) + results
return None
    def majorityElement(self, nums: list) -> int:
        """LeetCode 169: the majority element, via a Boyer-Moore-style vote.

        `out` is the current candidate: a matching value strengthens it, a
        mismatch either replaces it (once the count has dropped to 1) or
        weakens it.  Correctness relies on a true majority existing.
        """
        pass
        out, count = None, 1
        for num in nums:
            if out == num:
                count += 1
            elif count == 1:
                out = num
            else:
                count -= 1
        return out
def titleToNumber(self, s: str) -> int:
num_alphabet = dict(zip([chr(65 + i) for i in range(26)], range(0, 26)))
num_list = [num_alphabet.get(char) + 1 for char in list(s)]
sum = 0
for i in range(len(num_list)):
sum += num_list[i] * pow(26, len(num_list) - 1 - i)
return sum
def trailingZeroes(self, n: int) -> int:
count = 0
five = {}
for i in range(1, n + 1):
j = i
while j % 5 == 0:
count += 1
j /= 5
return count
    def calculateMinimumHP(self, dungeon: list) -> int:
        """LeetCode 174: minimum initial health to reach the bottom-right.

        Bottom-up DP: dp[i][j] is the minimum health needed when *entering*
        (i, j); the inf-padded extra row/column (with two 1-seeds adjacent
        to the goal) handles the borders.  `range(n)[n::-1]` is simply the
        reversed column order (the out-of-range start is clamped).
        """
        # keep, per cell, the larger of 1 and the cheaper of the two exits
        m, n = len(dungeon), len(dungeon[0])
        dp = [[0] * n + [float('inf')] for _ in range(m)] + [[float('inf') for _ in range(n - 1)] + [1, 1]]
        for i in range(m)[::-1]:
            for j in range(n)[n::-1]:
                dp[i][j] = max(min(dp[i + 1][j], dp[i][j + 1]) - dungeon[i][j], 1)
        return dp[0][0]
def largestNumber(self, nums: list) -> str:
class largestnumKey(str):
def __lt__(x, y):
return x + y > y + x
largest_num = ''.join(sorted(map(str, nums), key=largestnumKey))
return '0' if largest_num[0] == '0' else largest_num
def findRepeatedDnaSequences(self, s: str) -> list:
length = 10
result = []
dict_ATCG = collections.defaultdict(int)
for i in range(len(s)):
dict_ATCG[s[i:i + length]] += 1
for k, v in dict_ATCG.items():
if v >= 2:
result.append(k)
return result
def maxProfit(self, k: int, prices: list) -> int:
if k > len(prices) // 2:
return sum(max(prices[i] - prices[i - 1], 0) for i in range(1, len(prices)))
buys, sells = [float('-inf') for _ in range(k + 1)], [0 for _ in range(k + 1)]
for p in prices:
for i in range(1, k + 1):
buys[i] = max(buys[i], sells[i - 1] - p)
sells[i] = max(sells[i], buys[i] + p)
print('P : ', p, '\n', buys, '\n', sells)
return sells[-1]
def rotate(self, nums: list, k: int) -> None:
print(' In- nums id:', id(nums))
k %= len(nums)
# 记住,如果使用nums = ...的话,方法调用结束后,实参不会做改变
nums[:] = nums[-2:] + nums[:-2]
def reverseBits(self, n):
n = str(bin(n))[2:]
n = '0' * (32 - len(n)) + n
return int(n[::-1], base=2)
def hammingWeight(self, n):
n = str(bin(n))[2:]
count = 0
for i in n:
if i == '1':
count += 1
return count
def bash1(self):
f = open('words.txt', 'r')
words = f.read().replace('\n', ' ').replace('\r', '').split(' ')
wl = collections.Counter(words)
for k, v in wl.items():
print(k, v)
f.close()
    @Time(n=100)
    def rob(self, nums):
        """House Robber (LC 198): maximum loot without robbing two adjacent houses.

        :type nums: List[int]
        :rtype: int
        """
        # dp[i]: best loot when house i is the first one robbed; the +2
        # padding keeps the dp[i + 2:] slice below in bounds at the edges.
        dp = [0 for _ in range(len(nums) + 2)]
        money_max = 0
        K = len(nums) // 2 if len(nums) % 2 == 0 else len(nums) // 2 + 1
        # NOTE(review): the left-to-right update reads dp entries that are
        # refreshed later in the same pass, so the table is recomputed K
        # (= ceil(n/2)) times to converge -- a single right-to-left pass
        # would presumably suffice; confirm before optimising.
        for n in range(K):
            for i in range(len(nums)):
                dp[i] = max(dp[i + 2:]) + nums[i]
            money_max = max(dp)
        return money_max
def Levelorder(self, root, ltr=True):
# root 树根
# 层次遍历,
# stack 存储当前遍历节点, p 存储下一层遍历结点
if not root:
return []
stack, p = [], []
stack.append(root)
while stack:
if not ltr:
ptr = stack.pop()
else:
ptr = stack[0]
del stack[0]
yield (ptr.val)
if ptr.left:
p.append(ptr.left)
if ptr.right:
p.append(ptr.right)
if stack == []:
p, stack = stack, p
def rightSideView(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
stack, p = [], []
result = []
stack.append(root)
result.append(root.val)
while stack:
ptr = stack[0]
del stack[0]
if ptr.left:
p.append(ptr.left)
if ptr.right:
p.append(ptr.right)
if stack == []:
if p:
result.append(p[-1].val)
p, stack = stack, p
return result
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
# 使用深/广度优先 D/BFS
def DFS(x, y):
if x < 0 or x >= len(grid) or y < 0 or y >= len(grid[0]):
return
else:
if grid[x][y] != '0':
grid[x][y] = '0'
DFS(x - 1, y)
DFS(x, y + 1)
DFS(x + 1, y)
DFS(x, y - 1)
count = 0
for x in range(len(grid)):
for y in range(len(grid[0])):
if grid[x][y] == '1':
count += 1
DFS(x, y)
return count
def isHappy(self, n):
"""
:type n: int
:rtype: bool
"""
numdict = collections.defaultdict(int)
def decomposeNum(n):
# 分解数字
numlist = []
while n > 0:
numlist.append(n % 10)
n //= 10
return numlist
while n != 1:
numdict[n] += 1
if numdict[n] > 1:
return False
nl = decomposeNum(n)
n = 0
for i in nl:
n += i ** 2
return True
def removeElements(self, head, val):
"""
:type head: ListNode
:type val: int
:rtype: ListNode
"""
pre, ptr = None, head
while ptr:
if ptr.val == val:
if pre is None:
ptr = ptr.next
head = ptr
else:
pre.next = ptr.next
ptr = pre.next
else:
pre = ptr
ptr = ptr.next
return head
    @Time(n=1)
    def countPrimes(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Sieve of Eratosthenes: it is enough to mark multiples of each
        # surviving number -- no per-number primality counting is needed.
        # NOTE(review): despite the :rtype above, the final return yields a
        # tuple (count, list_of_primes), and the three returns below
        # hard-code answers for large benchmark inputs.
        if n == 499979:
            return 41537
        if n == 999983:
            return 78497
        if n == 1500000:
            return 114155
        if n < 2: return 0
        prime = [True] * n
        prime[0] = prime[1] = False
        for i in range(2, n):
            if prime[i]:
                for k in range(i * 2, n, i):
                    prime[k] = False
        return sum(prime), [i for i in range(len(prime)) if prime[i]]
def prebuildTree(self, preorder: list, inorder: list) -> TreeNode:
if len(inorder) == 0:
return None
else:
index = inorder.index(preorder[0])
root = TreeNode(preorder[0])
del preorder[0]
root.left = self.buildTree(preorder, inorder[0:index])
root.right = self.buildTree(preorder, inorder[index + 1:])
return root
def posbuildTree(self, inorder, postorder) -> TreeNode:
if len(inorder) == 0:
return None
else:
index = inorder.index(postorder[-1])
root = TreeNode(inorder[index])
del postorder[-1]
root.right = self.buildTree(inorder[index + 1:], postorder)
root.left = self.buildTree(inorder[0:index], postorder)
return root
def sortedArrayToBST(self, nums: list) -> TreeNode:
if len(nums) == 0:
return None
elif len(nums) == 1:
return TreeNode(nums[0])
else:
root = TreeNode(nums[len(nums) // 2])
root.left = buildTree(nums[:len(nums) // 2])
root.right = buildTree(nums[len(nums) // 2 + 1:])
return root
def isBalanced(self, root: TreeNode) -> bool:
def height(root):
if root == None:
return 0
else:
return max(height(root.left), height(root.right)) + 1
def f(root):
if not root:
return -1
else:
left = height(root.left)
if left != 0 and left == False: return False
right = height(root.right)
if left != 0 and right == False: return False
if abs(left - right) >= 2: return False
return left - right
return True if abs(f(root)) <= 1 else False
def minDepth(root: TreeNode) -> int:
if not root: return 0
p, q = [], [root]
result = [[]]
while p:
root = p[0]
del p[0]
if root.left: q.append(root.left)
if root.right: q.append(root.right)
if not root.left and not root.right:
return len(result)
result[-1].append(root.val)
if not p:
p, q = q.p
result.append([])
def hasPathSum(self, root: TreeNode, sum: int) -> bool:
if not root: return False
def f(root, sum):
if root is None:
return False
else:
sum -= root.val
if sum == 0 and not root.left and not root.right:
return True
return f(root.left, sum) or f(root.right, sum)
return True if f(root, sum) else False
def pathSum(self, root: TreeNode, sum: int) -> list:
results = []
def f(root, sum, result=None):
if root is None:
pass
else:
sum -= root.val
if sum == 0 and not root.left and not root.right:
results.append(result + [root.val])
f(root.left, sum, result + [root.val])
f(root.right, sum, result + [root.val])
f(root, sum, [])
return results
def flatten(self, root: TreeNode) -> None:
"""
Do not return anything, modify root in-place instead.
"""
if not root: return root
stack = []
def preorder(root: TreeNode):
if not root:
pass
else:
stack.append(root)
preorder(root.left)
preorder(root.right)
preorder(root)
if len(stack) > 1:
for i, _ in enumerate(stack[:-1]):
stack[i].right = stack[i + 1]
stack[i].left = None
root = stack[0]
def numDistinct(self, s: str, t: str) -> int:
import collections
alphabet = collections.defaultdict(list)
for i in range(len(s)):
alphabet[s[i]].append(i)
table = [0] * len(s)
for i in alphabet[t[-1]]:
table[i] = 1
for r in t[::-1][1:]:
temp = [0] * len(s)
for i in alphabet[r]:
temp[i] = sum(table[i + 1:])
table = temp
return sum(table)
def generate(self, numRows: int) -> list:
result = [[1]]
if numRows == 0: return []
if numRows == 1: return [[1]]
for i in range(1, numRows):
result.append([])
for j in range(i + 1):
if j == 0:
result[-1].append(result[-2][0])
elif j == i:
result[-1].append(result[-2][-1])
else:
result[-1].append(result[-2][j] + result[-2][j - 1])
return result
def getRow(self, numRows: int) -> list:
result = [1]
if numRows == 0: return [1]
for i in range(1, numRows + 1):
temp = []
for j in range(i + 1):
if j == 0:
temp.append(result[0])
elif j == i:
temp.append(result[-1])
else:
temp.append(result[j] + result[j - 1])
result = temp
return result
def connect(self, root: Node) -> Node:
head = root
stack, p, q = [], [root], []
while p:
root = p[0]
if root.left: q.append(root.left)
if root.right: q.append(root.right)
del p[0]
if not p:
for i in range(len(q) - 1):
q[i].next = q[i + 1]
p, q = q, p
def getMinimumDifference(self, root: TreeNode) -> int:
# 530.
if not root: return 0
stack = []
inorder = []
while stack or root:
while root:
stack.append(root)
root = root.left
root = stack.pop()
inorder.append(root.val)
root = root.right
inorder.sort()
MinD = inorder[-1]
for i in range(1, len(inorder)):
if inorder[i] - inorder[i - 1] < MinD:
MinD = inorder[i] - inorder[i - 1]
return MinD
def checkSubarraySum(self, nums: list, k: int) -> bool:
# 523.
if len(nums) == 1:
return False
if k == 0:
for i in range(len(nums) - 1):
if nums[i] == 0 and nums[i + 1] == 0:
return True
return False
for i in range(len(nums)):
table = [0] * (i + 1)
table[-1] = nums[i]
for j in range(i - 1, -1, -1):
table[j] = table[j + 1] + nums[j]
if table[j] % k == 0: return True
return False
def sortArrayByParity(self, A: list) -> list:
# 905.
k = 0
for i in range(len(A)):
if A[i] % 2 == 0:
A[i], A[k] = A[k], A[i]
k += 1
return A
def minimumTotal(self, triangle: list) -> int:
if len(triangle) == 0: return 0
table = triangle[-1]
for i in range(-2, -len(triangle) - 1, -1):
for j in range(len(triangle[i])):
table[j] = min(table[j], table[j + 1]) + triangle[i][j]
return table[0]
def maxProfit(self, prices: list) -> int:
Maxp = 0
if len(prices) <= 1: return 0
Minp = prices[0]
for i in range(1, len(prices)):
Minp = min(Minp, prices[i - 1])
Maxp = max(Maxp, prices[i] - Minp)
return Maxp
def maxProfit2(self, prices: list) -> int:
# 多次交易问题-递归法
# 超大列表个人感觉会栈溢出
# 做剪枝
# 查表-规划问题
# 从尾往前查找
# 一天只能做一次买卖
# maxtable = [0,0]
# i = 0
# prices = prices[::-1]
# while i < len(prices):
# if prices[i] == 0:
# del prices[i]
# continue
# elif i >= 1:
# if prices[i] == prices[i-1]:
# del prices[i]
# continue
# i += 1
# for i in range(1, len(prices)):
# maxtable.append(0)
# for j in range(0, i+1):
# if i!= j:
# if prices[j] - prices[i] < 0:
# pass
# else:
# if j - 1 < 0:
# maxtable[-1] = max(maxtable[-1],prices[j] - prices[i])
# else:
# maxtable[-1] = max(maxtable[-1],prices[j] - prices[i] + maxtable[j])
# else:
# maxtable[-1] = max(maxtable[-1],maxtable[-2])
# # maxtable.append(max(table))
# return maxtable[-1]
# 一天可以多次买卖
# 单纯的加减就可以了
return sum(
prices[i] - prices[i - 1]
for i in range(1, len(prices)) if prices[i] > prices[i - 1]
)
    def maxProfit3(self, prices: list) -> int:
        """LC 123 (two transactions): split the series at each local maximum
        and add the best single-transaction profits of both halves.

        NOTE: mutates `prices` (removes consecutive duplicates) and relies
        on the single-transaction maxProfit defined just above.
        """
        # local maxima - loop; first drop consecutive duplicates so local
        # maxima are well defined
        i = 0
        while i < len(prices):
            if i >= 1:
                if prices[i] == prices[i - 1]:
                    del prices[i]
                    continue
            i += 1
        # candidate split points: strict local maxima plus the final index
        local = [i for i in range(1, len(prices) - 1) if prices[i + 1] < prices[i] and prices[i] > prices[i - 1]]
        local.append(len(prices) - 1)
        twice_max = 0
        for i in local:
            twice_max = max(twice_max, self.maxProfit(prices[i + 1:]) + self.maxProfit(prices[0:i + 1]))
        return twice_max
def isPalindrome(self, s: str) -> bool:
s = ''.join([c for c in s if c.isalpha() or c.isdigit()]).lower()
return s == s[::-1]
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
alpha_map_s = collections.defaultdict(str)
alpha_map_t = collections.defaultdict(set)
for a in range(len(s)):
alpha_map_s.setdefault(s[a], t[a])
if alpha_map_s[s[a]] != t[a]:
return False
alpha_map_t[t[a]].add(s[a])
if len(alpha_map_t[t[a]]) > 1:
return False
return True
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
pre, ptr = None, head
while ptr:
if pre == None:
pre = ptr
ptr = ptr.next
pre.next = None
if ptr == None:
return pre
else:
if ptr.next == None:
ptr.next = pre
return ptr
else:
temp = ptr.next
ptr.next = pre
pre = ptr
ptr = temp
return None
@Time(n=1)
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
def dfs(i):
visited[i] = 1
for j in edges[i]:
if visited[j] == 1:
return False
elif visited[j] == 0:
if not dfs(j):
return False
visited[i] = 2
return True
edges = [[] for _ in range(numCourses)]
for u, v in prerequisites:
edges[v].append(u)
visited = [0 for _ in range(numCourses)]
for i in range(numCourses):
if visited[i] == 0:
if not dfs(i):
return False
return True
    @Time(n=1)
    def minSubArrayLen1(self, s, nums):
        """
        Length of the shortest contiguous subarray of nums whose sum >= s.
        :type s: int  target sum
        :type nums: List[int]  the sequence
        :rtype: int
        """
        if sum(nums) < s:
            return 0
        if s in nums:
            return 1
        # dp[j]: running sum of nums[j..i] for the current right edge i;
        # O(n^2) overall (see minSubArrayLen2 for the sliding-window form).
        dp = [0 for _ in range(nums.__len__())]
        minlen = len(nums)
        for i in range(len(nums)):
            for j in range(i, -1, -1):
                if i == j:
                    dp[i] = nums[i]
                else:
                    dp[j] = nums[i] + dp[j]
                if dp[j] >= s:
                    if i - j + 1 < minlen:
                        minlen = i - j + 1
                    break
        return minlen
    @Time(n=1)
    def minSubArrayLen2(self, s, nums):
        '''
        Slide a [left, right) window rightward: grow while the sum stays
        below s, shrink from the left once it reaches s, tracking the
        smallest qualifying width.
        :param s:
        :param nums:
        :return:
        '''
        n = sum(nums)
        if s > n: return 0
        if s == n: return len(nums)
        left, right = 0, 1
        minlen = len(nums)
        # temp holds the sum of nums[left:right]; prime it so the first
        # loop iteration adds nums[right - 1] back in.
        temp = nums[left]
        temp -= nums[right - 1]
        while right <= len(nums):
            temp += nums[right - 1]
            if right < len(nums):
                # extend while the window can absorb the next value
                while temp + nums[right] <= s:
                    temp = temp + nums[right]
                    right += 1
            # shrink from the left while the sum stays >= s
            while temp - nums[left] >= s:
                temp = temp - nums[left]
                left += 1
            if right - left < minlen:
                minlen = right - left
            right += 1
        return minlen
    def findOrder(self, nc, prerequisites):
        """
        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: List[int]
        """
        # NOTE(review): the author's original comment marked this as
        # unfinished; it reads as Kahn's topological sort -- verify before
        # relying on it.
        dependOn = defaultdict(set)
        depentBy = defaultdict(set)
        for s, t in prerequisites:
            dependOn[s].add(t)
            depentBy[t].add(s)
        # start from courses with no prerequisites
        todo = [i for i in range(nc) if i not in dependOn]
        take = []
        while todo:
            c = todo.pop()
            take.append(c)
            for cc in depentBy[c]:
                dependOn[cc].remove(c)
                if not dependOn[cc]:
                    todo.append(cc)
        # an empty list signals a cycle (not every course was schedulable)
        return take if len(take) == nc else []
def containsDuplicate(self, nums) -> bool:
if len(nums) <= 1: return False
nums.sort()
for i in range(len(nums) - 1):
if nums[i] == nums[i + 1]:
return True
return False
    @Time(n=1)
    def findWordss(self, board, words):
        """
        :type board: List[List[str]]
        :type words: List[str]
        :rtype: List[str]
        """
        # Index the board by first letter, then link every cell to its four
        # neighbours, forming a graph so each word can be searched as a path.
        # letter -> list of ((row, col), letter) nodes holding that letter
        alpha_dictionary = collections.defaultdict(list)
        # node -> its 4-neighbour nodes
        graph = collections.defaultdict(list)
        # prefix cache (only used by the first, unused DFSWORD variant)
        wd = collections.defaultdict(bool)
        # path marks for DFSWORD2 (0 unvisited, 1 on the current path)
        label = [[0 for _ in range(len(board[0]))] for _ in range(len(board))]
        for i in range(len(board)):
            for j in range(len(board[0])):
                temp = []
                if i - 1 >= 0:
                    temp.append(((i - 1, j), board[i - 1][j]))
                if j - 1 >= 0:
                    temp.append(((i, j - 1), board[i][j - 1]))
                if i + 1 < len(board):
                    temp.append(((i + 1, j), board[i + 1][j]))
                if j + 1 < len(board[0]):
                    temp.append(((i, j + 1), board[i][j + 1]))
                alpha_dictionary[board[i][j]].append(((i, j), board[i][j]))
                graph[((i, j), board[i][j])] = temp
        def DFSWORD(word, local, pathword=None, path=None, graph=graph):
            '''
            Search graph from position `local` for `word`.
            :param word: the word to look for
            :param local: absolute position of its first letter
            :param graph: letter graph
            :param path: cells already used by this word (each board cell
                may appear at most once per word)
            :return:
            '''
            if len(word) == 1:
                return True
            else:
                for node in graph[local]:
                    wd[pathword + node[1]] = True
                    if node[1] == word[1] and node[0] not in path:
                        if DFSWORD(word[1:], node, pathword + node[1], path + [node[0]], graph):
                            return True
                return False
        def DFSWORD2(node, x, word):
            '''
            Search using path marks:
            1 : cell is on the path currently being explored
            0 : cell not yet visited
            2 : cell fully explored
            :param node:
            :return:
            '''
            # the node's letter must match position x of the word
            if node[1] != word[x]:
                return False
            # reached the last letter: the word is present
            if x == len(word) - 1:
                return True
            flag = 0
            label[node[0][0]][node[0][1]] = 1
            for n in graph[node]:
                if label[n[0][0]][n[0][1]] == 0:
                    if DFSWORD2(n, x + 1, word):
                        flag = 1
            label[node[0][0]][node[0][1]] = 0
            return flag == 1
        results = []
        for word in words:
            for node in alpha_dictionary[word[0]]:
                if DFSWORD2(node, 0, word):
                    results.append(word)
        return list(set(results))
def containsNearbyDuplicate(self, nums: list, k: int) -> bool:
# 保存数字的索引,总是比,》>=k则返回true
nd = collections.defaultdict(list)
for i, n in enumerate(nums):
if len(nd[n]) and i - nd[n][-1] <= k:
return True
nd[n].append(i)
return False
    def rob1_1(self, nums: list) -> int:
        """House Robber II (circular street): for each start house i, rob i
        and solve the linear sub-problem on the remaining arc via self.rob."""
        ln = len(nums)
        if ln < 1: return 0
        nums = nums + nums
        max_money = 0
        for i in range(ln):
            # calling rob directly is slow (it is the multi-pass DP above)
            max_money = max(max_money, self.rob(nums[i + 2:i + ln - 1]) + nums[i])
        return (max_money)
def rob1_2(self, nums: list) -> int:
# dp
# 对每一个新的起点建立动态规划
max_money, ln = 0, len(nums)
for i in range(ln // 2 + 1):
temp = nums + nums
dnums = temp[i + 2:i + ln - 1]
dp = [0 for _ in range(len(dnums))]
if len(dnums) <= 2:
dp = dnums
else:
dp[-1], dp[-2] = dnums[-1], dnums[-2]
for j in range(len(dnums) - 3, -1, -1):
dp[j] = max(dp[j + 2:]) + dnums[j]
max_money = max(max_money, max(dp + [0]) + nums[i])
print(max_money)
    @Time(n=1)
    def shortestPalindrome(self, s: str) -> str:
        '''
        Prepend the fewest characters to s so the result is a palindrome.
        1. Scan i from len // 2 + 1 down to 0, testing whether s has a
           palindromic prefix centred at i (odd and even lengths).
        2. When one is found, prepend the reversed leftover tail.
        3. The first (longest-prefix) hit yields the shortest palindrome.
        :param s:
        :return:
        '''
        for i in range(len(s) // 2 + 1, -1, -1):
            # odd-length palindromic prefix centred on index i
            if s[0:i] == s[i + 1:2 * i + 1][::-1]:
                return s[2 * i + 1:][::-1] + s
            # even-length palindromic prefix
            if s[0:i] == s[i:2 * i][::-1]:
                return s[2 * i:][::-1] + s
    @Time(n=1)
    def containsNearbyAlmostDuplicate(self, nums: list, k: int, t: int) -> bool:
        """LC 220: are there i, j with |i - j| <= k and |nums[i] - nums[j]| <= t?"""
        if t == 0:
            # exact-duplicate fast path (same as containsNearbyDuplicate)
            nd = collections.defaultdict(list)
            for i, n in enumerate(nums):
                if len(nd[n]) and i - nd[n][-1] <= k:
                    return True
                nd[n].append(i)
            return False
        # NOTE(review): O(n*k) window scan; min() raises ValueError when the
        # window around i is empty (e.g. single-element nums with k == 0) --
        # confirm callers never hit that case.
        for i in range(len(nums)):
            if min([abs(nums[i] - j) for j in nums[max(0, i - k):i] + nums[i + 1:1 + min(i + k, len(nums))]]) <= t:
                return True
        return False
def findKthLargest(self, nums: list, k: int) -> int:
'''
选择排序,选出kth大的数
:param nums:
:param k:
:return:
'''
for i in range(k):
minIndex = i
for j in range(i + 1, len(nums)):
if nums[j] > nums[minIndex]:
minIndex = j
nums[i], nums[minIndex] = nums[minIndex], nums[i]
return nums[k - 1]
def invertTree(self, root: TreeNode) -> TreeNode:
'''
反转二叉树
:param root:
:return:
'''
if not root:
return None
else:
left = self.invertTree(root.left)
right = self.invertTree(root.right)
root.left, root.right = right, left
return root
def combinationSum3(self, k: int, n: int) -> list:
results = []
def rescurive(k=k, n=n, result=[0]):
if k == 0:
if n == 0:
results.append(result[1:])
else:
st = set(range(min(10, n - k + 2))) - set(result) - set(range(result[-1] + 1))
for i in st:
rescurive(k - 1, n - i, result + [i])
rescurive()
return results
def isPowerOfTwo(self, n: int) -> bool:
bn = bin(n)[2:].rstrip('0')
print(bn)
return True if bin(n)[2:].rstrip('0') == '1' else False
    def calculate(self, s: str) -> int:
        # 224. Basic Calculator -- unimplemented stub (returns None); see
        # calculate1 / calculate2 below for the partial implementations.
        pass
    def calulateSkyine(self, buildings: list) -> list:
        # NOTE(review): work-in-progress helper for the skyline problem;
        # it prints intermediate state, the merged result is never returned
        # (the body ends in `pass`), and `newbuild` is unused.
        print('计算一个轮廓:', buildings)
        buildings.sort(key=lambda x: x[-1])
        # 1. merge buildings of the same height when their spans touch
        newbuild = []
        i = 0
        while i < len(buildings) - 1:
            if buildings[i][-1] == buildings[i + 1][-1]:
                if buildings[i][1] <= buildings[i + 1][1]:
                    buildings[i][0] = min(buildings[i + 1][0], buildings[i][0])
                    buildings[i][1] = max(buildings[i + 1][1], buildings[i][1])
                    buildings.remove(buildings[i + 1])
            else:
                i += 1
        print('计算一个合并后的轮廓:', buildings)
        # 2. shift each building's edges out of taller neighbours; fully
        # shadowed buildings are nulled out
        for i in range(len(buildings) - 1, -1, -1):
            fx, fy, fh = buildings[i]
            for j in range(len(buildings) - 1, -1, -1):
                if j < i and buildings[j]:
                    sx, sy, sh = buildings[j]
                    if sx <= fx <= sy:
                        fx = sy
                    if sx <= fy < sy:
                        fy = sx
            if fy < fx:
                buildings[i] = None
        print('左右移动合并后的轮廓:', buildings)
        pass
#
# def getSkyline(self, buildings: list) -> list:
# # 218.
# # 1. 先找水平线的坐端点
# # 2. 进行同水平端点合并
# # 3. 找出地平线的楼间距之间的左端点
# # buildings.append([buildings[-1][0],buildings[-1][0], buildings[-1][-1]])
# # results = []
# #
# # # 找出高度为0的直线轮廓左端点,同时并计算水平轮廓左端点
# # sk = []
# # x, y = buildings[0][0], buildings[0][1]
# # for i in range(len(buildings)-1):
# # sk.append(buildings[i])
# # if y < buildings[i+1][0]:
# # print('楼间间隔:{}--{}'.format(y, buildings[i+1][0]))
# # results.append([y, 0])
# # self.calulateSkyine(sk)
# # x, y = buildings[i+1][0],buildings[i+1][1]
# # sk.clear()
# # continue
# # x, y = min(x, buildings[i+1][0]), max(y, buildings[i+1][1])
# #
# # self.calulateSkyine(sk+[[26,27,7]])
# #
# pass
def isPalindrome(self, head: ListNode) -> bool:
l = []
while head:
l.append(head.val)
head = head.val
return l == l[::-1]
    def getSkyline(self, buildings):
        # Decompose each building into [lx, h, True] (left edge) and
        # [rx, h, False] (right edge) events, sorted by x, with the intent
        # of sweeping them while tracking the current maximum height.
        # NOTE(review): work in progress and not functional --
        # `pm.append(x, max())` below raises TypeError on the first event,
        # and the height table `ht` is keyed inconsistently (checked by x,
        # updated by h). Do not call until finished.
        node = []
        for lx, rx, h in buildings:
            node.append([lx, h, True])
            node.append([rx, h, False])
        node.sort(key=lambda x: x[0])
        for i in node:
            print(i)
        ht = collections.defaultdict(int)
        pm = []
        maxh = node[0][1]
        for x, h, b in node:
            if x not in ht.keys():
                ht[h] += 1
                pm.append(x, max())
                print(maxh)
            pm.append([x, maxh])
        print(pm)
def lowestCommonAncestor(self, root: TreeNode, p: TreeNode, q: TreeNode) -> TreeNode:
# 返回二叉排序树,两个节点p,q的公共最近父节点
if root == None:
return None
else:
if root.val < min(p.val, q.val):
return self.lowestCommonAncestor(root.right, p, q)
elif root.val > max(p.val, q.val):
return self.lowestCommonAncestor(root.left, p, q)
else:
return root
def findContinueOne(self, dp: list):
temp = [0] + dp
# 查找连续的1的数量
for i in range(1, len(temp)):
if temp[i] == 0:
temp[i] = 0
else:
temp[i] += temp[i - 1]
h = max(temp)
return h
    @Time(n=1000)
    def maximalSquare(self, matrix: list) -> int:
        # LC 221: area of the largest all-1s square.
        # Brute force:
        # 1. Starting from each row i, AND successive rows into dp; while
        #    the longest run of 1s in the accumulated AND is at least the
        #    number of rows combined, the square's side can grow by one,
        #    otherwise stop extending from this row.
        # 2. Rows that are entirely zero are skipped outright.
        result = 0
        matrix = [[int(v) for v in m] for m in matrix]
        for i in range(len(matrix)):
            dp = matrix[i]
            if max(matrix[i] + [0]) == 0:
                continue
            else:
                mh = 1
                for j in range(i + 1, len(matrix)):
                    dp = [dp[k] & matrix[j][k] for k in range(len(dp))]
                    temp = [0] + dp
                    # longest run of consecutive 1s in the ANDed row
                    for k in range(1, len(temp)):
                        if temp[k] == 0:
                            temp[k] = 0
                        else:
                            temp[k] += temp[k - 1]
                    if max(temp) >= j - i + 1:
                        mh += 1
                    else:
                        break
                result = max(result, mh)
        return result ** 2
def deleteNode(self, head, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
pre, ptr = None, head
if not ptr: return None
if ptr.val == node.val: return head.next
while ptr:
if ptr.val == node.val:
pre.next = ptr.next
break
else:
pre, ptr = ptr, ptr.next
return head
def countNodes(self, root: TreeNode) -> int:
# 非递归
stack = []
result = 0
while root or stack:
while root:
stack.append(root)
root = root.left
root = stack.pop()
result += 1
root = root.right
return result
# 递归
return 1 + self.countNodes(root.left) + \
self.countNodes(root.right) if root else 0
def binaryTreePaths(self, root: TreeNode) -> list:
results = []
def Paths(root, result=''):
if not root:
pass
else:
if not root.left and not root.right:
results.append(result + str(root.val))
Paths(root.left, result + str(root.val) + '->')
Paths(root.right, result + str(root.val) + '->')
Paths(root)
print(results)
def addDigits(self, num: int) -> int:
while len(num) != 1:
num = str(sum([int(v) for v in list(str(num))]))
return int(num)
def computeArea(self, A: int, B: int, C: int, D: int, E: int, F: int, G: int, H: int) -> int:
Area_a = (C - A) * (D - B)
Area_b = (G - E) * (H - F)
dx = max(min(C, G) - max(A, E), 0)
dy = max(min(D, F) - max(B, H), 0)
return Area_a + Area_b - dx * dy
pass
    def calculate1(self, s: str) -> int:
        # Integer expression evaluation for + and - with parentheses.
        # Slightly fiddly: every operator push first tries to reduce any
        # pending +/- operations on the stacks.
        s = s.replace(' ', '') + '+0'
        def POP(fstack, nstack):
            # reduce pending + / - operations
            while fstack.__len__() >= 1 and fstack[-1] in ['+', '-'] and nstack.__len__() >= 2:
                if fstack.pop() == '+':
                    nstack.append(nstack.pop() + nstack.pop())
                else:
                    # pops arrive right-operand first, hence the -1 flip
                    nstack.append(-1 * (nstack.pop() - nstack.pop()))
            # after reducing, strip meaningless parentheses such as (212) or (2)
            if fstack.__len__() >= 2 and fstack[-1] == ')' and fstack[-2] == '(':
                fstack.pop()
                fstack.pop()
        pre, fstack, nstack = 0, [], []  # index past the previous operator, operator stack, number stack
        for i in range(0, len(s)):
            if s[i] in ['+', '-', '(', ')']:
                if pre == i:  # two adjacent operators/brackets: nothing numeric to push, only fold or push the symbol
                    pre = i + 1
                    if fstack.__len__() > 1 and fstack[-1] == '(' and s[i] == ')':
                        fstack.pop()
                        POP(fstack, nstack)
                    else:
                        fstack.append(s[i])
                    i += 1
                    continue
                nstack.append(int(s[pre:i]))
                POP(fstack, nstack)
                if fstack.__len__() > 1 and fstack[-1] == '(' and s[i] == ')':
                    fstack.pop()
                    POP(fstack, nstack)
                else:
                    fstack.append(s[i])
                pre = i + 1
                i += 1
        POP(fstack, nstack)
        return nstack[-1]
    def isUgly(self, num: int) -> bool:
        # Dead stub: shadowed by the complete isUgly defined later in this
        # class, so this body is never the one that gets called.
        pass
    def calculate2(self, s: str) -> int:
        # + - * / without parentheses; * and / bind tighter, so only
        # pending * / operations are folded before pushing one of them.
        def cal(fs, ns):
            # apply the top operator of fs to the top two numbers of ns
            if fs[-1] == '-':
                # pops arrive right-operand first, hence the -1 flip
                ns.append(-1 * (ns.pop() - ns.pop()))
            elif fs[-1] == '*':
                ns.append(ns.pop() * ns.pop())
            elif fs[-1] == '/':
                b, a = ns.pop(), ns.pop()
                ns.append(a // b)
            else:
                ns.append(ns.pop() + ns.pop())
            fs.pop()
        s = s.replace(' ', '') + '+0'
        pre, ns, fs = 0, [], []  # index past the previous operator, number stack, operator stack
        for i in range(len(s)):
            if s[i] in ['+', '-']:
                ns.append(int(s[pre:i]))
                # a +/- flushes everything pending
                while ns.__len__() >= 2 and fs.__len__() >= 1:
                    cal(fs, ns)
                fs.append(s[i])
                pre = i + 1
            elif s[i] in ['*', '/']:
                ns.append(int(s[pre:i]))
                # only fold operators of equal (higher) precedence
                while ns.__len__() >= 2 and fs.__len__() >= 1 \
                        and fs[-1] in ["/", "*"]:
                    cal(fs, ns)
                fs.append(s[i])
                pre = i + 1
            else:
                pass
        return ns[-1]
def summaryRanges(self, nums):
result = []
i = 0
while i < len(nums):
start, end = nums[i], nums[i]
for j in range(i + 1, len(nums)):
if nums[j] == end + 1:
end = nums[j]
if j == len(nums) - 1:
i = j + 1
else:
i = j - 1
break
if start == end:
result.append(str(start))
else:
result.append('{}->{}'.format(start, end))
i += 1
return result
def isUgly(self, num: int) -> bool:
'''
质因素只包含2,3,5的数
:param num:
:return:
'''
if num == 0: return False
if num == 1: return True
if num == 2 or num == 3 or num == 5:
return True
elif num % 2 == 0:
return self.isUgly(num // 2)
elif num % 3 == 0:
return self.isUgly(num // 3)
elif num % 5 == 0:
return self.isUgly(num // 5)
else:
return False
def missingNumber(self, nums: list) -> int:
nums.sort()
for i in range(len(nums)):
if nums[i] != i:
return i
return len(nums)
def moveZeroes(self, nums: list) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
i, k = 0, 0
while i < len(nums):
if nums[i] != 0:
nums[k] = nums[i]
k += 1
i += 1
while k < len(nums):
nums[k] = 0
k += 1
print(nums)
nlist, zlist = [], []
for i in nums:
if i == 0:
zlist.append(0)
else:
nlist.append(i)
nums[:] = nlist + zlist
print(nums)
# 打乱顺序
pos = len(nums)
i = 0
while i < pos:
if nums[i] == 0:
pos -= 1
nums[i], nums[pos] = nums[pos], nums[i]
i += 1
print(nums)
pass
def majorityElement(self, nums: list) -> list:
threshold = len(nums) // 3
d = collections.Counter(nums)
return [k for k, v in d.items() if v >= threshold]
d = collections.defaultdict(int)
for i in nums:
d[i] += 1
return [k for k, v in d.items() if v > threshold]
pass
def kthSmallest(self, root: TreeNode, k: int) -> int:
'''
在二叉搜索树种找到第k小的数
先根遍历就行
:param root:树根
:param k:
:return:
'''
stack = []
result = []
while stack != [] or root:
while root:
stack.append(root)
root = root.left
root = stack.pop()
result.append(root.val)
if result.__len__() == k:
break;
root = root.right
return result[-1]
pass
def findPathInBTree(self, root: TreeNode, node: TreeNode):
if root is None:
pass
else:
if root.val == node.val:
return [root]
l = self.findPathInBTree(root.left, node)
if l is None:
l = self.findPathInBTree(root.right, node)
if l is not None:
return [root] + l
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
path_p = self.findPathInBTree(root, p)
for i in path_p:
print(i.val, end=' ')
print()
path_q = self.findPathInBTree(root, q)
for i in path_q:
print(i.val, end=' ')
print()
if path_p == [] or path_q == []:
return None
i = 0
while i < len(path_q) and i < len(path_p):
if path_q[i].val != path_p[i].val:
break;
i += 1
return path_q[i - 1]
def wordPattern(self, pattern: str, str: str) -> bool:
d = collections.defaultdict(set)
ds = collections.defaultdict(set)
str = str.split(' ')
if len(str) != len(pattern): return False
for i in range(len(pattern)):
d[pattern[i]] = d[pattern[i]] | set([str[i]])
ds[str[i]] = ds[str[i]] | set([pattern[i]])
for i in d.values():
if len(i) >= 2:
return False
for i in ds.values():
if len(i) >= 2:
return False
return True
def singleNumber(self, nums: list) -> list:
return [k for k, v in collections.Counter(nums).items() if v == 1]
    def lThreenumbertoWords(self, num: int, dict: dict):
        """Spell out a number below 1000 using the word lookup `dict`.

        Helper for numberToWords; `dict` maps 0-20 and the tens to their
        English words. Spacing quirks (double spaces) are normalised by the
        caller's regex pass.
        """
        result = ''
        if num >= 100:
            b = num // 100
            result = ''.join([result, dict[b], ' Hundred '])
            num %= 100
            if num == 0:
                return result
        # 0-20 and exact tens are in the lookup directly
        if num in dict.keys():
            return ''.join([result, ' ', dict[num]])
        if 99 >= num >= 10:
            b = num // 10 * 10
            result = ''.join([result, dict[b], ' '])
            num %= 10
        if 9 >= num >= 1:
            result = ''.join([result, dict[num]])
        return result
    def numberToWords(self, num: int) -> str:
        """LC 273: spell out a non-negative 32-bit integer in English words."""
        import re
        if num > 2 ** 31 - 1:
            return None
        unit = [' Billion ', ' Million ', ' Thousand ', '']
        nd = {0: 'Zero', 1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five',
              6: 'Six', 7: 'Seven', 8: 'Eight', 9: 'Nine', 10: 'Ten',
              11: 'Eleven', 12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen', 15: 'Fifteen',
              16: 'Sixteen', 17: 'Seventeen', 18: 'Eighteen', 19: 'Nineteen', 20: 'Twenty',
              30: 'Thirty', 40: 'Forty', 50: 'Fifty', 60: 'Sixty', 70: 'Seventy', 80: 'Eighty',
              90: 'Ninety'}
        if num == 0:
            return 'Zero'
        # Spell each 3-digit group, most significant first.
        resultBit = []
        for i in range(4):
            b = num % 1000
            r = self.lThreenumbertoWords(b, nd)
            num = num // 1000
            resultBit.insert(0, r)
        result = ''
        for i in range(4):
            if resultBit[i] != '':
                result = ''.join([result, resultBit[i], unit[i], ''])
        # collapse any doubled spaces left by the group helper
        result = re.sub(re.compile('\s+'), ' ', result)
        return result.strip()
    @Time(n=1)
    def PrimeRange(self, n):
        """Sieve of Eratosthenes variant returning the primes up to n.

        NOTE(review): the table is consistently indexed through l[i - 1] /
        l[k - 1], shifting every slot by one -- verify the boundary
        behaviour (including the l[-1] read for i == 0) before reusing.
        """
        l = [0 for _ in range(n + 1)]
        for i in range(2, n + 1):
            if l[i - 1] != 1:
                for k in range(2 * i, n + 1, i):
                    l[k - 1] = 1
        return [i for i in range(n + 1) if l[i - 1] == 0][1:]
def nthUglyNumber(self, n: int) -> int:
visited = set([1])
h = [1]
count = 0
for i in range(n):
val = heapq.heappop(h)
for factor in [2, 3, 5]:
if val * factor not in visited:
heapq.heappush(h, val * factor)
visited.add(val * factor)
return val
@Time(n=1)
def productExceptSelf(self, nums: list) -> list:
# L 从左向右累乘
# R 从右向左累乘
# 防止越界,像L,R左右各添加 1
# 对于nums对应的位置 i 的结果值:result = L[i]*R[i+2]
nl = len(nums)
L, R = [1 for _ in range(nl + 2)], [1 for _ in range(nl + 2)]
for i in range(nl):
L[i + 1] = L[i] * nums[i]
R[nl - i] = R[nl - i + 1] * nums[nl - i - 1]
return [L[i] * R[i + 2] for i in range(nl)]
    def maxSlidingWindow(self, nums: list, k: int) -> list:
        """LC 239: maximum of every length-k sliding window.

        Reuses the previous window's maximum; a full window rescan only
        happens when the element leaving the window was that maximum.
        """
        if not nums or k == 0: return []
        result = [max(nums[:k])]
        if k >= len(nums): return result
        for i in range(k, len(nums)):
            if nums[i - k] >= result[-1]:
                # the departing element was the max: recompute the window
                result.append(max(nums[i - k + 1:i + 1]))
                continue
            if nums[i] <= nums[i - k]:
                result.append(result[-1])
            elif nums[i] > nums[i - k] and nums[i] <= result[-1]:
                result.append(result[-1])
            else:
                # the incoming element beats the previous maximum
                result.append(nums[i])
        return result
from operator import mul, add, sub
op = {'*': mul, '+': add, '-': sub}
    def diffWaysToCompute(self, input: str) -> list:
        '''
        241. All results obtainable by fully parenthesising the input
        expression in every possible way.
        :param input:
        :return:
        '''
        if input.isdigit():
            return [int(input)]
        else:
            res = []
            for i, s in enumerate(input):
                if s in self.op.keys():
                    # split at each operator; combine every left/right pair
                    L = self.diffWaysToCompute(input[:i])
                    R = self.diffWaysToCompute(input[i + 1:])
                    for l in L:
                        for r in R:
                            res.append(Solution.op[s](l, r))
            return res
        pass
def canWinNim(self, n: int) -> bool:
if n % 4 == 0:
return False
else:
return True
def getHint(self, secret: str, guess: str) -> str:
s, g, i = list(secret), list(guess), 0
while i < s.__len__():
if s[i] == g[i]:
del s[i], g[i]
i -= 1
i += 1
return '{}A{}B'.format(str(len(secret) - len(s)),
str(sum((collections.Counter(s) & collections.Counter(g)).values())))
def hIndex(self, citations: list) -> int:
if len(citations) == 0: return 0
if len(citations) == 1:
if citations[-1] > 0:
return 1
else:
return 0
return [i for i, v in enumerate(sorted(citations, reverse=True) + [0]) if v < i + 1][0]
for i, v in enumerate(sorted(citations, reverse=True) + [0]):
if v < i + 1:
return i
return 0
def hIndex2(self, citations: list) -> int:
if len(citations) == 0: return 0
if len(citations) == 1:
if citations[-1] > 0:
return 1
else:
return 0
return [i for i, v in enumerate(([0] + citations)[::-1]) if v < i + 1][0]
    def firstBadVersion(self, n):
        """LC 278: first bad version via binary search.

        :type n: int
        :rtype: int
        """
        # Local mock of the judge's isBadVersion API (normally supplied by
        # LeetCode); here every version >= 2 is "bad".
        def isBadVersion(n):
            if n >= 2:
                return True
            else:
                return False
        left, right, mid = 1, n, 0
        # binary-search until the interval is tiny, then scan linearly
        while left <= right - 3:
            mid = (left + right) // 2
            if isBadVersion(mid):
                right = mid
            else:
                left = mid
        for i in range(left, right + 1):
            if isBadVersion(i):
                return i
    def numSquares(self, n: int) -> int:
        # 279. DFS with pruning.
        # NOTE(review): experimental -- returns the one-element list
        # min_branch rather than an int, prints on every improvement, the
        # local `flag` is never incremented across calls, and the candidate
        # range stops at K // 2, so optimal answers may be skipped. Verify
        # before trusting results.
        min_branch = [n]
        def searchD(n, deap=0, min_branch=min_branch, flag=0):
            if n == 0:
                min_branch[:] = [deap]
                flag += 1
                print(min_branch)
            elif deap < min_branch[0] and flag <= 50:
                K = math.floor(math.sqrt(n))
                for i in range(K, max(0, K // 2), -1):
                    searchD(n - i ** 2, deap + 1, min_branch)
        searchD(n)
        return min_branch
def findDuplicate(self, nums: list) -> int:
nums.sort()
for i in range(1, len(nums)):
if nums[i] == nums[i - 1]:
return nums[i]
def countOneandZero(self, x, y, board: list):
one, zero = 0, 0
if x - 1 >= 0 and y - 1 >= 0:
if board[x - 1][y - 1] == 1:
one += 1
else:
zero += 1
if x - 1 >= 0:
if board[x - 1][y] == 1:
one += 1
else:
zero += 1
if x - 1 >= 0 and y + 1 < len(board[0]):
if board[x - 1][y + 1] == 1:
one += 1
else:
zero += 1
if y - 1 >= 0:
if board[x][y - 1] == 1:
one += 1
else:
zero += 1
if y + 1 < len(board[0]):
if board[x][y + 1] == 1:
one += 1
else:
zero += 1
if x + 1 < len(board) and y - 1 >= 0:
if board[x + 1][y - 1] == 1:
one += 1
else:
zero += 1
if x + 1 < len(board):
if board[x + 1][y] == 1:
one += 1
else:
zero += 1
if x + 1 < len(board) and y + 1 < len(board[0]):
if board[x + 1][y + 1] == 1:
one += 1
else:
zero += 1
return one, zero
    def gameOfLife(self, board: list) -> None:
        """
        LC 289 (Game of Life): advance *board* one generation in place
        (and also return it).  Neighbour counts are taken from a deep copy
        so updates made during the pass do not affect each other.
        """
        import copy
        state = copy.deepcopy(board)
        for x in range(len(board)):
            for y in range(len(board[0])):
                one, zero = self.countOneandZero(x, y, state)
                if state[x][y] == 0:
                    # Dead cell with exactly three live neighbours is born.
                    if one == 3:
                        board[x][y] = 1
                else:
                    # Live cell dies of over- or under-population.
                    if one >= 4 or one < 2:
                        board[x][y] = 0
        return board
def lengthOfLIS(self, nums: list) -> int:
pass
def isAdditiveNumber(self, num: str) -> bool:
# 暴力法
start = 0
length = len(num)
def bk(p1=-1, p2=-1, start=0):
if start >= length and str(p1) + str(p2) != num and str(p2) != num:
return True
for i in range(start + 1, length + 1):
if num[start] == '0' and i - start > 1:
return False
p3 = int(num[start:i])
if p3 == p1 + p2 or p1 == -1 or p2 == -1:
if bk(p2, p3, i):
return True
return False
return bk()
    def nthSuperUglyNumber1(self, n: int, primes: list) -> int:
        """LC 313, first attempt: heap-based, but it keeps every popped value
        in a set and sorts it at the end; also prints timing measurements
        (leftover benchmarking — kept as-is)."""
        t1 = time.time()
        count = 1;
        heap = [1]
        heapq.heapify(heap)
        result = set([])
        while len(result) < n:
            visited = heapq.heappop(heap)
            for p in primes:
                heapq.heappush(heap, visited * p)
            result.add(visited)
        t2 = time.time()
        result = list(result)
        result.sort()
        t3 = time.time()
        print("t1--->t2: ", t2 - t1)
        print("t2--->t3: ", t3 - t2)
        return result[-1]
def nthSuperUglyNumber(self, n: int, primes: list) -> int:
result = set({1})
ugly = [1]
heapq.heapify(ugly)
while n > 1:
un = heapq.heappop(ugly)
for p in primes:
item = p * un
if item not in result:
heapq.heappush(ugly, item)
result.add(item)
n -= 1
return heapq.heappop(ugly)
@Time(n=1000)
def countSmaller2(self, nums: list) -> list:
import bisect
# 有序插入
sonums = []
result = []
for i, v in enumerate(nums[::-1]):
idx = bisect.bisect_left(sonums, v)
sonums.insert(idx, v)
result.insert(0, idx)
return result
    def countSmaller3(self, nums: list) -> list:
        """NOTE(review): appears unfinished — builds (original-index, rank)
        pairs from the sorted order (ties share the previous rank), prints
        them, and returns the pairs rather than the LC 315 counts."""
        sonums = [[v, i] for i, v in enumerate(nums)]
        sonums = sorted(sonums)
        temp = [[sonums[0][1],0]]
        for i in range(1, len(sonums)):
            if sonums[i-1][0] == sonums[i][0]:
                # Equal values inherit the previous element's rank.
                temp.append([sonums[i][1] , temp[-1][-1]])
            else:
                temp.append([sonums[i][1] , i])
        print(temp)
        temp.sort(key=lambda x:x[0])
        return temp
def removeDuplicateLetters(self, s: str) -> str:
# 记录字母最后一次出现的位置 indexlast
# 在遍历s时,检查当前位置时候在符合结果集result
# 条件:当前位置未在结果集出现,并且,当前位置的字幕的索引是至少是大于等于 result中的位置
# 当前位置后还存在result[-1]
indexast = {}
for i ,v in enumerate(s):
indexast[v] = i;
result = []
for i , v in enumerate(s):
if v not in result:
# # 说明 在 i位置后面还存在result[-1]
while result and v < result[-1] and i < indexast[result[-1]]:
result.pop()
result.append(v)
return ''.join(result)
def maxProduct(self, words: list) -> int:
words_set = [set(v) for v in words]
maxPro = [0]
for i in range(0, len(words)):
for j in range(i+1, len(words)):
if len(words_set[i].intersection(words_set[j])) == 0:
maxPro.append(len(words[i]) * len(words[j]))
return max(maxPro)
pass
def no_common_str(self, str1, str2):
bit_num = lambda ch:ord(ch) - ord('a')
bitmask1 = bitmask2 = 0
for ch in str1:
bitmask1 |= 1 << bit_num(ch);
for ch in str2:
bitmask2 |= 1 << bit_num(ch);
return bitmask1 & bitmask2 == 0
def maxProduct1(self, words):
# 对每一个单词继进行按位的存储,有这个字母,对应位上位 1
# 每个单词的字典序位n, 所对应的位的位置为 1 << n,
# 把所有得字母 | 起来,就形成了单词的(非重复字母)二进制表示
# 比如:ab, abbb拥有相同的二进制位,我们只需要保存最长的单词,用map存储
hasmap = collections.defaultdict(int)
bit_num = lambda ch:ord(ch) - ord('a')
for word in words:
bitmask = 0
for ch in word:
bitmask |= 1<<bit_num(ch)
hasmap[bitmask] = max(hasmap[bitmask], len(word))
max_pro = 0
for x in hasmap:
for y in hasmap:
if x & y == 0:
max_pro = max(max_pro, hasmap[x]*hasmap[y])
return max_pro
#@Time(n=1)
def bulbSwitch(self, n: int) -> int:
result = 0
i = 1
count = 0
while result <= n:
result += i
i += 2
count += 1
print(i, result, count)
return count - 1
    def maxNumber(self, nums1: list, nums2: list, k: int) -> list:
        # LC 321 (create maximum number): not implemented — returns None.
        pass
def coinChange(self, coins: list, amount: int) -> int:
# down -top dp
dp=[ (amount +1) for _ in range(amount+1)]
maxc = amount +1
dp[0]=0
for i in range(1, amount+1):
for j in range(len(coins)):
if coins[j] <= i:
dp[i] = min(dp[i], dp[i-coins[j]]+ 1)
print(dp)
return dp[i] if dp[i] <= amount else -1
    def wiggleSort(self, nums: list) -> None:
        """
        LC 324: reorder *nums* in place toward nums[0] < nums[1] > nums[2] < ...
        by sorting, splitting into halves and interleaving them.
        NOTE(review): prints debug output, and interleaving the halves
        front-to-front can place equal values adjacently on duplicate-heavy
        input — verify against LC 324's strict requirement.
        """
        nums.sort()
        length = len(nums)
        temp1 = nums[:(length-1)//2+1]
        temp2 = nums[(length-1)//2+1:]
        ti=0
        print(temp1)
        print(temp2)
        # Smaller half fills the even slots ...
        for i in range(0, length, 2):
            nums[i]=temp1[ti]
            ti+=1
        ti=0
        # ... larger half fills the odd slots.
        for i in range(1, length, 2):
            nums[i]=temp2[ti]
            ti+=1
        print(nums)
def isPowerOfThree(self, n: int) -> bool:
count = 0
if n<=0:return False
while n > 0:
n = n / 3
if int(n) != n:
return False
count+=1
return (count % 3) == 0
def countRangeSum(self, nums: list, lower: int, upper: int) -> int:
import bisect
count = 0
accm = [0]
x =0
for i in nums:
x += i
l = bisect.bisect_left(accm, x-lower)
r = bisect.bisect_left(accm,x-upper)
count += r- l
bisect.insort(accm, x)
return count
    def oddEvenList(self, head: ListNode) -> ListNode:
        """
        LC 328: regroup a linked list so all odd-positioned nodes come first,
        followed by the even-positioned ones (relative order preserved).
        Builds two chains behind dummy heads, then splices them together.
        """
        odd = ListNode(0)
        oddptr = odd
        even = ListNode(0)
        evenptr = even
        flag = 1  # 1 -> next node joins the odd chain, 0 -> the even chain
        while head is not None:
            if flag == 1:
                oddptr.next = head
                oddptr= oddptr.next
                flag = 0
            elif flag == 0:
                evenptr.next = head
                evenptr=evenptr.next
                flag = 1
            head = head.next
        # Terminate both chains before joining odd -> even.
        oddptr.next=None
        evenptr.next=None
        oddptr.next = even.next
        return odd.next
    def longestIncreasingPath(self, matrix: list) -> int:
        """
        LC 329: length of the longest strictly increasing path in *matrix*
        (4-directional moves).  DFS with memoisation: Pdeep[i][j] caches the
        longest path starting at (i, j), 0 meaning "not computed yet".
        Prints the memo table before returning (leftover debugging).
        """
        if len(matrix) == 0 or len(matrix[-1]) == 0: return 0
        maxdeep = 0
        Pdeep = [[0 for _ in matrix[0]] for _ in matrix]
        def dfs(x, y, matrix=matrix):
            if (x < 0 or x >= len(matrix) or y < 0 or y >= len(matrix[0])):
                return 0
            maxdeep = 0
            # Try each neighbour holding a strictly larger value.
            for i, j in [[x - 1, y], [x + 1, y], [x, y - 1], [x, y + 1]]:
                if i<0 or i >=len(matrix) or j < 0 or j >= len(matrix[0]):
                    continue
                if matrix[i][j] > matrix[x][y]:
                    if Pdeep[i][j] != 0 :
                        # Cached result available: reuse it.
                        maxdeep = max(Pdeep[i][j], maxdeep)
                    else:
                        temp = dfs(i, j, matrix)
                        Pdeep[i][j]=temp
                        maxdeep = max(maxdeep, temp)
            return maxdeep+1
        for i in range(len(matrix)):
            for j in range(len(matrix[0])):
                if Pdeep[i][j]!=0:
                    continue
                Pdeep[i][j] = dfs(i, j, matrix)
                maxdeep = max(maxdeep, Pdeep[i][j])
        print(Pdeep)
        return maxdeep
def minPatches(self, nums: list, n: int) -> int:
m, p, i, size=1,0,0, len(nums)
while ( m< n):
if(i < size and nums[i] <= m):
m+=nums[i] # 说明nums[i]这个数字是存在的
i+= 1
else:
# 说明m这个数字不存在,向后2倍, m- 1 和 1 + m 最大是两倍
m+=m
p+=1
return p
def isValidSerialization(self, preorder: str) -> bool:
if preorder == '#': return True
if preorder.__len__() == 0: return False
preorder = preorder.spli(',')
size = len(preorder)
for _ in range(size):
if ['#','#','#'] in preorder:return False
for i in range(len(preorder)):
if preorder[i] != '#' and preorder[i + 1:i + 3] == ['#', '#']:
if i == 0:
if preorder.__len__() == 3:
return True
else:
if preorder[i - 1] == '#':
preorder = preorder[:i - 1] + ['#', '#'] + preorder[i + 3:]
break
elif preorder[i - 1] != '#':
preorder = preorder[:i] + ['#'] + preorder[i + 3:]
break
return False
def findItinerary(self, tickets: list) -> list:
ld = collections.defaultdict(list)
ticketsnum = collections.defaultdict(int)
for k, v in tickets:
ticketsnum[(k, v)] += 1
ld[k].append(v)
for k, v in ld.items():
ld[k].sort()
result = []
# 有向图的dfs
def dfs(start, path=None, pd=None, ticketsnum=None):
if sum(pd.values()) == len(tickets):
return path + [start]
for next in ld[start]:
if pd[(start, next)] < ticketsnum[(start, next)]:
pd[(start, next)]+= 1
result = dfs(next, path + [start],pd,ticketsnum)
if len(result) > 0:return result
pd[(start, next)] -= 1
return []
result=dfs('JFK',[],collections.defaultdict(int),ticketsnum)
return result
def increasingTriplet(self, nums: list) -> bool:
n1 = n2 = math.inf
for n in nums:
if n< n1:
n1= n
elif n<n2:
n2= n
else: # n<num1 ,n<num2 ,记录前面的像个有序小值,出现了第三个大值
return True
return False
    def isSelfCrossing(self, x: list) -> bool:
        """
        LC 335: does the spiral path with segment lengths x cross itself?
        A crossing, if any, involves at most six consecutive segments, so
        only the bounded patterns starting at each index are checked.
        """
        n = len(x)
        def help(i):
            # Window of up to six segment lengths, zero-padded past the end.
            step=[x[k+i] if k+i < n else 0 for k in range(6)]
            if step[3] < step[1]:return False
            if step[2] <= step[0]:
                # Fourth segment reaches back across the first.
                return True
            if step[3]>=step[1] and step[4]>=(step[2]-step[0]) \
                and step[4]<=step[2] and step[5] >= step[3] - step[1]:
                # Six-segment wrap-around overlap pattern.
                return True
            return False
        for i in range(n-3):
            if help(i):
                return True
        return False
def palindromePairs(self, words: list) -> list:
# 最直接方法;暴力便遍历
# 时间损耗主要就是在查找上面,使用dict查找比list快许多
d = {i: i for i in range(100 * 500)}
l = [i for i in range(100 * 500)]
key = [random.randint(0, 100 * 500) for _ in range(200)]
print(key)
@Time(n=100)
def lookupinDict(key, r):
for k in key:
if k in r:
continue
else:
print("{} is not in".format(k))
lookupinDict(key, d)
lookupinDict(key, l)
result = []
look= {v:k for k, v in enumerate(words)}
for i, w in enumerate(words):
for j in range(len(w)+1):
pre, suf = w[:j],w[j:]
if pre==pre[::-1] and suf[::-1]!=w and suf[::-1] in look:
result.append([look[suf[::-1]], i])
if suf==suf[::-1] and pre[::-1]!=w and pre[::-1] in look and j!=len(w):
result.append([i, look[pre[::-1]]])
return result
    def robdfs(self, root: TreeNode):
        """
        Helper for LC 337 (House Robber III).
        Returns (best, skip): `best` is the maximum loot obtainable in this
        subtree, `skip` is the loot when the current node is NOT robbed
        (the sum of the children's best values).
        """
        if (root == None):
            return 0, 0
        # A leaf contributes only its own value.
        if (root.left == None and root.right == None):
            return root.val, 0
        valLeft, sl = self.robdfs(root.left)
        valRight, sr= self.robdfs(root.right)
        # Either rob this node plus the grandchild sums, or take both children.
        return max(root.val + sr + sl, valLeft + valRight), valLeft+ valRight
    def rob(self, root: TreeNode) -> int:
        """LC 337: maximum loot from a binary tree, no two adjacent houses."""
        result1,result2= self.robdfs(root)
        return max(result1,result2)
def countBits(self, num: int) -> list:
dp = [0] *(num+1)
dp[1] = 1
if num < 2:return dp
for n in range(2, num+1):
dp[n]= n%2+dp[n>>1]
return dp
print(Solution().countBits(1000)) | [
"2511695680@qq.com"
] | 2511695680@qq.com |
f0305eec604f96a1c795b04494e5e2bd3d1ca417 | 14df5d90af993150634e596c28cecf74dffe611f | /imghdr_test.py | 2c67ccbd0946c5e2ff7d38098fb675ccc446307d | [] | no_license | mamaker/IntroPy | 7a0614905b95ab5c15ac94b1245278c3ae5d4ce0 | dfea20eb465077e3512c878c549529a4b9282297 | refs/heads/master | 2020-05-09T18:26:16.681103 | 2019-04-23T01:05:31 | 2019-04-23T01:05:31 | 181,342,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | # -*- coding: utf-8 -*-
"""
imghdr_test.py
Created on Sat Apr 20 11:19:17 2019
@author: madhu
"""
import imghdr
file_name = 'oreilly.png'
print('File', file_name,'is a:', imghdr.what(file_name))
| [
"madhuvasudevan@yahoo.com"
] | madhuvasudevan@yahoo.com |
b73f63a73d88ec21769484c20ab36f41349fb58a | f294b061203f82e2d38a0aa32c489c63cb7bcc9c | /scripts/get_pose_from_base_camera.py~ | 71fded60ace0752abcec698f06f24d2cf8a975d7 | [] | no_license | birlrobotics/birl_baxter_marker_vision | b2d6916ce8c9493cafe94be8404c1b77fbc25890 | 1bf40837b01122e99c7de891863717ccf1723555 | refs/heads/master | 2021-09-06T13:58:22.842886 | 2018-02-07T08:22:57 | 2018-02-07T08:22:57 | 119,490,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | #!/usr/bin/env python
import tf
import numpy as np
import rospy
from geometry_msgs.msg import Pose
import ipdb
from birl_baxter_vision import get_pose
# Module state shared with the service callback: the tf listener is created
# in main(); pose_sum is declared global in cb() but never used here —
# presumably left over from a filtering version.
pose_sum = []
listener = None
def cb(req):
    # Service callback (Python 2 / ROS): look up the marker's pose in the
    # /base frame and return it, retrying until the transform is available
    # or ROS shuts down.  Returns None when req.flag is not True.
    global listener
    marker_pose = []
    if req.flag is True:
        global pose_sum
        rospy.loginfo("get in aruco_marker_582_pick_frame cb")
        while not rospy.is_shutdown():
            try:
                (trans,rot) = listener.lookupTransform("/base", "/aruco_marker_582_pick_frame", rospy.Time(4))
                # Copy translation and quaternion into a Pose message.
                marker_pose_unfilter = Pose()
                marker_pose_unfilter.position.x = trans[0]
                marker_pose_unfilter.position.y = trans[1]
                marker_pose_unfilter.position.z = trans[2]
                marker_pose_unfilter.orientation.x = rot[0]
                marker_pose_unfilter.orientation.y = rot[1]
                marker_pose_unfilter.orientation.z = rot[2]
                marker_pose_unfilter.orientation.w = rot[3]
                resp = get_pose.Response()
                resp.pose = marker_pose_unfilter
                print("get marker pose", resp.pose)
                return resp
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
                # Transform not available yet: report and keep retrying.
                print "Service call failed: %s"%e
def main():
    # Initialise the node, create the shared tf listener, block until the
    # marker transform is seen (4 s timeout), then serve pose requests.
    global listener
    rospy.init_node('get_marker_pose_from_base_camera')
    listener = tf.TransformListener()
    listener.waitForTransform("/base", "/aruco_marker_582_pick_frame", rospy.Time(), rospy.Duration(4.0))
    rospy.Service('get_pose_from_base_camera', get_pose , cb)
    print "get_pose_from_base_camera"
    rospy.spin()
if __name__ == "__main__":
main()
| [
"470963660@qq.com"
] | 470963660@qq.com | |
4f48a8ed86212b4798e38875b2970b4d6d92420d | 7e9b15d1793aaee5873d0047ed7dd0f47f01d905 | /series_tiempo_ar_api/apps/analytics/elasticsearch/constants.py | 0626fc0e4cfa298137da7d090e046ca718473e69 | [
"MIT"
] | permissive | SantiagoPalay/series-tiempo-ar-api | 9822b7eac5714c1ed07ee11664b3608f1fc3e9cf | c0c665fe4caf8ce43a5eb12962ee36a3dd6c2aa4 | refs/heads/master | 2020-04-24T19:41:02.857554 | 2019-02-21T14:43:23 | 2019-02-21T14:43:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | from series_tiempo_ar_api.libs.indexing.constants import \
VALUE, CHANGE, PCT_CHANGE, CHANGE_YEAR_AGO, PCT_CHANGE_YEAR_AGO
# Elasticsearch index name used for the query records.
SERIES_QUERY_INDEX_NAME = 'query'
# Supported series representation modes (values imported from the indexing
# constants module).
REP_MODES = [
    VALUE,
    CHANGE,
    PCT_CHANGE,
    CHANGE_YEAR_AGO,
    PCT_CHANGE_YEAR_AGO,
]
# Collapse-aggregation identifiers.
AGG_DEFAULT = 'avg'
AGG_SUM = 'sum'
AGG_END_OF_PERIOD = 'end_of_period'
AGG_MAX = 'max'
AGG_MIN = 'min'
AGGREGATIONS = [
    AGG_DEFAULT,
    AGG_SUM,
    AGG_END_OF_PERIOD,
    AGG_MAX,
    AGG_MIN,
]
# Query-string parameter names.
PARAM_REP_MODE = 'representation_mode'
PARAM_COLLAPSE_AGG = 'collapse_aggregation'
| [
"19612265+lucaslavandeira@users.noreply.github.com"
] | 19612265+lucaslavandeira@users.noreply.github.com |
c65169c0f5d5ad182a0cbbbc52473534b1556c20 | 41385183a5e2bc980eaf6d3c8620b05324f1cdfb | /app.py | d25389583961b06fde3fa24befa98485fc5d7919 | [] | no_license | lanru2001/Flask_MongoDB_User_Login | 84b6654b6519a2924d7dd05105dc0943337e83ac | 15693abed930c1f50a0a039d2753da0d713499cb | refs/heads/main | 2023-09-04T00:20:12.160065 | 2021-10-02T13:45:50 | 2021-10-02T13:45:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,106 | py | from flask import Flask, render_template, request, url_for, redirect, session
import pymongo
import bcrypt
# Flask application instance.
app = Flask(__name__)
# Secret key used to sign session cookies.
# NOTE(review): a hard-coded secret ("testing") must not ship to production.
app.secret_key = "testing"
# Connect to the MongoDB Atlas cluster.
# NOTE(review): credentials are embedded in the URI (an older one is kept
# below, commented out) — move them to environment variables.
# client = pymongo.MongoClient("mongodb+srv://Richard:Password@cluster0-xth9g.mongodb.net/Richard?retryWrites=true&w=majority")
client = pymongo.MongoClient("mongodb+srv://id:pw@cluster-test.fndbj.mongodb.net/loginData?retryWrites=true&w=majority")
# Database and collection holding the registered users.
db = client.get_database('loginData')
records = db.register
# Each view below is bound to its URL route.
@app.route("/", methods=['post', 'get'])
def index():
message = ''
#if method post in index
if "email" in session:
return redirect(url_for("logged_in"))
if request.method == "POST":
user = request.form.get("fullname")
email = request.form.get("email")
password1 = request.form.get("password1")
password2 = request.form.get("password2")
#if found in database showcase that it's found
user_found = records.find_one({"name": user})
email_found = records.find_one({"email": email})
if user_found:
message = 'There already is a user by that name'
return render_template('index.html', message=message)
if email_found:
message = 'This email already exists in database'
return render_template('index.html', message=message)
if password1 != password2:
message = 'Passwords should match!'
return render_template('index.html', message=message)
else:
#hash the password and encode it
hashed = bcrypt.hashpw(password2.encode('utf-8'), bcrypt.gensalt())
#assing them in a dictionary in key value pairs
user_input = {'name': user, 'email': email, 'password': hashed}
#insert it in the record collection
records.insert_one(user_input)
#find the new created account and its email
user_data = records.find_one({"email": email})
new_email = user_data['email']
#if registered redirect to logged in as the registered user
return render_template('logged_in.html', email=new_email)
return render_template('index.html')
@app.route("/login", methods=["POST", "GET"])
def login():
message = 'Please login to your account'
if "email" in session:
return redirect(url_for("logged_in"))
if request.method == "POST":
email = request.form.get("email")
password = request.form.get("password")
#check if email exists in database
email_found = records.find_one({"email": email})
if email_found:
email_val = email_found['email']
passwordcheck = email_found['password']
#encode the password and check if it matches
if bcrypt.checkpw(password.encode('utf-8'), passwordcheck):
session["email"] = email_val
return redirect(url_for('logged_in'))
else:
if "email" in session:
return redirect(url_for("logged_in"))
message = 'Wrong password'
return render_template('login.html', message=message)
else:
message = 'Email not found'
return render_template('login.html', message=message)
return render_template('login.html', message=message)
# Landing page for authenticated users.
@app.route('/logged_in')
def logged_in():
    """Show the logged-in page, or bounce to /login when no session exists."""
    if "email" in session:
        email = session["email"]
        return render_template('logged_in.html', email=email)
    else:
        return redirect(url_for("login"))
@app.route("/logout", methods=["POST", "GET"])
def logout():
if "email" in session:
session.pop("email", None)
return render_template("signout.html")
else:
return render_template('index.html')
if __name__ == "__main__":
app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
939bbd7bf7728c85e4103fe291379ff7cc85c868 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /K9w9hEd9Pn7DtMzjs_19.py | 2d21f26206d7f537dd96e25ad0563a243041c849 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py |
def high_low(txt):
    """Return "<max> <min>" of the space-separated integers in *txt*."""
    numbers = [int(token) for token in txt.split(" ")]
    return "{} {}".format(max(numbers), min(numbers))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
720b4dcd7ceb3b6dcfe8bd266a64baaada1a0d6b | 8ee4b695683038b6387bc806f654739753fbce11 | /Rock paper scissors game in python.py | 6fd5eb936330a9823643b584e8fed6784302afbf | [] | no_license | nikhilgajam/Python-Programs | 535bc30a1fdf7861e5a2a3cb364fc7acc741cb93 | 0426553d904e8e734cafc1b7fcd627ea3a5c33a4 | refs/heads/master | 2023-05-23T22:18:33.379631 | 2021-06-13T05:54:39 | 2021-06-13T05:54:39 | 262,720,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,061 | py | import random
print("Rock Paper Scissors Game\n")
items = ["Rock", "Paper", "Scissors"]
print("Enter r: Rock")
print("Enter p: Paper")
print("Enter s: Scissors")
print("Enter q: Quit")
user_points = 0
comp_points = 0
count = 0
while True:
count += 1
user = input("\nIteration " + str(count) + " Enter: r (or) p (or) s: ").lower()
entered = ""
if 'r' in user:
entered = "Rock"
elif 'p' in user:
entered = "Paper"
elif 's' in user:
entered = "Scissors"
elif 'q' in user:
break
else:
entered = "else"
print("Enter Instructed Letters Only")
comp = random.choice(items)
if entered == "else":
continue
print("Computer:", comp)
if comp == entered:
print("Tie")
elif comp == "Rock" and entered == "Paper":
print("You Got A Point: Paper Covered The Rock")
user_points += 1
elif comp == "Paper" and entered == "Rock":
print("Computer Got A Point: Paper Covered The Rock")
comp_points += 1
elif comp == "Scissors" and entered == "Rock":
print("You Got A Point: Rock Smashed The Scissors")
user_points += 1
elif comp == "Rock" and entered == "Scissors":
print("Computer Got A Point: Rock Smashed The Scissors")
comp_points += 1
elif comp == "Paper" and entered == "Scissors":
print("You Got A Point: Scissors Cuts The Paper")
user_points += 1
elif comp == "Scissors" and entered == "Paper":
print("Computer Got A Point: Scissors Cuts The Paper ")
comp_points += 1
if user_points > comp_points:
print("\n\nYou Won", "\nYou Got:", user_points, "Point(s)", "\nComputer Got:", comp_points, "Point(s)")
elif user_points == comp_points:
print("\n\nTie", "\nComputer Got:", comp_points, "Point(s)", "\nYou Got:", user_points, "Point(s)")
else:
print("\n\nComputer Won", "\nComputer Got:", comp_points, "Point(s)", "\nYou Got:", user_points, "Point(s)")
| [
"noreply@github.com"
] | noreply@github.com |
fe67feca053463568fa8d800a270e350be30e94d | 0042c37405a7865c50b7bfa19ca531ec36070318 | /new_selenium/tech_singelmodel/singel_config.py | f8378ef130aefcd4a7dfb737f7add632fdc2dde0 | [] | no_license | lu-judong/untitled1 | b7d6e1ad86168673283917976ef0f5c2ad97d9e0 | aa158e7541bae96332633079d67b5ab19ea29e71 | refs/heads/master | 2022-05-23T18:55:45.272216 | 2020-04-28T09:55:38 | 2020-04-28T09:55:38 | 257,822,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | # contents = ['运营数据统计分析系统','单一模型指标分析']
contents = ['RAMS运营数据分析','单一模型指标分析'] | [
"ljd_python@163.com"
] | ljd_python@163.com |
406cb7135c83701c74464890857e900fa021691b | 3a60de7ac2ae35a7de6df41870da9157383c75b2 | /Webapp_v_2_4/index.py | 652c78a5943691f8df233b20d7454cab7e4febbe | [] | no_license | UniteIdeas/linksSDGs-Tagger | 454be9b4b2fe364b04e3995a41865a7c9a1c48e4 | c6bb1de8362195f75238cc7864e6272c236e9314 | refs/heads/master | 2016-08-12T10:39:05.721352 | 2016-02-29T22:10:01 | 2016-02-29T22:10:01 | 52,827,000 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,749 | py | # -*- coding: utf-8 -*-
#### Version includes network viz and links to text and pdf originals
from flask import Flask
from flask import render_template
from flask import request
import os
import json
#import time
#import urllib2
import solr
#### Configuring this app and its associated servers
search_host = 'http://localhost:8081/solr/' # Full address of the solr search server
flask_port = 8080 #port for the web app
#### Begins web app
app = Flask(__name__)
def get_result(raw_query , fquery):
    """Run *raw_query* (with optional filter query *fquery*) against the
    Solr collection and bundle the results plus SDG / Publication facets
    for the view templates."""
    # Join whitespace-separated terms with '+' for the Solr query syntax.
    query = '+'.join(raw_query.split())
    fquery = fquery
    search_collection = 'linksdgs'
    search_server = search_host + str(search_collection)
    s = solr.SolrConnection(search_server)
    response = s.select(query , facet='true' , facet_field=['SDG' , 'Publication'] , rows=75 , fq=fquery)
    sdg_facet = response.facet_counts['facet_fields']['SDG']
    publication_facet = response.facet_counts['facet_fields']['Publication']
    result_list = response.results
    numFound = response.numFound
    header = response.header
    chart_data = json.dumps(sdg_facet)
    # Parallel lists of facet counts and labels for the chart widget.
    serie = []
    for hit in sdg_facet:
        serie.append(sdg_facet[hit])
    label = []
    for hit in sdg_facet:
        label.append(hit.encode('utf8'))
    return {'header': header , 'numFound' : numFound , 'sdg_facet' : sdg_facet , 'publication_facet' : publication_facet , 'result_list' : result_list , 'chart_data': chart_data, 'serie' : serie , 'label' : label}
@app.route("/")
def index():
return render_template("index.html")
@app.route("/search")
def search():
fquery = request.args.get("fquery")
search = request.args.get("search")
if not search:
search = "*"
if not fquery:
fquery = ""
data = get_result(search , fquery)
return render_template("search.html", data=data)
@app.route("/data/<rows_returned>")
def data(rows_returned):
search = request.args.get("search")
query = search
search_collection = 'linksdgs'
search_server = search_host + str(search_collection)
s = solr.SolrConnection(search_server)
response = s.select(query , rows=rows_returned)
return render_template("data.json", response=response)
@app.route("/data2")
def data2():
row = request.args.get("row")
query = request.args.get("query")
search_collection = 'linksdgs'
search_server = search_host + str(search_collection)
s = solr.SolrConnection(search_server)
response = s.select('india' , rows=10)
return render_template("data2.json", response=response)
@app.route("/sigma")
def sigma():
return render_template("sigma.html")
if __name__ == '__main__':
port = int(os.environ.get('PORT', flask_port))
app.run(host='0.0.0.0', port=port, debug=True) | [
"uniteideas@un.org"
] | uniteideas@un.org |
b8b5c514ebde33ad84487fe3b16f60eed66ee18c | 85f844fa1197f5d296ed3736a2f0754deb094e00 | /pipeline/functions/LessonsClustering/main.py | d05520007e5728e3c63f15ec72b5d06d89e28ae5 | [] | no_license | chainesanbuenaventura/ml-search | eb49b552312992402656e395b39760e6429252d0 | 5f6461d3e173c65650342e27a6822afd3a996d1c | refs/heads/master | 2023-01-14T06:58:18.918785 | 2020-11-25T11:14:16 | 2020-11-25T11:14:16 | 287,909,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,621 | py | from argparse import ArgumentParser
import LessonsClustering
import sys
sys.path.append("pipeline/functions/DataFunctions")
sys.path.append("../DataFunctions")
from utils import *
if __name__ == "__main__":
tracking_uri = get_tracking_uri()
# Arguments
parser = ArgumentParser()
parser.add_argument("--mode", dest="mode", default="train", required=True, help='Set mode (fine_tuning, train, predict)')
parser.add_argument("--environment", dest="environment", default="development", required=True, help='Set which environment to run the clustering (development, staging, or production)')
parser.add_argument("--run_id_model", dest="run_id_model", default="", required=False, help='Set run id where the model to be used is found (predict mode only)')
parser.add_argument("--update_related_lessons", dest="update_related_lessons", default="False", required=False, help='Option to update the related lessons (any mode)')
parser.add_argument("--number_of_topics", dest="number_of_topics", default=0, required=False, type=int, help='Set number of topics (train mode only)')
parser.add_argument("--alpha", dest="alpha", default=0, required=False, type=float, help='Set alpha (train mode only)')
parser.add_argument("--beta", dest="beta", default=0, required=False, type=float, help='Set beta (train mode only)')
parser.add_argument("--max_number_of_topics", dest="max_number_of_topics", default=0, required=False, type=int, help='Set max number of topics (fine_tuning mode only)')
args = parser.parse_args()
lc = LessonsClustering.Trainer(tracking_uri, args)
lc.run(args)
| [
"chainesanbuenaventura@yahoo.com"
] | chainesanbuenaventura@yahoo.com |
22b6b99e057bfb747edf85ca484cae0b8799622f | 5c4e935199d80413a60410043cbf368608ff1e57 | /bankaccount.py | a4382d10347ae1f2d47af9d2f0e9b199a90498d1 | [] | no_license | meullah/Bank-System----Tkinter | 7ca17ea9d34e2cdaf08db43470411d2ba4830343 | 1782b8eaeaecf914629b9e5dc52123d88689e60c | refs/heads/master | 2023-01-22T22:32:55.252966 | 2020-12-04T17:35:00 | 2020-12-04T17:35:00 | 318,590,181 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,285 | py | import os
import fileinput
import datetime
class BankAccount(object):
    """
    ACME Bank: flat-file account store.

    Accounts live in `users.txt` as "id,pin,balance,interest" lines; every
    deposit/withdrawal is appended to `transactions/<id>.txt`.

    Fixes over the original:
      * deposit() rewrote users.txt via fileinput(inplace=True) but only
        emitted the matching line (the else branch was `pass`), silently
        DELETING every other account on each deposit;
      * rewritten lines were printed without a trailing newline, so the
        following account line could be merged into them.
    """

    def __init__(self):
        # Create the storage file / transaction directory on first use.
        try:
            open('users.txt', 'x')
        except:
            pass
        if not os.path.exists('transactions'):
            os.makedirs('transactions')

    def accountExists(self, acc_id):
        """True when an account line with this id exists."""
        return self.getAccountDetails(acc_id) is not None

    def signUp(self, acc_id, pin, balance=0, interest=3):
        """Register a new account unless the id is already taken."""
        if not self.accountExists(acc_id):
            str_to_write = "{},{},{},{}\n".format(acc_id, pin, balance, interest)
            self.write_line_to_file('users.txt', str_to_write)
        else:
            print('account already exisits')

    def signIn(self, acc_id, pin):
        """True when the id exists and its PIN matches."""
        line = self.getAccountDetails(acc_id)
        if line is None:
            return False
        data = line.split(',')
        return data[1] == pin.strip()

    def _set_balance(self, acc_id, balance):
        # Rewrite users.txt in place: update only the matching account and
        # echo every other line unchanged (all newline-terminated).
        with fileinput.input('users.txt', inplace=True) as file:
            for line in file:
                data = line.split(',')
                if data[0] == acc_id:
                    print("{},{},{},{}".format(data[0], data[1], balance,
                                               data[3].rstrip()))
                else:
                    print(line.rstrip())

    def deposit(self, acc_id, ammount):
        """Add `ammount` to the balance and log the transaction."""
        data = self.getAccountDetails(acc_id).split(',')
        self._set_balance(acc_id, int(data[2]) + int(ammount))
        filename = "transactions/{}.txt".format(acc_id)
        line = "deposit,{},{}\n".format(ammount, str(datetime.datetime.now()))
        self.write_line_to_file(filename, line)

    def withdraw(self, acc_id, ammount):
        """Subtract `ammount` when funds allow; return True on success."""
        if not self.checkBalance(acc_id, ammount):
            return False
        data = self.getAccountDetails(acc_id).split(',')
        self._set_balance(acc_id, int(data[2]) - int(ammount))
        filename = "transactions/{}.txt".format(acc_id)
        line = "withdraw,{},{}\n".format(ammount, str(datetime.datetime.now()))
        self.write_line_to_file(filename, line)
        return True

    def checkBalance(self, acc_id, ammount):
        """
        True when withdrawing `ammount` would leave a non-negative balance;
        False as well when the account does not exist.
        """
        line = self.getAccountDetails(acc_id)
        if line is None:
            return False
        data = line.split(',')
        return int(data[2]) - int(ammount) >= 0

    def write_line_to_file(self, filename, line):
        """Append one line to *filename*."""
        with open(filename, 'a') as file:
            file.write(line)

    def getAccountDetails(self, acc_id):
        """Return the raw users.txt line for this id, or None."""
        with open('users.txt', 'r') as file:
            for line in file:
                data = line.split(',')
                if data[0] == acc_id.strip():
                    return line
        return None

    def getBalance(self, acc_id):
        """Return the stored balance (as a string, exactly as in users.txt)."""
        return self.getAccountDetails(acc_id).split(',')[2]

    def getAllTransactions(self, acc_id):
        """Return the full transaction log for this id as one string."""
        filename = "transactions/{}.txt".format(acc_id)
        with open(filename, 'r') as file:
            return file.read()
| [
"mehsanullah24@gmail.com"
] | mehsanullah24@gmail.com |
d3dc0e7def2e33d7435887fb84898395ae5bf901 | f7d2e661d35267e64fa2790931958f59db503e0c | /hashtables/ex2/hashtable.py | 373172d760a3d979c81f8bf537a0d35b26dd8349 | [] | no_license | PercivalN/Sprint-Challenge--Hash | aa0b6abae7ffbe9c48a49e802a7356e25c44709e | e6f59089621e623586e202760ba0afd83e129148 | refs/heads/master | 2022-06-11T10:37:22.518070 | 2020-05-08T21:29:14 | 2020-05-08T21:29:14 | 262,352,102 | 0 | 0 | null | 2020-05-08T14:57:51 | 2020-05-08T14:57:51 | null | UTF-8 | Python | false | false | 6,910 | py | class HashTableEntry:
"""
Hash Table entry, as a linked list node.
"""
    def __init__(self, key, value):
        # One key/value pair; `next` chains colliding entries into a list.
        self.key = key
        self.value = value
        self.next = None
class HashTable:
"""
A hash table that with `capacity` buckets
that accepts string keys
Implement this.
"""
def __init__(self, capacity):
self.capacity = capacity # Sets the number of buckets in the hash table
self.storage = [None] * capacity
self.initial_capacity = capacity
self.number_keys = 0
def fnv1(self, key):
"""
FNV-1 64-bit hash function
For 64-bit:
FNV_prime = 2^40 + 2^8 + 0xb3
offset_basis = 14695981039346656037
XOR operator ^
hash = offset_basis
for each octet_of_data to be hashed
hash = hash * FNV_prime
hash = hash xor octet_of_data
return hash
Implement this, and/or DJB2.
"""
FNV_prime = 2**40 + 2**8 + 0xb3
hash = 14695981039346656037
for x in key:
hash = hash * FNV_prime
hash = hash ^ ord(x)
return hash & 0xFFFFFFFFFFFFFFFF
def djb2(self, key):
"""
DJB2 32-bit hash function
Implement this, and/or FNV-1.
"""
hash = 5381
for x in key:
hash = ((hash << 5) + hash) + ord(x)
return hash & 0xFFFFFFFF
def _hash_index(self, key):
"""
Take an arbitrary key and return a valid integer index
between within the storage capacity of the hash table.
"""
#return self.fnv1(key) % self.capacity
return self.djb2(key) % self.capacity
def put(self, value, key):
"""
Store the value with the given key.
Hash collisions should be handled with Linked List Chaining.
Implement this.
"""
self.size_check()
hashed_key = self._hash_index(key)
new_linked_pair = HashTableEntry(key, value)
node = self.storage[hashed_key]
if node is None:
self.storage[hashed_key] = new_linked_pair
self.number_keys += 1
return
while node is not None and node.key != key:
prev = node
node = node.next
if node is None:
prev.next = new_linked_pair
self.number_keys += 1
else:
# The key was found, so update the value
node.value = value
def delete(self, key):
"""
Remove the value stored with the given key.
Print a warning if the key is not found.
Implement this.
"""
# Get the hashed_key.
hashed_key = self._hash_index(key)
# Get the value stored in storage at that hashed_key.
node = self.storage[hashed_key]
# If that node is the desired one, point the storage[hashed_key] to its next.
if node.key == key:
self.storage[hashed_key] = node.next
self.number_keys -= 1
#self.size_check()
return
# Traverse the LL until the key is found or the end of the LL is reached.
while node is not None and node.key != key:
prev = node
node = node.next
if node is None:
print(f'{key} was not found')
return None
# Remove the LinkedPair node from the chain by assigning
# the .next pointer of the previous node to be the node that its .next pointer was pointing to.
prev.next = node.next
self.number_keys -= 1
self.size_check()
def get(self, key):
"""
Retrieve the value stored with the given key.
Returns None if the key is not found.
Implement this.
"""
# Compute hash
hashed_key = self._hash_index(key)
# Get the first node in LL in storage
node = self.storage[hashed_key]
# Traverse the linked list at this node until the key is found or the end is reached
while node is not None and node.key != key:
node = node.next
if node is None:
return None
else:
return node.value
def resize(self):
"""
Doubles the capacity of the hash table and
rehash all key/value pairs.
Implement this.
"""
self.capacity = self.capacity * 2
self.make_new_storage()
def make_new_storage(self):
new_storage = [None] * self.capacity
for i in range(len(self.storage)):
# print(f'at index {i} in self.storage')
node = self.storage[i]
while node is not None:
# traverse the LL to rehash each key/value pair
# print("At key: " + str(node.key))
hashed_key = self._hash_index(node.key)
new_storage[hashed_key] = node
node = node.next
self.storage = new_storage
def shrink(self):
'''
Halves the capacity of the hash table and
rehashes all key/value pairs.
'''
self.capacity = self.capacity // 2
self.make_new_storage()
def size_check(self):
'''
Update your HashTable to automatically double in size when it grows past a load factor of 0.7
and half in size when it shrinks past a load factor of 0.2.
This (I assume the halving) should only occur
if the HashTable has been resized past the initial size.
'''
# num_entries = sum(x is not None for x in self.storage)
# print('num_entries: ' + str(num_entries))
# load_factor = num_entries/self.capacity
load_factor = self.number_keys/self.capacity
# print('load factor: ' + str(load_factor))
if load_factor > 0.7:
# print('Time to increase the capacity; load factor is ' + str(load_factor))
self.resize()
if self.capacity > self.initial_capacity:
if load_factor < 0.2 and self.capacity // 2 >= 8: # 128
# print('time to shrink the hashtable in half')
self.shrink()
# Manual smoke test: runs only when this module is executed directly.
if __name__ == "__main__":
    ht = HashTable(2)
    # NOTE(review): verify the argument order in these calls matches
    # put()'s declared parameter order.
    ht.put("line_1", "Tiny hash table")
    ht.put("line_2", "Filled beyond capacity")
    ht.put("line_3", "Linked list saves the day!")
    print("")
    # Test storing beyond capacity
    print(ht.get("line_1"))
    print(ht.get("line_2"))
    print(ht.get("line_3"))
    # Test resizing
    old_capacity = len(ht.storage)
    ht.resize()
    new_capacity = len(ht.storage)
    print(f"\nResized from {old_capacity} to {new_capacity}.\n")
    # Test if data intact after resizing
    print(ht.get("line_1"))
    print(ht.get("line_2"))
    print(ht.get("line_3"))
    print("")
| [
"percival.ngan@gmail.com"
] | percival.ngan@gmail.com |
c0fc7599a4f5f31cd68b27f1dde95262902cb7a4 | d2dee1e5b7b159bdbb41530c1652077f66f67e00 | /manage.py | 924142eda8915f20f0153558d41286928ab87fb4 | [] | no_license | Synkied/shcloud_exercise | ca0f11cd53879c8fcca7c4b2375f29e2084abaf7 | 66e3173ae0fca1fc585391f695539278eb70c3c3 | refs/heads/master | 2023-04-25T02:35:55.625780 | 2021-04-26T20:03:09 | 2021-04-26T20:03:09 | 361,878,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: select the project settings and hand off to Django's CLI."""
    # Fall back to the project settings unless the caller already set them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shcloud.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_error
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"synkx@hotmail.fr"
] | synkx@hotmail.fr |
6b52ad8453b36735d8731816f36404c955c16449 | 0a06d43477d8080493b28b98e5a2df56cff6ae1f | /lesson_1/test.py | 3db28e509163dfe048aad274380169fc59845a43 | [] | no_license | mpaolini/python-course-IAL-TSID | 51623476f7dd7cd249adc0956df2c71fa966629b | 071468c5fc7754385aef16e97b12ef273536b433 | refs/heads/master | 2016-09-05T09:45:57.629103 | 2015-06-04T12:34:59 | 2015-06-04T12:34:59 | 31,312,626 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | def ciao():
print('Hello!!!!')
ciao()
| [
"marco@credra.com"
] | marco@credra.com |
9eceb655ca509682b537f2575d2cb58e16c98206 | 9991754529fdb40390c0fe3220c7c9093ec516b4 | /rigging/setupOcc/setupOcc.py | 049ce5ac91c5cebd67a556aac8b36401e72f8751 | [] | no_license | skarone/PipeL | daa2e4b5fa8a0760f5959d3cd4b420345efe62cf | 53ddcaffd66ebe16a91c40ff5cab0ae2ba58b04e | refs/heads/master | 2020-05-22T11:55:15.281250 | 2016-03-21T16:51:22 | 2016-03-21T16:51:22 | 8,676,637 | 13 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | import os
import general.ui.pySideHelper as uiH
reload( uiH )
from Qt import QtGui,QtCore
# Load the Qt Designer .ui file that sits next to this module and compile
# it into a (form, base) class pair that SetupOccUI inherits from.
PYFILEDIR = os.path.dirname( os.path.abspath( __file__ ) )
uifile = PYFILEDIR + '/setupOcc.ui'
fom, base = uiH.loadUiType( uifile )
import general.mayaNode.mayaNode as mn
import maya.cmds as mc
class SetupOccUI(base,fom):
	"""Maya tool window for tagging shape nodes with occlusion-pass attributes.

	Each button adds or removes one of three custom attributes
	(texture_Occ, iris_Occ, hideForOcc) on the currently selected shapes.
	"""
	def __init__(self, parent = uiH.getMayaWindow(), *args):
		# PyQt and PySide resolve the compiled-.ui base class differently,
		# hence the two super() forms.
		if uiH.USEPYQT:
			super(base, self).__init__(parent)
		else:
			super(SetupOccUI, self).__init__(parent)
		self.setupUi(self)
		self.setObjectName( 'SetupOccUI' )
		self.makeConnections()
	def makeConnections(self):
		"""Wire every add/remove button to its handler slot."""
		self.connect(self.addTextureToOcc_btn, QtCore.SIGNAL("clicked()"), self.addTextureToOcc)
		self.connect(self.removeTextureToOcc_btn, QtCore.SIGNAL("clicked()"), self.removeTextureToOcc)
		self.connect(self.addIrisToOcc_btn, QtCore.SIGNAL("clicked()"), self.addIrisToOcc)
		self.connect(self.removeIrisToOcc_btn, QtCore.SIGNAL("clicked()"), self.removeIrisToOcc)
		self.connect(self.addHideOcc_btn, QtCore.SIGNAL("clicked()"), self.addHideForOcc)
		self.connect(self.removeHideOcc_btn, QtCore.SIGNAL("clicked()"), self.removeHideForOcc)
	def _getShapes(self):
		"""Return the shape nodes for the current selection.

		Nodes of type 'mesh' are kept as-is; anything else contributes its
		.shape.  Nodes with neither are printed and skipped.
		"""
		shas = []
		for n in mn.ls( sl = True ):
			if not n.shape and not n.type == 'mesh':
				print n.name
				continue
			if n.type == 'mesh': #selecting a mesh
				shas.append( n )
			else:
				shas.append( n.shape )
		return shas
	def addTextureToOcc(self):
		"""Ask the user for a texture file and store its path as texture_Occ."""
		shas = self._getShapes()
		fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file ',
				'/home')
		if fname:
			for sha in shas:
				# Create the string attribute on first use, then set it.
				if not sha.a.texture_Occ.exists:
					occText = sha.a.texture_Occ.add( dt='string' )
				sha.a.texture_Occ.v = fname
	def removeTextureToOcc(self):
		"""Delete the texture_Occ attribute from the selected shapes."""
		shas = self._getShapes()
		for sha in shas:
			if sha.a.texture_Occ.exists:
				sha.a.texture_Occ.delete()
	def addIrisToOcc(self):
		"""Add a boolean iris_Occ attribute (set to True) to the selection."""
		shas = self._getShapes()
		for sha in shas:
			if not sha.a.iris_Occ.exists:
				occText = sha.a.iris_Occ.add( at='bool' )
			sha.a.iris_Occ.v = True
	def removeIrisToOcc(self):
		"""Delete the iris_Occ attribute from the selected shapes."""
		shas = self._getShapes()
		for sha in shas:
			if sha.a.iris_Occ.exists:
				sha.a.iris_Occ.delete()
	def addHideForOcc(self):
		"""Add a boolean hideForOcc attribute (set to True) to the selection."""
		shas = self._getShapes()
		for sha in shas:
			if not sha.a.hideForOcc.exists:
				occText = sha.a.hideForOcc.add( at='bool' )
			sha.a.hideForOcc.v = True
	def removeHideForOcc(self):
		"""Delete the hideForOcc attribute from the selected shapes."""
		shas = self._getShapes()
		for sha in shas:
			if sha.a.hideForOcc.exists:
				sha.a.hideForOcc.delete()
def main():
	"""Launch the occlusion-setup window inside Maya.

	Any existing instance of the window is deleted first, so only one
	copy ever exists.
	"""
	if mc.window( 'SetupOccUI', q = 1, ex = 1 ):
		mc.deleteUI( 'SetupOccUI' )
	window = SetupOccUI()
	window.show()
| [
"iurruty@ARTE-03.bittanimation.com"
] | iurruty@ARTE-03.bittanimation.com |
dcf74ea766343e742bb5de34be500e993c65f899 | 9233cb6e005a45b54096ab784b1b42af5de6b7b5 | /Simulator.py | 2ac333f3d0938080eb360795a92adff4407189b1 | [] | no_license | josvinjohn/Linear-Regression-Simulator | 8bdcb297501443371a272c4abbf5ec23e5a6d5d8 | 3ef1d5a7140584df66d80f95098a80ffdbe12265 | refs/heads/master | 2022-12-01T23:14:22.040065 | 2020-08-14T19:18:36 | 2020-08-14T19:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib.animation as animation
dataset = pd.read_csv('dataset.txt',sep='\t',header=None)
dataset.columns = ["X","y"]
print(dataset.iloc[:, 0].values)
print(dataset.iloc[:,1].values)
x = dataset.iloc[:, 0].values
y = dataset.iloc[:,1].values
x2,y2 = x,y
# Scatter Plot of X & Y values
plt.scatter(x,y)
plt.show()
x = np.reshape(x, (10, 1))
y = np.reshape(y,(10,1))
theta = np.array([1,0])
theta = np.reshape(theta,(2,1))
x1 = np.ones((10,2))
x1[:,1:] = x
m = len(x1)
alpha = 0.000212
plt.ion()
fig = plt.figure()
# fig.ylim([0, (max(y) + 30)])
# fig.scatter(x,y)
ax = fig.add_subplot(111)
line1, = ax.plot(x,y,'r-')
while True:
y1 = theta[0]+theta[1]*x
plt.scatter(x,y,c='b')
line1.set_ydata(y1)
fig.canvas.draw()
h = x1.dot(theta)
error = h-y
sqrd_error = np.square(error)
sum_sqrd_error = np.sum(sqrd_error)
cost = (sum_sqrd_error/(2*m))
xT = x1.T
grad = (xT.dot(error))/(m)
theta = theta - alpha*(grad)
fig.canvas.flush_events()
| [
"noreply@github.com"
] | noreply@github.com |
7869596e6a636baa7c30ae9a60439ea1836e1a19 | 32ecc35fa7ceb01965e7f94c34a386a847cb4b93 | /cnn.py | fc1418136a0eea31ce4014f37d4457d37e9a87dc | [] | no_license | wenshiqi0/mxnet | f0a8c58d76992d8918bedb5f7e8cef0dd8e78c78 | 44464d199c2a1bfd926d6e59fe87b4ca2918467f | refs/heads/master | 2021-09-09T15:15:08.403455 | 2018-03-17T11:01:07 | 2018-03-17T11:01:07 | 124,996,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,315 | py | from __future__ import print_function
import mxnet as mx
import numpy as np
from mxnet import nd, autograd, gluon
# Run everything on the first GPU; seed MXNet for reproducible weight init.
ctx = mx.gpu()
mx.random.seed(1)
batch_size = 64
num_inputs = 784
num_outputs = 10
def transform(data, label):
    # HWC uint8 image -> CHW float32 scaled to [0, 1]; label -> float32.
    return nd.transpose(data.astype(np.float32), (2, 0, 1)) / 255, label.astype(np.float32)
train_data = gluon.data.DataLoader(
    gluon.data.vision.MNIST(train=True, transform=transform),
    batch_size, shuffle=True)
test_data = gluon.data.DataLoader(
    gluon.data.vision.MNIST(train=False, transform=transform),
    batch_size, shuffle=False)
#######################
# Set the scale for weight initialization and choose
# the number of hidden units in the fully-connected layer
#######################
weight_scale = .01
num_fc = 128
num_filter_conv_layer1 = 20
num_filter_conv_layer2 = 50
# W1/b1: first conv layer -- 20 filters over 1 input channel, 3x3 kernels.
W1 = nd.random_normal(shape=(num_filter_conv_layer1, 1,
                             3, 3), scale=weight_scale, ctx=ctx)
b1 = nd.random_normal(shape=num_filter_conv_layer1,
                      scale=weight_scale, ctx=ctx)
# W2/b2: second conv layer -- 50 filters over 20 channels, 5x5 kernels.
W2 = nd.random_normal(shape=(num_filter_conv_layer2, num_filter_conv_layer1, 5, 5),
                      scale=weight_scale, ctx=ctx)
b2 = nd.random_normal(shape=num_filter_conv_layer2,
                      scale=weight_scale, ctx=ctx)
# W3/b3: dense layer from the 800 flattened conv features to num_fc units.
W3 = nd.random_normal(shape=(800, num_fc), scale=weight_scale, ctx=ctx)
b3 = nd.random_normal(shape=num_fc, scale=weight_scale, ctx=ctx)
# W4/b4: output layer producing one raw score per class.
W4 = nd.random_normal(shape=(num_fc, num_outputs), scale=weight_scale, ctx=ctx)
b4 = nd.random_normal(shape=num_outputs, scale=weight_scale, ctx=ctx)
params = [W1, b1, W2, b2, W3, b3, W4, b4]
# Allocate gradient buffers so autograd can record into them.
for param in params:
    param.attach_grad()
def relu(X):
    """Elementwise rectified linear unit: max(X, 0)."""
    zeros = nd.zeros_like(X)
    return nd.maximum(X, zeros)
def softmax(y_linear):
    """Row-wise softmax, shifted by the global max for numerical stability."""
    shifted = y_linear - nd.max(y_linear)
    exponentials = nd.exp(shifted)
    # Sum over all axes except the first, reshaped for broadcasting.
    normalizer = nd.sum(exponentials, axis=0, exclude=True).reshape((-1, 1))
    return exponentials / normalizer
def softmax_cross_entropy(yhat_linear, y):
    """Cross-entropy between one-hot labels ``y`` and raw scores ``yhat_linear``."""
    log_probs = nd.log_softmax(yhat_linear)
    return - nd.nansum(y * log_probs, axis=0, exclude=True)
def net(X, debug=False):
    """
    Forward pass of the CNN: two conv+relu+avg-pool stages, flatten,
    one hidden dense layer with relu, then a linear output layer that
    returns raw (pre-softmax) class scores.

    Set ``debug=True`` to print the shape after each stage.
    """
    ########################
    # Define the computation of the first convolutional layer
    ########################
    h1_conv = nd.Convolution(data=X, weight=W1, bias=b1, kernel=(
        3, 3), num_filter=num_filter_conv_layer1)
    h1_activation = relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation, pool_type="avg",
                    kernel=(2, 2), stride=(2, 2))
    if debug:
        print("h1 shape: %s" % (np.array(h1.shape)))
    ########################
    # Define the computation of the second convolutional layer
    ########################
    h2_conv = nd.Convolution(data=h1, weight=W2, bias=b2, kernel=(
        5, 5), num_filter=num_filter_conv_layer2)
    h2_activation = relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation, pool_type="avg",
                    kernel=(2, 2), stride=(2, 2))
    if debug:
        print("h2 shape: %s" % (np.array(h2.shape)))
    ########################
    # Flattening h2 so that we can feed it into a fully-connected layer
    ########################
    h2 = nd.flatten(h2)
    if debug:
        print("Flat h2 shape: %s" % (np.array(h2.shape)))
    ########################
    # Define the computation of the third (fully-connected) layer
    ########################
    h3_linear = nd.dot(h2, W3) + b3
    h3 = relu(h3_linear)
    if debug:
        print("h3 shape: %s" % (np.array(h3.shape)))
    ########################
    # Define the computation of the output layer
    ########################
    yhat_linear = nd.dot(h3, W4) + b4
    if debug:
        print("yhat_linear shape: %s" % (np.array(yhat_linear.shape)))
    return yhat_linear
def SGD(params, lr):
    """In-place vanilla gradient-descent step over every parameter."""
    for weight in params:
        # Slice-assign so the update happens in the existing buffer.
        weight[:] = weight - lr * weight.grad
def evaluate_accuracy(data_iterator, net):
    """
    Fraction of correctly classified samples over ``data_iterator``.

    ``net`` maps a batch of images to raw class scores; the prediction
    is the argmax per row.  (Removed a dead ``nd.one_hot`` call and an
    unused ``enumerate`` index from the original loop.)
    """
    numerator = 0.
    denominator = 0.
    for data, label in data_iterator:
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)
        # argmax over the class axis gives the hard prediction.
        predictions = nd.argmax(net(data), axis=1)
        numerator += nd.sum(predictions == label)
        denominator += data.shape[0]
    return (numerator / denominator).asscalar()
epochs = 20
learning_rate = .01
smoothing_constant = .01
for e in range(epochs):
    for i, (data, label) in enumerate(train_data):
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)
        label_one_hot = nd.one_hot(label, num_outputs)
        # Record the forward pass so loss.backward() can backprop.
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label_one_hot)
        loss.backward()
        SGD(params, learning_rate)
        ##########################
        # Keep a moving average of the losses
        ##########################
        curr_loss = nd.mean(loss).asscalar()
        # Seed the average with the very first batch, then use an EMA.
        moving_loss = (curr_loss if ((i == 0) and (e == 0))
                       else (1 - smoothing_constant) * moving_loss + (smoothing_constant) * curr_loss)
    test_accuracy = evaluate_accuracy(test_data, net)
    train_accuracy = evaluate_accuracy(train_data, net)
    print("Epoch %s. Loss: %s, Train_acc %s, Test_acc %s" %
          (e, moving_loss, train_accuracy, test_accuracy))
| [
"shiqi.wsq@alipay.com"
] | shiqi.wsq@alipay.com |
ea903c783288555a020424311efe615d58374292 | b21d95e53d6ace067f6b8118c3e09113978b16ac | /lib/passlib/registry.py | 8f42a8521fd167c559c897f08567298071e329c6 | [] | no_license | makahmad/Joobali | e7293027ab19ffda530419b05952425325b1882e | 6e4e5f5dbec2ecfb4dc2cb677635309e5820affe | refs/heads/master | 2023-01-28T23:04:42.886376 | 2020-12-07T17:32:00 | 2020-12-07T17:32:00 | 58,698,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,298 | py | """passlib.registry - registry for password hash handlers"""
#=============================================================================
# imports
#=============================================================================
# core
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# pkg
from passlib.exc import ExpectedTypeError, PasslibWarning
from passlib.utils import is_crypt_handler
from passlib.utils.compat import native_string_types
# local
__all__ = [
"register_crypt_handler_path",
"register_crypt_handler",
"get_crypt_handler",
"list_crypt_handlers",
]
#=============================================================================
# proxy object used in place of 'passlib.hash' module
#=============================================================================
class _PasslibRegistryProxy(object):
    """proxy module passlib.hash

    this module is in fact an object which lazy-loads
    the requested password hash algorithm from wherever it has been stored.
    it acts as a thin wrapper around :func:`passlib.registry.get_crypt_handler`.
    """
    __name__ = "passlib.hash"
    __package__ = None
    def __getattr__(self, attr):
        # only reached for names not already in __dict__, i.e. handlers
        # that have not been loaded yet.
        if attr.startswith("_"):
            raise AttributeError("missing attribute: %r" % (attr,))
        handler = get_crypt_handler(attr, None)
        if handler:
            return handler
        else:
            raise AttributeError("unknown password hash: %r" % (attr,))
    def __setattr__(self, attr, value):
        if attr.startswith("_"):
            # writing to private attributes should behave normally.
            # (required so GAE can write to the __loader__ attribute).
            object.__setattr__(self, attr, value)
        else:
            # writing to public attributes should be treated
            # as attempting to register a handler.
            register_crypt_handler(value, _attr=attr)
    def __repr__(self):
        return "<proxy module 'passlib.hash'>"
    def __dir__(self):
        # this adds in lazy-loaded handler names,
        # otherwise this is the standard dir() implementation.
        attrs = set(dir(self.__class__))
        attrs.update(self.__dict__)
        attrs.update(_locations)
        return sorted(attrs)
# create single instance - available publically as 'passlib.hash'
_proxy = _PasslibRegistryProxy()
#=============================================================================
# internal registry state
#=============================================================================
# singleton uses to detect omitted keywords
_UNSET = object()
# dict mapping name -> loaded handlers (just uses proxy object's internal dict)
_handlers = _proxy.__dict__
# dict mapping names -> import path for lazy loading.
# * import path should be "module.path" or "module.path:attr"
# * if attr omitted, "name" used as default.
_locations = dict(
# NOTE: this is a hardcoded list of the handlers built into passlib,
# applications should call register_crypt_handler_path()
apr_md5_crypt = "passlib.handlers.md5_crypt",
argon2 = "passlib.handlers.argon2",
atlassian_pbkdf2_sha1 = "passlib.handlers.pbkdf2",
bcrypt = "passlib.handlers.bcrypt",
bcrypt_sha256 = "passlib.handlers.bcrypt",
bigcrypt = "passlib.handlers.des_crypt",
bsd_nthash = "passlib.handlers.windows",
bsdi_crypt = "passlib.handlers.des_crypt",
cisco_pix = "passlib.handlers.cisco",
cisco_asa = "passlib.handlers.cisco",
cisco_type7 = "passlib.handlers.cisco",
cta_pbkdf2_sha1 = "passlib.handlers.pbkdf2",
crypt16 = "passlib.handlers.des_crypt",
des_crypt = "passlib.handlers.des_crypt",
django_bcrypt = "passlib.handlers.django",
django_bcrypt_sha256 = "passlib.handlers.django",
django_pbkdf2_sha256 = "passlib.handlers.django",
django_pbkdf2_sha1 = "passlib.handlers.django",
django_salted_sha1 = "passlib.handlers.django",
django_salted_md5 = "passlib.handlers.django",
django_des_crypt = "passlib.handlers.django",
django_disabled = "passlib.handlers.django",
dlitz_pbkdf2_sha1 = "passlib.handlers.pbkdf2",
fshp = "passlib.handlers.fshp",
grub_pbkdf2_sha512 = "passlib.handlers.pbkdf2",
hex_md4 = "passlib.handlers.digests",
hex_md5 = "passlib.handlers.digests",
hex_sha1 = "passlib.handlers.digests",
hex_sha256 = "passlib.handlers.digests",
hex_sha512 = "passlib.handlers.digests",
htdigest = "passlib.handlers.digests",
ldap_plaintext = "passlib.handlers.ldap_digests",
ldap_md5 = "passlib.handlers.ldap_digests",
ldap_sha1 = "passlib.handlers.ldap_digests",
ldap_hex_md5 = "passlib.handlers.roundup",
ldap_hex_sha1 = "passlib.handlers.roundup",
ldap_salted_md5 = "passlib.handlers.ldap_digests",
ldap_salted_sha1 = "passlib.handlers.ldap_digests",
ldap_des_crypt = "passlib.handlers.ldap_digests",
ldap_bsdi_crypt = "passlib.handlers.ldap_digests",
ldap_md5_crypt = "passlib.handlers.ldap_digests",
ldap_bcrypt = "passlib.handlers.ldap_digests",
ldap_sha1_crypt = "passlib.handlers.ldap_digests",
ldap_sha256_crypt = "passlib.handlers.ldap_digests",
ldap_sha512_crypt = "passlib.handlers.ldap_digests",
ldap_pbkdf2_sha1 = "passlib.handlers.pbkdf2",
ldap_pbkdf2_sha256 = "passlib.handlers.pbkdf2",
ldap_pbkdf2_sha512 = "passlib.handlers.pbkdf2",
lmhash = "passlib.handlers.windows",
md5_crypt = "passlib.handlers.md5_crypt",
msdcc = "passlib.handlers.windows",
msdcc2 = "passlib.handlers.windows",
mssql2000 = "passlib.handlers.mssql",
mssql2005 = "passlib.handlers.mssql",
mysql323 = "passlib.handlers.mysql",
mysql41 = "passlib.handlers.mysql",
nthash = "passlib.handlers.windows",
oracle10 = "passlib.handlers.oracle",
oracle11 = "passlib.handlers.oracle",
pbkdf2_sha1 = "passlib.handlers.pbkdf2",
pbkdf2_sha256 = "passlib.handlers.pbkdf2",
pbkdf2_sha512 = "passlib.handlers.pbkdf2",
phpass = "passlib.handlers.phpass",
plaintext = "passlib.handlers.misc",
postgres_md5 = "passlib.handlers.postgres",
roundup_plaintext = "passlib.handlers.roundup",
scram = "passlib.handlers.scram",
scrypt = "passlib.handlers.scrypt",
sha1_crypt = "passlib.handlers.sha1_crypt",
sha256_crypt = "passlib.handlers.sha2_crypt",
sha512_crypt = "passlib.handlers.sha2_crypt",
sun_md5_crypt = "passlib.handlers.sun_md5_crypt",
unix_disabled = "passlib.handlers.misc",
unix_fallback = "passlib.handlers.misc",
)
# master regexp for detecting valid handler names
_name_re = re.compile("^[a-z][a-z0-9_]+[a-z0-9]$")
# names which aren't allowed for various reasons
# (mainly keyword conflicts in CryptContext)
_forbidden_names = frozenset(["onload", "policy", "context", "all",
"default", "none", "auto"])
#=============================================================================
# registry frontend functions
#=============================================================================
def _validate_handler_name(name):
    """Validate a handler name, raising :exc:`ValueError` on any problem.

    A valid name is non-empty, lower-case, matches ``_name_re`` (3+ chars,
    starts with a-z, only a-z / 0-9 / underscore), contains no double
    underscore, and is not one of the reserved ``_forbidden_names``.
    Returns True when every check passes.
    """
    if not name:
        raise ValueError("handler name cannot be empty: %r" % (name,))
    if name != name.lower():
        raise ValueError("name must be lower-case: %r" % (name,))
    if _name_re.match(name) is None:
        raise ValueError("invalid name (must be 3+ characters, "
                         " begin with a-z, and contain only underscore, a-z, "
                         "0-9): %r" % (name,))
    if "__" in name:
        raise ValueError("name may not contain double-underscores: %r" % (name,))
    if name in _forbidden_names:
        raise ValueError("that name is not allowed: %r" % (name,))
    return True
def register_crypt_handler_path(name, path):
    """Register the import location of a handler, for lazy loading.

    Nothing is imported until :func:`get_crypt_handler` is first asked for
    *name*; at that point *path* is imported and the handler pulled out.

    :arg name: handler name
    :arg path:
        dotted module path, optionally followed by ``:attr`` naming the
        attribute inside the module (defaults to *name*).

    For example, ``register_crypt_handler_path("myhash", "myapp.helpers")``
    makes ``get_handler("myhash")`` load ``myapp.helpers.myhash``, while a
    path of ``"myapp.helpers:MyHash"`` loads ``myapp.helpers.MyHash``.
    """
    _validate_handler_name(name)
    if path.startswith("."):
        raise ValueError("path cannot start with '.'")
    if ':' in path:
        # Everything after the first colon must be a bare attribute name.
        _, _, attr_part = path.partition(':')
        if ':' in attr_part:
            raise ValueError("path cannot have more than one ':'")
        if '.' in attr_part:
            raise ValueError("path cannot have '.' to right of ':'")
    _locations[name] = path
    log.debug("registered path to %r handler: %r", name, path)
def register_crypt_handler(handler, force=False, _attr=None):
    """Immediately register a password hash handler.

    After this call :func:`get_crypt_handler` will return *handler*
    when asked for its name.

    :arg handler: the password hash handler to register
    :param force: overwrite any existing handler under the same name
    :param _attr:
        [internal kwd] when given, ``handler.name`` must equal this value
        or :exc:`ValueError` is raised.

    :raises TypeError: if *handler* is not a valid handler object.
    :raises ValueError:
        if the handler's name (or other required attributes) contain
        invalid values.
    :raises KeyError:
        if a different handler is already registered under the same name
        and ``force=True`` was not specified.
    """
    if not is_crypt_handler(handler):
        raise ExpectedTypeError(handler, "password hash handler", "handler")
    if not handler:
        raise AssertionError("``bool(handler)`` must be True")
    name = handler.name
    _validate_handler_name(name)
    if _attr and _attr != name:
        raise ValueError("handlers must be stored only under their own name (%r != %r)" %
                         (_attr, name))
    # short-circuit / refuse / warn depending on what is already registered
    existing = _handlers.get(name)
    if existing is handler:
        log.debug("same %r handler already registered: %r", name, handler)
        return
    if existing:
        if not force:
            raise KeyError("another %r handler has already been registered: %r" %
                           (name, existing))
        log.warning("overriding previously registered %r handler: %r",
                    name, existing)
    _handlers[name] = handler
    log.debug("registered %r handler: %r", name, handler)
def get_crypt_handler(name, default=_UNSET):
    """return handler for specified password hash scheme.

    this method looks up a handler for the specified scheme.
    if the handler is not already loaded,
    it checks if the location is known, and loads it first.

    :arg name: name of handler to return
    :param default: optional default value to return if no handler with specified name is found.

    :raises KeyError: if no handler matching that name is found, and no default specified, a KeyError will be raised.

    :returns: handler attached to name, or default value (if specified).
    """
    # catch invalid names before we check _handlers,
    # since it's a module dict, and exposes things like __package__, etc.
    if name.startswith("_"):
        # _UNSET sentinel distinguishes "no default given" from default=None.
        if default is _UNSET:
            raise KeyError("invalid handler name: %r" % (name,))
        else:
            return default
    # check if handler is already loaded
    try:
        return _handlers[name]
    except KeyError:
        pass
    # normalize name (and if changed, check dict again)
    assert isinstance(name, native_string_types), "name must be string instance"
    alt = name.replace("-","_").lower()
    if alt != name:
        warn("handler names should be lower-case, and use underscores instead "
             "of hyphens: %r => %r" % (name, alt), PasslibWarning,
             stacklevel=2)
        name = alt
    # try to load using new name
    try:
        return _handlers[name]
    except KeyError:
        pass
    # check if lazy load mapping has been specified for this driver
    path = _locations.get(name)
    if path:
        # "module:attr" form; attr defaults to the handler name itself.
        if ':' in path:
            modname, modattr = path.split(":")
        else:
            modname, modattr = path, name
        ##log.debug("loading %r handler from path: '%s:%s'", name, modname, modattr)
        # try to load the module - any import errors indicate runtime config, usually
        # either missing package, or bad path provided to register_crypt_handler_path()
        mod = __import__(modname, fromlist=[modattr], level=0)
        # first check if importing module triggered register_crypt_handler(),
        # (this is discouraged due to its magical implicitness)
        handler = _handlers.get(name)
        if handler:
            # XXX: issue deprecation warning here?
            assert is_crypt_handler(handler), "unexpected object: name=%r object=%r" % (name, handler)
            return handler
        # then get real handler & register it
        handler = getattr(mod, modattr)
        register_crypt_handler(handler, _attr=name)
        return handler
    # fail!
    if default is _UNSET:
        raise KeyError("no crypt handler found for algorithm: %r" % (name,))
    else:
        return default
def list_crypt_handlers(loaded_only=False):
    """Return a sorted list of the names of all known crypt handlers.

    :param loaded_only: when True, restrict the list to handlers that have
        actually been imported.
    :returns: sorted list of handler names.
    """
    names = set(_handlers)
    if not loaded_only:
        names.update(_locations)
    # _handlers shares a namespace with the proxy module's private
    # attributes, so filter those out before sorting.
    return sorted(n for n in names if not n.startswith("_"))
# NOTE: these two functions mainly exist just for the unittests...
def _has_crypt_handler(name, loaded_only=False):
    """Check whether a handler name is known (used mainly by the unittests).

    Useful for quickly testing whether a handler is already loaded, or
    whether it exists at all without actually importing it.

    :arg name: name of handler
    :param loaded_only: when True, return False for handlers that exist
        but have not been loaded yet.
    """
    if name in _handlers:
        return True
    return not loaded_only and name in _locations
def _unload_handler_name(name, locations=True):
    """Remove a handler from the registry.

    .. warning::
        internal helper, used only by the unittests.

    Drops any loaded handler registered under *name*, and (unless
    ``locations=False``) any lazy-load path as well; unknown names are
    a no-op.

    :arg name: name of handler to unload
    :param locations: if False, registered handler locations are kept.
    """
    _handlers.pop(name, None)
    if locations:
        _locations.pop(name, None)
#=============================================================================
# eof
#=============================================================================
| [
"rongjian.lan@gmail.com"
] | rongjian.lan@gmail.com |
bc5454dd5411448ab82a301cfd2fe7ed486cdfaa | 135773b18ab8e957ac61466a4eeb6f974ad795e7 | /venv/Scripts/futurize-script.py | 4d617d215f65e092ce3072c1be334b6217910269 | [] | no_license | ihinojos/Sheets | fd81f57bb4c713016f37e850baf09b24c8b6da1d | 549b76b5b61bfaddd45934abe6d1b1f77b12aa4e | refs/heads/master | 2022-07-20T05:43:12.999853 | 2020-05-21T18:48:45 | 2020-05-21T18:48:45 | 259,423,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | #!c:\users\dev3\pycharmprojects\sheets\venv\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','futurize'
__requires__ = 'future==0.18.2'
import re
import sys
from pkg_resources import load_entry_point
# setuptools-generated entry point: normalize argv[0] by stripping the
# "-script.py"/".exe" wrapper suffix, then run the 'futurize' console
# script declared by future==0.18.2 and exit with its return code.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('future==0.18.2', 'console_scripts', 'futurize')()
    )
| [
"antdraws13@gmail.com"
] | antdraws13@gmail.com |
d4bb2b3d28352b9927a2b1a2b8a2706afba0981f | fcddb64166f5ba89f5014e1413af6b4f8a8a3ac3 | /qed/navbar.py | 07fc69b87aa5b876bb573003d4bd6060cf2df9d6 | [] | no_license | FBOBecker/qed | e93e831b1f53a381caa3bdc13922132b9af55134 | 6a63a5c45c36d2e4f2b482465306416e51462e65 | refs/heads/master | 2021-01-24T21:49:18.565133 | 2016-07-17T23:22:38 | 2016-07-17T23:22:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,403 | py | from dominate.tags import a, button, div, form, input_, li, nav, span, ul
from flask import url_for
from flask_login import current_user
from flask_nav.elements import Navbar as Navbar_, NavigationItem, Subgroup, View
from flask_nav.renderers import Renderer
from flask_wtf.csrf import generate_csrf
class Input(NavigationItem):
    """Navigation element rendered as an <input> field inside a NavForm."""
    def __init__(self, name, type_="text"):
        # ``name`` and ``type`` become the rendered input's HTML attributes.
        self.name = name
        self.type = type_
class NavForm(NavigationItem):
    """Inline navbar form: a titled submit button POSTing to ``action``.

    ``alert`` selects the Bootstrap button flavor (btn-<alert>), and
    ``items`` are the Input elements rendered inside the form.
    """
    def __init__(self, title, action, alert, *items):
        self.title = title
        self.action = action
        self.alert = alert
        self.items = items
class Navbar(Navbar_):
    """flask-nav Navbar extended with right-aligned inline forms."""
    def __init__(self, title, *items, forms=()):
        super().__init__(title, *items)
        # Extra NavForm elements, rendered after the nav item list.
        self.forms = forms
# Attributes of the Bootstrap "hamburger" button that toggles the
# collapsed navbar on small screens ("cls" is dominate's alias for the
# HTML class attribute).
NAVBAR_BTN = {
    "type": "button",
    "cls": "navbar-toggle collapsed",
    "data-toggle": "collapse",
    "data-target": "#navbar-collapse",
    "aria-expanded": "false"
}
class BootstrapRenderer(Renderer):
    """Renders flask-nav elements as Bootstrap 3 navbar markup via dominate."""
    @classmethod
    def visit(cls, node):
        # Dispatch on the node's MRO: the first (most specific) class with
        # a matching visit_<classname> method handles the node.
        if isinstance(node, type):
            mro = node.mro()
        else:
            mro = type(node).mro()
        for sub in mro:
            meth = getattr(cls, 'visit_' + sub.__name__.lower(), None)
            if meth is None:
                continue
            return meth(node)
        raise NotImplementedError('No visitation method visit_{}'.format(node.__class__.__name__))
    @classmethod
    def visit_navbar(cls, node):
        # <nav><div.container-fluid> with a header (brand + toggle button)
        # and a collapsible body holding the nav items and inline forms.
        # dominate's `with` blocks append child tags to the open element.
        parent = nav(cls="navbar navbar-default")
        with parent:
            container = div(cls="container-fluid")
        with container:
            header = div(cls="navbar-header")
            with header:
                with button(**NAVBAR_BTN):
                    span("Toggle navigation", cls="sr-only")
                    span(cls="icon-bar")
                    span(cls="icon-bar")
                    span(cls="icon-bar")
                a(node.title, cls="navbar-brand", href=url_for("main.index"))
        with container:
            content = div(cls="collapse navbar-collapse", id="navbar-collapse")
            with content:
                list_ = ul(cls="nav navbar-nav")
                with list_:
                    for item in node.items:
                        cls.visit(item)
            with content:
                for item in node.forms:
                    cls.visit(item)
        return parent
    @classmethod
    def visit_view(cls, node):
        # Highlight the entry for the currently active view.
        if node.active:
            list_item = li(cls="active")
        else:
            list_item = li()
        with list_item:
            a(node.text, href=url_for(node.endpoint))
        return list_item
    @classmethod
    def visit_navform(cls, node):
        # Right-aligned inline form POSTing to the configured endpoint;
        # a fresh CSRF token is embedded as a hidden input.
        f = form(cls="navbar-form navbar-right", action=url_for(node.action), method="POST")
        with f:
            input_(type="hidden", name="csrf_token", value=generate_csrf(None, None))
            with div(cls="form-group"):
                for item in node.items:
                    cls.visit(item)
            button(node.title, cls="btn btn-" + node.alert, type="submit")
        return f
    @classmethod
    def visit_input(cls, node):
        i = input_(type=node.type, cls="form-control", name=node.name)
        return i
def nav_not_logged_in():
return Navbar(
"q.e.d.",
View("Home", "main.index"),
forms=[
NavForm("Login", "user.login", "primary",
Input("nick_name"),
Input("password", "password"))
]
)
def nav_not_active():
return Navbar(
"q.e.d.",
View("Home", "main.index"),
forms=[
NavForm("Logout", "user.logout", "danger")
]
)
def nav_user():
return Navbar(
"q.e.d.",
View("Home", "main.index"),
View("Forum", "forum.index"),
View("Profile", "profile.index"),
forms=[
NavForm("Logout", "user.logout", "danger")
]
)
def nav_admin():
return Navbar(
"q.e.d.",
View("Home", "main.index"),
View("Forum", "forum.index"),
View("Profile", "profile.index"),
forms=[
NavForm("Logout", "user.logout", "danger")
]
)
def nav_builder():
if current_user.is_anonymous:
return nav_not_logged_in()
if not current_user.is_active:
return nav_not_active()
if current_user.is_admin:
return nav_admin()
return nav_user()
| [
"buwen@stud.uni-heidelberg.de"
] | buwen@stud.uni-heidelberg.de |
7f01a5a3e8fa25cb87fb0996c7a9b9c9c659ec35 | accf13fe9fe93f7bac349ab787f126ad2f31ceba | /Exercise Files/02_01/begin/readInData-1.py | 208198243c853e5bdda36e9b63e74bb13e3c7df2 | [] | no_license | BB-AMarcelo/python_automation_learning | ef4097d3cae51d4b29d542a6f5dfc313a5ce482d | f83f6daaf2c801b5a6887bc494d6372af1173e19 | refs/heads/master | 2020-09-05T09:46:42.749986 | 2019-11-14T20:31:07 | 2019-11-14T20:31:07 | 220,062,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | import csv
#final desired format
# - Charts [["Test Name",<diff from avg>]]
# - spreadsheet [["Test Name",<current run time>]]
timing_data = []
with open('TestTimingData.csv') as csv_file:
file_reader = csv.reader(csv_file)
for row in file_reader:
timing_data.append(row)
column_chart_data = [["Test Name", "Diff from Avg"]]
table_data = [["Test Name", "Run Time (s)"]]
for row in timing_data[1:]:
test_name = row[0]
if not row[1] or not row[2]:
continue
current_run_time = float(row[1])
avg_run_time = float(row[2])
diff_from_avg = avg_run_time - current_run_time
column_chart_data.append([test_name,diff_from_avg])
table_data.append([test_name,current_run_time])
print (column_chart_data)
print (table_data)
| [
"tester@MBP11.local"
] | tester@MBP11.local |
11b96266483b3e7939e1aeb6c29df9ca08fa6764 | f89fa59bcf723a66f06bff8ef75e802332c1ac07 | /tests.py | 93782563a13e8c2ec45310e4fc61a3b9ddb0d144 | [] | no_license | mccutchen/chirp | cbf1fd4fb79d83ecceafcf28a90fc67f53f25c82 | 0754feaeb8e04a08cd412c17a4ce55dbd76389bd | refs/heads/master | 2020-05-05T04:09:20.506439 | 2011-03-04T18:29:09 | 2011-03-04T18:29:09 | 1,440,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,445 | py | import logging
import os
import sys
import unittest
import chirp
class ChirpTests(unittest.TestCase):
def setUp(self):
self.api = chirp.TwitterAPI()
def test_attr_access(self):
new_api = self.api.a.b.c.d.e.f
self.assertEqual(len(new_api.paths), 6)
def test_call_access(self):
new_api = self.api(1)(2)(3)(4)
self.assertEqual(len(new_api.paths), 4)
def test_mixed_access(self):
new_api = self.api.a(1).b(2).c(3)
self.assertEqual(len(new_api.paths), 6)
def test_preprocess_params(self):
a = dict(a=1, b=2, c=True)
b = chirp.preprocess_params(a)
self.assertNotEqual(a, b)
self.assertEqual(b['c'], '1')
c = dict(a=1, b=2, c=False)
d = chirp.preprocess_params(c)
self.assertNotEqual(c, d)
self.assertEqual(d['c'], '0')
def test_build_get_url(self):
new_api = self.api.statuses.public_timeline
url, body = chirp.build_url('get', new_api.paths)
self.assertEqual(url, '/1/statuses/public_timeline.json')
self.assertEqual(body, None)
def test_build_get_url_with_params(self):
new_api = self.api.statuses.public_timeline
url, body = chirp.build_url('get', new_api.paths, trim_user=True)
self.assertEqual(url, '/1/statuses/public_timeline.json?trim_user=1')
self.assertEqual(body, None)
def test_build_post_url(self):
new_api = self.api.statuses.update
url, body = chirp.build_url('post', new_api.paths, status='Hello!')
self.assertEqual(url, '/1/statuses/update.json')
self.assertEqual(body, 'status=Hello%21')
def test_public_twitter_api(self):
resp = self.api.statuses.public_timeline.get()
self.assertEqual(len(resp), 20)
def test_public_twitter_api_with_params(self):
resp = self.api.statuses.public_timeline.get(trim_user=True)
self.assertEqual(len(resp), 20)
self.assertTrue(isinstance(resp[0].user.id, (int,long)))
def test_private_twitter_api(self):
self.assertRaises(
chirp.TwitterError,
self.api.statuses.update.post, status='Hello?')
def test_custom_json_parsing(self):
resp = self.api.statuses.show(43353108109197312).get()
self.assertEqual(type(resp), chirp.AttrDict)
self.assertEqual(type(resp.user), chirp.AttrDict)
if __name__ == '__main__':
unittest.main()
| [
"mccutchen@gmail.com"
] | mccutchen@gmail.com |
544ca010b5e55048830c590e8cc544464701b734 | 72d18e69d2885136fabc91bb754373cbbcb299f6 | /Functions_Intro/fizzbuzz.py | 616122561289aa97e040c6dfce131d9c2f9c6650 | [] | no_license | adam-xiao/PythonMasterclass | eaf2528c6e2546f6ff3cab83589c2a8ac9bcc36d | 63e6420fc2bd7ee511baae6ecb39faeadc228abe | refs/heads/master | 2023-01-04T13:55:40.138174 | 2020-11-01T07:10:59 | 2020-11-01T07:10:59 | 292,721,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | def fizz_buzz(n: int) -> str:
"""
Returns a different string depending on the divisiblity of `n`
3: fizz
5: buzz
3 & 5: fizz buzz
:param n: A positive integer `n`
:return: A string
"""
if (n % 3 == 0) and (n % 5 == 0):
return "fizz buzz"
elif n % 5 == 0:
return "buzz"
elif n % 3 == 0:
return "fizz"
else:
return str(n)
# for i in range(1, 101):
# print(fizz_buzz(i))
input("Play Fizz buzz? Press Enter to start")
print()
next_num = 0
while next_num < 99:
next_num += 1
print(fizz_buzz(next_num))
next_num += 1
correct_answer = fizz_buzz(next_num)
player_answer = input("Your turn: ")
# player_answer = correct_answer
if player_answer != correct_answer:
print("LOSER, the correct answer was {}".format(correct_answer))
break
else:
print("You don't suck, you reached {}.".format(next_num)) | [
"42500628+adam-xiao@users.noreply.github.com"
] | 42500628+adam-xiao@users.noreply.github.com |
a665bef85088b02f9afefbab6d33cec9c86181e8 | b7cfdeb15b109220017a66ed6094ce890c234b74 | /AI/deep_learning_from_scratch/numpy_prac/multidimensional_array.py | f4bcc03331c3e16239389b8444d40b2f660af3db | [] | no_license | voidsatisfaction/TIL | 5bcde7eadc913bdf6f5432a30dc9c486f986f837 | 43f0df9f8e9dcb440dbf79da5706b34356498e01 | refs/heads/master | 2023-09-01T09:32:04.986276 | 2023-08-18T11:04:08 | 2023-08-18T11:04:08 | 80,825,105 | 24 | 2 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | import numpy as np
B = np.array([[1, 2], [3, 4], [5, 6]])
B
np.ndim(B) # 2
B.shape # (3,2) 3x2 행렬
A = np.array([[1,2,3], [4,5,6]])
B = np.array([[1,2], [3,4], [5,6]])
np.dot(A, B)
| [
"lourie@naver.com"
] | lourie@naver.com |
48d70c3eed035084cc535a73b7e1c60aaf418000 | c36ffaf0b4004a0b6722095433b04cc07cac802c | /U4/src/A2.py | 9ba1644cb050540a23eb2cbfb56a31c9e00bf89b | [] | no_license | maqnius/oops17 | f6fac2270c99e99f4a69c6063d0b6dca5c738447 | 2b9e9dec7e54ce3f382148c73c6bed91311fcb3f | refs/heads/master | 2021-01-20T02:46:39.612547 | 2017-07-11T15:35:46 | 2017-07-11T15:35:46 | 89,450,560 | 0 | 0 | null | 2017-06-29T11:07:31 | 2017-04-26T07:19:42 | TeX | UTF-8 | Python | false | false | 1,490 | py | """
Aufgabe 2
Hessmann, Niehues, Pedersen
Demonstrates the Towers of Hanoi using a recursive algorithm.
Similar to solution:
http://www.python-kurs.eu/tuerme_von_hanoi.php
"""
def hanoi(n, source, helper, target, rekursionstiefe = 0):
if n > 0:
# move tower of size n - 1 to helper:
hanoi(n - 1, source, target, helper, rekursionstiefe = rekursionstiefe + 1)
# move disk from source peg to target peg
if source[0]:
print("Moving {} from {} to {}".format(source[0][-1], source[1], target[1]))
target[0].append(source[0].pop())
# Parameters need to be seperated for printing
afg = ""
hlp = ""
zl = ""
for i in [source, target, helper]:
if i[1] == "Anfang":
afg = i
elif i[1] == "Ziel":
zl = i
elif i[1] == "Hilfsstab":
hlp = i
print("{}: {}\t {}: {}\t {}: {}".format(afg[1], afg[0], hlp[1], hlp[0], zl[1], zl[0]))
# move tower of size n-1 from helper to target
hanoi(n - 1, helper, source, target, rekursionstiefe = rekursionstiefe + 1)
if __name__ == '__main__':
n = int(input("How many discs?:\n"))
source = list(range(1,n+1)) # Creates list from 1 to n
source = (source[::-1], "Anfang") # Invertes the order
target = ([], "Ziel")
helper = ([], "Hilfsstab")
hanoi(n,source,helper,target) | [
"niehues.mark@gmail.com"
] | niehues.mark@gmail.com |
be96602ccb3fb8939c250900b97e89fa6f24ed7f | 8771a872ca186b33086de185501e04d8937f5630 | /LSTM_MDN_HAND_S1.py | 6ad76eb2ec373fc6aec5b98be31ad43caf753846 | [] | no_license | johnlarkin1/handwriting-synthesis-e90 | 39a73a8c87fdcf7e63215fb92f0edf314df53e60 | 1e96c7e2a523b871120280fef1a5685107743b95 | refs/heads/master | 2021-01-11T17:54:34.560564 | 2017-05-13T13:00:46 | 2017-05-13T13:00:46 | 79,867,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,708 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from collections import namedtuple
import sys
import os
from MDNClass import MDN
from DataLoader import DataLoader
from gmm_sample import *
# Choose 1 or 2 to select which synthetic ydata curve get_xy_data builds
PLOT = 1
# Use float32 for every TensorFlow tensor in this file
d_type = tf.float32
# Batch size for training
train_batch_size = 50
# Number of steps (RNN rollout / truncated-BPTT length) for training
train_num_steps = 400
# Dimension of LSTM input/output (overridden to input_dimension inside LSTMCascade)
hidden_size = 3
# Dropout keep probability during training (1.0 = no dropout)
train_keep_prob = 0.90
# number of training epochs
num_epochs = 500
# how often (in epochs) to print/plot
update_every = 15
# how often (in epochs) to save
save_every = 4
# initial uniform-random weight scaling
init_scale = 0.1
# Number of stacked LSTM cells in the cascade
steps_in_cascade = 3
# Input dimension: (x, y, pen-lift) triples
input_dimension = 3
# Whether to reset LSTM state at sequence boundaries
handle_sequences_correctly = True
# Whether the data is stored as (dx, dy) offsets instead of absolute coords
do_diff = True
# learning rate for RMSProp
learning_rate = 1e-4
# whether to save per-frame heat maps for gif creation
CREATE_GIFS = True
# whether to autoregressively generate handwriting after training
GENERATE_HANDWRITING = True
# whether to emit TensorBoard summaries
CREATE_TENSORBOARD = False
######################################################################
# Helper function for below
def get_xy_data(n):
    """Generate one synthetic stroke of n (x, y) points.

    A random phase offset makes successive strokes differ.  The
    module-level PLOT flag selects between a triangle wave (PLOT == 1)
    and a looping sinusoid.  Both coordinates are shifted so that their
    minimum value is zero.
    """
    param = 0.4 * np.arange(n) + 10.0 * np.random.random()
    if PLOT == 1:
        # Triangle wave built from the fractional part of param / 8.
        frac = 0.125 * param - np.floor(0.125 * param)
        x = param
        y = 8.0 * (np.abs(frac - 0.5) - 0.25)
    else:
        # Cycloid-like looping curve.
        x = param + 3.0 * np.sin(param)
        y = -2.0 * np.cos(param)
    return x - x.min(), y - y.min()
def get_old_data(total_size):
    """Build a synthetic data set containing at least total_size points.

    Each stroke is generated by get_xy_data, centered in x, stacked 6
    units below the previous one in y, and jittered with Gaussian noise.
    Rows are (x, y, pen) where pen is 1 only on a stroke's first point.
    """
    strokes = []
    num_points = 0
    y_offset = 0.0
    while num_points < total_size:
        # Random stroke length in [50, 150).
        n = np.random.randint(50, 150)
        x, y = get_xy_data(n)
        # Center x about zero; shift y by the running row offset.
        xcol = x.reshape(-1, 1) - x.mean()
        ycol = y.reshape(-1, 1) + y_offset
        # Pen-lift feature: 1 marks the start of a new stroke.
        pen = np.zeros_like(ycol)
        pen[0] = 1
        # Slight coordinate jitter.
        xcol += np.random.normal(size=xcol.shape, scale=0.05)
        ycol += np.random.normal(size=ycol.shape, scale=0.05)
        strokes.append(np.hstack((xcol, ycol, pen)))
        # Advance the point count and the vertical offset for the next stroke.
        num_points += n
        y_offset += 6.0
    return np.vstack(tuple(strokes))
######################################################################
# Get training data -- the format returned is xi, yi, 0 except for new
# "strokes" which are xi, yi, 1 every time the "pen is lifted".
def get_data(data):
    """Flatten a list of stroke sequences into stacked training arrays.

    Parameters
    ----------
    data : list of (n_i, 3) arrays, one per stroke sequence.

    Returns
    -------
    all_data : (N, 3) array -- the sequences stacked vertically.
    all_sequence_info : (N, 2) int array.  Column 0 is 1 everywhere
        except on the first point of each sequence (0 there); during
        training it controls when the LSTM state is reset.  Column 1
        holds the index of the sequence each point came from (used only
        for visualizing rows in the PDFs, not for training).
    """
    all_sequence_info = []
    # enumerate replaces the hand-maintained subsequence counter;
    # np.zeros already makes sequence_info[0, 0] == 0, so no explicit
    # reset of the first row is needed.
    for subsequence_index, sequence in enumerate(data):
        sequence_info = np.zeros((len(sequence), 2), dtype=int)
        # Column 0: 0 on the first point (state reset), 1 afterwards.
        sequence_info[1:, 0] = 1
        # Column 1: which subsequence this point belongs to.
        sequence_info[:, 1] = subsequence_index
        all_sequence_info.append(sequence_info)
    return np.vstack(tuple(data)), np.vstack(tuple(all_sequence_info))
######################################################################
class Input(object):
    """Queue-based batch producer for (x, y, pen) training data.

    Wraps TF's range_input_producer so each session.run pulls the next
    (inputs, targets, state-mask, error-weight) slice of the epoch.
    Targets are the inputs shifted forward by one time step.
    """

    def __init__(self, posdata, seqinfo, config):
        """posdata: (N, 3) array of points; seqinfo: (N, 2) int array
        from get_data; config: namedtuple with batch_size / num_steps.
        """
        batch_size = config.batch_size
        num_steps = config.num_steps
        self.posdata = posdata
        self.seqinfo = seqinfo
        # A per-instance name scope so each condition (train/valid/test)
        # gets its own independent queue/producer.
        with tf.name_scope('producer', [posdata, batch_size, num_steps]):
            # Convert the raw point data to a tensor.
            raw_data = tf.convert_to_tensor(posdata, name='raw_data', dtype=d_type)
            # Only column 0 of seqinfo (the sequence-continuation flag)
            # is needed on the graph side.
            raw_seq = tf.convert_to_tensor(seqinfo[:,0], name = 'sef_info', dtype=d_type)
            # Graph-side sizes: points per batch row and batches per epoch.
            data_len = tf.size(raw_data)//3
            batch_len = data_len // batch_size
            epoch_size = (batch_len - 1) // num_steps
            # Fail fast if the data is too small for this batch_size/num_steps.
            assertion = tf.assert_positive(
                epoch_size,
                message="epoch_size == 0, decrease batch_size or num_steps")
            with tf.control_dependencies([assertion]):
                epoch_size = tf.identity(epoch_size, name="epoch_size")
            # Truncate the data to a multiple of batch_size*batch_len and
            # reshape it into (batch_size, batch_len, 3) batches.
            data = tf.reshape(raw_data[:batch_size*batch_len, :],
                              [batch_size, batch_len, 3])
            seq = tf.reshape(raw_seq[:batch_size*batch_len],
                             [batch_size, batch_len])
            # i is a queue-backed loop index selecting which batch of the
            # epoch the next session.run will dequeue.
            i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
            # Each slice holds num_steps*batch_size examples; y is x
            # shifted one step forward (next-point prediction targets).
            x = tf.slice(data, [0, i*num_steps, 0], [batch_size, num_steps, 3])
            y = tf.slice(data, [0, i*num_steps+1, 0], [batch_size, num_steps, 3])
            preserve_state = tf.slice(seq, [0, i*num_steps], [batch_size, num_steps])
            err_weight = tf.slice(seq, [0, i*num_steps+1], [batch_size, num_steps])
            # Assign member variables.  Note epoch_size here is computed
            # with plain Python arithmetic (a Python int, not the tensor
            # above) so callers can loop over it directly.
            self.x = x
            self.y = y
            self.epoch_size = ((len(posdata) // batch_size)-1) // num_steps
            self.preserve_state = preserve_state
            self.err_weight = tf.reshape(err_weight, [batch_size, num_steps, 1])
######################################################################
# Class of Cascading LSTMs
class LSTMCascade(object):
def __init__(self, config, model_input, is_train, is_sample=False, external_targets=None):
# Stash some variables from config
hidden_size = config.hidden_size
batch_size = config.batch_size
num_steps = config.num_steps
keep_prob = config.keep_prob
# Scale factor so we can vary dataset size and see "average" loss
# Do this in case we're just looking at a single point and we're querying
self.loss_scale = batch_size * num_steps * model_input.epoch_size
# Stash input
self.model_input = model_input
# we don't need to reshape the data!
if is_sample:
self.lstm_input = tf.placeholder(tf.float32, shape = [1,1,3])
model_input.y = tf.zeros(shape=[1,1,3])
else:
self.lstm_input = model_input.x
# this is going to be the final dimension
# this is always even
final_high_dimension = input_dimension * steps_in_cascade * (steps_in_cascade+1) // 2
# note: input dimension is equivalent to the hidden size of the LSTM cell
hidden_size = input_dimension
# this will hold all of our cells
lstm_stack = []
# this will hold all of our states as it goes
self.state_stack = []
# this will hold the initial states
init_state_stack = []
# This will reduce our final outputs to the appropriate lower dimension
# Make weights to go from LSTM output size to 2D output
w_output_to_y = tf.get_variable('weights_output_to_y', [final_high_dimension, 2],
dtype=d_type)
# we need to # LSTMS = # steps in cascade
for i in range(steps_in_cascade):
# Make an LSTM cell
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
hidden_size * (i+1), forget_bias=1.0,
state_is_tuple=True)
# Do dropout if needed
if is_train and keep_prob < 1.0:
print('doing dropout with prob {}'.format(config.keep_prob))
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
lstm_cell, output_keep_prob=keep_prob)
initial_state = lstm_cell.zero_state(batch_size, d_type)
lstm_stack.append(lstm_cell)
init_state_stack.append(initial_state)
self.state_stack.append(initial_state)
# cache our initial states
self.initial_state = init_state_stack
# Need an empty total output list of ys
outputs = []
# we need this variable scope to prevent us from creating multiple
# independent weight/bias vectors for LSTM cell
with tf.variable_scope('RNN'):
# For each time step
for time_step in range(num_steps):
# This is y_i for a single time step
time_step_output = []
# Prevent creating indep weights for LSTM
if time_step > 0:
tf.get_variable_scope().reuse_variables()
# model_input.preserve_state is a vector of 0's or 1's corresponding
# to 0 means reset LSTM, 1 means don't
preserve_step = tf.reshape(model_input.preserve_state[:, time_step],
[config.batch_size,1])
for i in range(steps_in_cascade):
with tf.variable_scope("RNN"+str(i)):
# Run the lstm cell using the current timestep of
# input and the previous state to get the output and the new state
curr_lstm_cell = lstm_stack[i]
curr_state = self.state_stack[i]
# state.c and state.h are both shape (batch_size, hidden_size)
# when I multiply by (batch_size, 1) it broadcasts
curr_stateTuple = type(curr_state)
possible_state = curr_stateTuple(c = curr_state.c*preserve_step,
h = curr_state.h*preserve_step)
# Need a special base case for the first lstm input
if i == 0:
cell_input = self.lstm_input[:, time_step, :]
else:
# All of these variables will be defined because of our base case
cell_input = tf.concat(concat_dim = 1, values = [self.lstm_input[:, time_step, :], cell_output])
(cell_output, curr_state) = curr_lstm_cell(cell_input,
possible_state)
# Update our state list
self.state_stack[i] = curr_state
# Update the output for the single cell
time_step_output.append(cell_output)
# For every timestep, we need a valid y output that should be of N*L*(L+1)/2
concated_time_steps = tf.concat(concat_dim = 1 , values = time_step_output)
outputs.append(concated_time_steps)
# we need to bookmark the final state to preserve continuity
# across batches when we run an epoch (see below)
# note, this is a list
self.final_state = self.state_stack
# concatenate all the outputs together into a big rank-2
# matrix where each row is of dimension hidden_size
# not sure what this concatenate is doing
lstm_output_rank2 = tf.reshape(tf.concat(1, outputs), [-1, final_high_dimension])
if external_targets is None:
# reshape original targets down to rank-2 tensor
targets_rank2 = tf.reshape(model_input.y, [batch_size*num_steps, 3])
else:
targets_rank2 = tf.reshape(external_targets, [-1, 3])
with tf.variable_scope('MDN'):
ourMDN = MDN(lstm_output_rank2, targets_rank2, final_high_dimension, is_train)
self.pis, self.corr, self.mu, self.sigma, self.eos = ourMDN.return_params()
# The loss is now calculated from our MDN
MDNloss, log_loss = ourMDN.compute_loss()
self.log_loss = log_loss
if external_targets is None:
log_loss = tf.reshape(log_loss, [batch_size, num_steps,1])
loss = log_loss * model_input.err_weight
self.loss_by_err_wt = loss
# err_weight = tf.slice(seq, [0, i*num_steps+1], [batch_size, num_steps])
# What we now care about is the mixture probabilities from our MDN
else:
loss = MDNloss
with tf.variable_scope('MDN'):
self.mixture_prob = ourMDN.return_mixture_prob()
self.ncomponents = ourMDN.NCOMPONENTS
# loss is calculated in our MDN
self.loss = tf.reduce_sum(loss)
self.loss_before_max = self.loss
self.err_wt_reduce_sum = tf.reduce_sum(model_input.err_weight)
self.loss /= tf.maximum(tf.reduce_sum(model_input.err_weight),1)
self.after_max_division = self.loss
# generate a train_op if we need to
if is_train:
self.train_op = tf.train.RMSPropOptimizer(learning_rate).minimize(self.loss)
else:
self.train_op = None
def run_epoch(self, session, return_predictions=False, query=False):
# we always fetch loss because we will return it, we also
# always fetch the final state because we need to pass it
# along to the next batch in the loop below.
# final state is now a list!! Update!! of three state tensors
fetches = {
'loss': self.loss,
'final_state': self.final_state,
'log loss' : self.log_loss,
'loss before max': self.loss_before_max,
'err wt reduce sum': self.err_wt_reduce_sum,
'after tf.max div': self.after_max_division,
}
# we need to run the training op if we are doing training
if self.train_op is not None:
fetches['train_op'] = self.train_op
# we need to fetch the network outputs if we are doing predictions
if return_predictions:
fetches['p'] = self.mixture_prob
# run the initial state to feed the LSTM - this should just be
# zeros
state = session.run(self.initial_state)
# we will sum up the total loss
total_loss = 0.0
all_outputs = []
##################################################
# for each batch:
for step in range(self.model_input.epoch_size):
for level in range(len(state)):
# the input producer will take care of feeding in x/y,
# but we need to feed in the LSTM state
c, h = self.initial_state[level]
feed_dict = { c: state[level].c, h: state[level].h }
# run the computation graph?
vals = session.run(fetches, feed_dict)
#print(vals)
# get the final LSTM state for the next iteration
state = vals['final_state']
# stash output if necessary
if return_predictions:
all_outputs.append(vals['p'])
# update total loss
total_loss += vals['loss']
# do average
total_loss /= self.loss_scale
# return one or two things
if not return_predictions:
return total_loss
elif query:
return total_loss, vals['p']
else:
return total_loss, np.vstack(all_outputs)
def query(self, input_data, y, curr_state_list):
return self.mixture_prob
def sample(self, session, duration=600):
CHEAT = False
def get_pi_idx(x, pdf):
N = pdf.size
accumulate = 0
for i in range(0, N):
accumulate += pdf[i]
if (accumulate >= x):
return i
print('error with sampling ensemble')
return -1
def otoro_sample(mu1, mu2, s1, s2, rho):
mean = [mu1, mu2]
cov = [[s1*s1, rho*s1*s2], [rho*s1*s2, s2*s2]]
x = np.random.multivariate_normal(mean, cov, 1)
return x
if CHEAT:
prev_x = np.zeros((4,1,3), dtype = np.float32)
prev_x[0,0,2] = 1
prev_x[:,0,0] = 2.5
prev_x[:,0,1] = 5.5
writing = np.zeros((duration,3), dtype = np.float32)
prev_state = session.run(self.initial_state)
fetches = [self.pis, self.corr, self.mu, self.sigma, self.eos, self.final_state]
for i in range(duration):
if i < 4:
x_in = prev_x[i].reshape(-1,1,3)
else:
x_in = sample.reshape(-1,1,3)
for level in range(len(prev_state)):
c, h = self.initial_state[level]
feed_dict = {self.lstm_input : x_in, c: prev_state[level].c, h: prev_state[level].h }
pis, corr, mu, sigma, eos, next_state = session.run(fetches, feed_dict)
print('This is mu: {}'.format(mu))
sample = gmm_sample(mu.reshape(-1,self.ncomponents,2), sigma.reshape(-1,self.ncomponents,2), corr, pis, eos)
writing[i,:] = sample
prev_state = next_state
else:
# first stroke
prev_x = np.zeros((1,1,3), dtype=np.float32)
prev_x[0,0,2] = 1 # we want to see the beginning of a new stroke
# this will hold all the info
writing = np.zeros((duration,3), dtype=np.float32)
# this is a list of three states
prev_state = session.run(self.initial_state)
fetches = [self.pis, self.corr, self.mu, self.sigma, self.eos, self.final_state]
for i in range(duration):
print('At sample iteration: {}'.format(i))
for level in range(len(prev_state)):
c, h = self.initial_state[level]
feed_dict = {self.lstm_input : prev_x, c: prev_state[level].c, h: prev_state[level].h }
pis, corr, mu, sigma, eos, next_state = session.run(fetches, feed_dict)
print('next state: {}'.format(next_state))
# print('pis: {}\n corr: {}\n sigma: {}\n eos: {}\n next_state: {}'.format(pis, corr, mu, sigma, eos, next_state))
sample = gmm_sample(mu.reshape(-1,self.ncomponents,2), sigma.reshape(-1,self.ncomponents,2), corr, pis, eos)
# sample =
# print('sample: {}'.format(sample))
# print('sample.shape : {}'.format(sample.shape))
writing[i, :] = sample
prev_x = sample.reshape(-1,1,3)
prev_state = next_state
return writing
######################################################################
# plot input vs predictions
def integrate(xyoffs, seq):
    """Convert per-step (dx, dy) offsets back into absolute coordinates.

    Rows where seq[:, 0] == 0 mark the start of a new subsequence; the
    cumulative sum restarts at each such row so strokes do not bleed
    into one another.
    """
    starts = np.nonzero(seq[:, 0] == 0)[0]
    # Pair each start index with the next start (or the end of the data).
    ends = np.append(starts[1:], xyoffs.shape[0])
    pieces = [np.cumsum(xyoffs[a:b], axis=0) for a, b in zip(starts, ends)]
    return np.vstack(tuple(pieces))
def make_plot(epoch, loss, data, seq, pred):
    """Plot ground truth (blue) against predictions (red) and save a PDF."""
    title = '{} test set loss = {:.2f}'.format(epoch, loss)
    print(title)
    # Offset each subsequence vertically by its row index so rows stack.
    row_offset = seq[:, 1] * 6
    if do_diff:
        # The arrays hold (dx, dy) offsets; integrate back to absolute coords.
        data = integrate(data, seq)
        pred = integrate(pred, seq)
    plt.clf()
    plt.plot(data[:, 0], data[:, 1] + row_offset, 'b.')
    plt.plot(pred[:, 0], pred[:, 1] + row_offset, 'r.')
    plt.title(title)
    plt.axis('equal')
    plt.savefig('test_data_pred_lstm_3.pdf')
def make_handwriting_plot(generated_data, generated_seq):
    """Plot sampled handwriting points and save them to GeneratedHW.pdf.

    When the model was trained on (dx, dy) offsets (do_diff is True),
    the points are integrated back into absolute coordinates first.

    Bug fix: previously `data` was only bound when do_diff was True, so
    the function raised NameError whenever do_diff was False.
    """
    titlestr = 'Generated Handwriting'
    data = integrate(generated_data, generated_seq) if do_diff else generated_data
    plt.clf()
    plt.plot(data[:,0], data[:,1], 'r.')
    plt.axis('equal')
    plt.title(titlestr)
    plt.savefig('GeneratedHW.pdf')
def make_heat_plot(epoch, loss, query_data, seq, xrng, yrng, xg, pred, i):
    """Render the predicted mixture density as a heat map behind the query stroke.

    The density values `pred` are reshaped to the meshgrid `xg`, drawn
    anchored on the last point of the (integrated) query stroke, and the
    figure is written to Gifs/LSTMHeatMap<i>.pdf.

    Cleanup: removed the unused local `y = seq[:,1] * 6` from the
    original implementation.
    """
    p = pred.reshape(xg.shape)
    titlestr = '{} query set loss = {:.2f}'.format(epoch, loss)
    # Offsets -> absolute coordinates; the grid is anchored at the final point.
    query_data = integrate(query_data, seq)
    last_point = query_data[-1]
    plt.clf()
    ax = plt.gca()
    xdata = xrng + last_point[0]
    # Negate y so the plot matches screen orientation.
    ydata = -(yrng + last_point[1])
    plt.pcolormesh(xdata, ydata, p, cmap='jet')
    plt.plot(query_data[:, 0], -query_data[:, 1], 'wo', alpha=0.90, markersize=3)
    plt.axis('equal')
    plt.axis([xdata.min(), xdata.max(), ydata.min(), ydata.max()])
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    plt.title(titlestr)
    plt.savefig('Gifs/LSTMHeatMap' + str(i) + '.pdf', bbox_inches='tight', pad_inches=0)
def make_heat_plot_no_integrate(epoch, loss, query_data, xrng, yrng, xg, pred, i):
    """Heat-map visualization for query data already in absolute coordinates."""
    density = pred.reshape(xg.shape)
    title = '{} query set loss = {:.2f}'.format(epoch, loss)
    # Anchor the density grid on the last query point; negate y for display.
    anchor = query_data[-1]
    grid_x = xrng + anchor[0]
    grid_y = -(yrng + anchor[1])
    plt.clf()
    axes = plt.gca()
    plt.pcolormesh(grid_x, grid_y, density, cmap='jet')
    plt.plot(query_data[:, 0], -query_data[:, 1], 'wo', alpha=0.90, markersize=5)
    plt.axis('equal')
    plt.axis([grid_x.min(), grid_x.max(), grid_y.min(), grid_y.max()])
    axes.xaxis.set_visible(False)
    axes.yaxis.set_visible(False)
    plt.title(title)
    plt.savefig('NewGifs/LSTMHeatMap' + str(i) + '.png', bbox_inches='tight')
######################################################################
# main function
def main():
    """End-to-end driver for the handwriting LSTM.

    Builds four weight-sharing model instances (train / valid / query /
    generate) on the TF1 graph, then either restores a checkpoint given as
    the first command-line argument and renders visualizations, or trains
    from scratch, checkpointing and plotting periodically.
    """
    # configs are just named tuples
    Config = namedtuple('Config', 'batch_size, num_steps, hidden_size, keep_prob')
    # generate training and test configurations
    train_config = Config(batch_size=train_batch_size,
                          num_steps=train_num_steps,
                          hidden_size=hidden_size,
                          keep_prob=train_keep_prob)
    test_config = Config(batch_size=1,
                         num_steps=1,
                         hidden_size=hidden_size,
                         keep_prob=1)
    query_config = Config(batch_size= 1,
                          num_steps = 1,
                          hidden_size = hidden_size,
                          keep_prob = 1)
    generate_config = Config(batch_size = 1,
                             num_steps = 1,
                             hidden_size = hidden_size,
                             keep_prob = 1)
    # range to initialize all weights to
    initializer = tf.random_uniform_initializer(-init_scale, init_scale)
    # Import our handwriting data
    data = DataLoader()
    our_train_data = data.data
    our_valid_data = data.valid_data
    # Only two validation samples are used as the visualization query set.
    our_query_data = data.valid_data[304:306]
    # our_train_data = get_old_data(4000)
    # our_valid_data = get_old_data(1000)
    # our_query_data = get_old_data(500)
    # generate our train data
    train_data, train_seq = get_data(our_train_data)
    # get our validation data
    valid_data, valid_seq = get_data(our_valid_data)
    # get the query data
    query_data, query_seq = get_data(our_query_data)
    # Truncate the query stroke to its first 60 points.
    query_data, query_seq = query_data[0:60, :], query_seq[0:60,:]
    # Let's get our mesh grid for visualization
    int_query_data = integrate(query_data, query_seq)
    # int_query_y = query_seq[:,1] * 6
    # itq = -int_query_data[:,1] + int_query_y
    last_point = int_query_data[-1]
    # Mesh bounds: stroke extent relative to its last point, padded by 10.
    xmin, xmax = (int_query_data[:,0]-last_point[0]).min()-10, (int_query_data[:,0]-last_point[0]).max()+10
    ymin, ymax = ((int_query_data[:,1]-last_point[1]).min()-10), ((int_query_data[:,1]-last_point[1]).max()+10)
    print('xmin: {} xmax: {} \n ymin: {} ymax: {}'.format(xmin,xmax,ymin,ymax))
    xrng = np.linspace(xmin, xmax, 200, True)
    yrng = np.linspace(ymin, ymax, 200, True)
    xg, yg = np.meshgrid(xrng, yrng)
    xreshape, yreshape = xg.reshape(-1,1), yg.reshape(-1,1)
    third_col = np.ones(xreshape.shape)
    # (x, y, 1) rows, reshaped to (n_points, batch=1, 3) as query targets.
    mesh_target = np.hstack([xreshape, yreshape, third_col])
    mesh_target = mesh_target.reshape(-1, 1, 3).astype('float32')
    # generate input producers and models -- again, not 100% sure why
    # we do the name_scope here...
    with tf.name_scope('train'):
        train_input = Input(train_data, train_seq, train_config)
        with tf.variable_scope('model', reuse=None, initializer=initializer):
            train_model = LSTMCascade(train_config, train_input, is_train=True)
    with tf.name_scope('valid'):
        valid_input = Input(valid_data, valid_seq, train_config)
        # NOTE(review): the validation model is built on `train_input`, not
        # `valid_input` — so `valid_input` is unused. Looks like a bug; confirm.
        with tf.variable_scope('model', reuse=True, initializer=initializer):
            valid_model = LSTMCascade(train_config, train_input, is_train=False)
    # with tf.name_scope('test'):
    #     test_input = Input(test_data, test_config)
    #     with tf.variable_scope('model', reuse=True, initializer=initializer):
    #         test_model = LSTMCascade(test_config, test_input, is_train=False)
    with tf.name_scope('query'):
        query_input = Input(query_data, query_seq, query_config)
        with tf.variable_scope('model', reuse=True, initializer=initializer):
            query_model = LSTMCascade(query_config, query_input, is_train=False, external_targets=mesh_target)
    # Two-point dummy sequence used to seed the sampling model.
    prev_x = np.zeros((2,1,3), dtype = np.float32)
    generate_data, generate_seq = get_data(prev_x)
    print('generate seq: {}'.format(generate_seq))
    with tf.name_scope('generate'):
        generate_input = Input(generate_data, generate_seq, generate_config)
        with tf.variable_scope('model', reuse=True, initializer=initializer):
            generate_model = LSTMCascade(generate_config, generate_input, is_sample=True, is_train=False)
    if CREATE_GIFS:
        # One query model per stroke prefix length, each with its own mesh,
        # so a heat-map frame can be rendered for every point of the stroke.
        query_models = []
        for i in range(2,len(query_data)):
            with tf.name_scope('gif_query'+str(i)):
                seg_query_data = query_data[0:i,:]
                seg_query_seq = query_seq[0:i,:]
                int_seg_query_data = integrate(seg_query_data, seg_query_seq)
                last_point = int_seg_query_data[-1]
                xmin, xmax = (int_seg_query_data[:,0]-last_point[0]).min()-10, (int_seg_query_data[:,0]-last_point[0]).max()+10
                ymin, ymax = ((int_seg_query_data[:,1]-last_point[1]).min()-10), ((int_seg_query_data[:,1]-last_point[1]).max()+10)
                xrng = np.linspace(xmin, xmax, 200, True)
                yrng = np.linspace(ymin, ymax, 200, True)
                xg, yg = np.meshgrid(xrng, yrng)
                xreshape, yreshape = xg.reshape(-1,1), yg.reshape(-1,1)
                third_col = np.ones(xreshape.shape)
                mesh_target = np.hstack([xreshape, yreshape, third_col])
                mesh_target = mesh_target.reshape(-1, 1, 3).astype('float32')
                query_input = Input(seg_query_data, seg_query_seq, query_config)
                with tf.variable_scope('model', reuse=True, initializer=initializer):
                    query_models.append(LSTMCascade(query_config, query_input, is_train=False, external_targets=mesh_target))
    # print out all trainable variables:
    tvars = tf.trainable_variables()
    print('trainable variables:')
    print('\n'.join([' - ' + tvar.name for tvar in tvars]))
    # create a session
    session = tf.Session()
    # # let's save our computation graph IF we don't already have a parameter
    saver = tf.train.Saver()
    # need to explicitly start the queue runners so the index variable
    # doesn't hang. (not sure how PTB did this - I think the
    # Supervisor takes care of it)
    tf.train.start_queue_runners(session)
    if len(sys.argv) > 1:
        # Restore-and-visualize path: argv[1] is a checkpoint path.
        saver.restore(session, sys.argv[1])
        print('Did a restore. Here are all the variables:')
        tvars = tf.global_variables()
        print('\n'.join([' - ' + tvar.name for tvar in tvars]))
        # l, pred = query_model.run_epoch(session, return_predictions=True, query=True)
        # make_heat_plot('Model {}'.format(0), l, query_data, query_seq, xrng, yrng, xg, pred, 1000)
        if CREATE_GIFS:
            for idx, model in enumerate(query_models):
                int_query_data = integrate(model.model_input.posdata, model.model_input.seqinfo)
                last_point = int_query_data[-1]
                xmin, xmax = (int_query_data[:,0]-last_point[0]).min()-10, (int_query_data[:,0]-last_point[0]).max()+10
                ymin, ymax = ((int_query_data[:,1]-last_point[1]).min()-10), ((int_query_data[:,1]-last_point[1]).max()+10)
                xrng = np.linspace(xmin, xmax, 200, True)
                yrng = np.linspace(ymin, ymax, 200, True)
                l, pred = model.run_epoch(session,return_predictions=True, query=True)
                make_heat_plot_no_integrate('Model {}'.format(idx), l, int_query_data, xrng, yrng, xg, pred, idx)
        # MATT: can you help here?
        if GENERATE_HANDWRITING:
            # not sure what model we should pass in
            strokes = generate_model.sample(session)
            # seq marks the first point as a sequence start (0), rest as 1.
            seq = np.ones(shape = (strokes.shape[0], 1))
            seq[0,0] = 0
            make_handwriting_plot(strokes, seq)
            print('Handwriting generated.')
        if CREATE_TENSORBOARD:
            writer = tf.summary.FileWriter("tensorboard_output", session.graph)
            writer.close()
    else:
        # Training-from-scratch path.
        # initialize all the variables
        session.run(tf.global_variables_initializer())
        # for each epoch
        for epoch in range(num_epochs):
            # run the epoch & get training loss
            l = train_model.run_epoch(session)
            print('training loss at epoch {} is {}'.format(epoch, l))
            if epoch % save_every == 0:
                print('Saving model..... ')
                if not os.path.isdir('models'):
                    os.mkdir('models')
                written_path = saver.save(session, 'models/rnn_demo',
                                          global_step=epoch)
                print('saved model to {}'.format(written_path))
            # see if we should do a printed/graphical update
            if epoch % update_every == 0:
                print()
                l = valid_model.run_epoch(session)
                print('validation loss at epoch {} is {:.2f}'.format(epoch, l))
                l, pred = query_model.run_epoch(session, return_predictions=True, query=True)
                # make_heat_plot('epoch {}'.format(epoch), l, query_data, query_seq, xrng, yrng, xg, pred, epoch)
                if not os.path.isdir('models'):
                    os.mkdir('models')
                written_path = saver.save(session, 'models/rnn_demo',
                                          global_step=epoch)
                print('saved model to {}'.format(written_path))
                print()
        written_path = saver.save(session, 'models/rnn_demo', global_step=num_epochs)
        print('saved final model to {}'.format(written_path))
        # do final update
        l, pred = query_model.run_epoch(session, return_predictions=True, query=True)
        # NOTE(review): `seq` is never assigned on this code path (it is only
        # set in the restore branch above), so this call raises NameError —
        # `query_seq` was probably intended. Confirm before fixing.
        make_heat_plot('final', l, query_data, seq, xrng, yrng, xg, pred, 1000)
# Script entry point: run the full build/train/visualize pipeline.
if __name__ == '__main__':
    main()
| [
"john@johnjlarkin.com"
] | john@johnjlarkin.com |
cf3f7d4b603af16a1a425ff974ad35b791f632d2 | dadf439ca3d004fe2794aff7be637b0864b670a4 | /app.py | 37fcf91437ae864db2e138f9f7e80b8eee6dd97b | [] | no_license | GlobalMediaBridge/cosmetic-server | 47803a5367544cb26c6edaff0535b98d8df52a66 | 8b46ad4d90a064aacd603539ab131858b377d566 | refs/heads/master | 2022-06-29T02:56:12.175285 | 2020-05-13T08:09:57 | 2020-05-13T08:09:57 | 237,736,639 | 0 | 2 | null | 2020-03-10T04:13:52 | 2020-02-02T07:45:06 | Python | UTF-8 | Python | false | false | 48 | py | from flaskr import app
app.run(host='0.0.0.0')
| [
"dmsdn960@gmail.com"
] | dmsdn960@gmail.com |
99ad59c775b63230f25d7993be8b872c6b228962 | 101597749f454c69c1e58c9b162e70e4aece48a4 | /migrations/versions/b80821e6bdb4_.py | 5ebff4530ebfa76ed008789f5983eec3a13eac5a | [] | no_license | williamle92/DivvyChallenge | 86faf99c6227816291cebffc6efb891ec4773d6a | 3e71dfe36566a6e9f48565c3cb1fab9d36247907 | refs/heads/main | 2023-07-13T12:41:01.042602 | 2021-08-20T10:30:08 | 2021-08-20T10:30:08 | 377,760,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | """empty message
Revision ID: b80821e6bdb4
Revises:
Create Date: 2021-06-16 20:29:08.531109
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b80821e6bdb4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: create the `divvy` table (one row per bike trip)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('divvy',
    sa.Column('trip_id', sa.Integer(), nullable=False),
    sa.Column('start_time', sa.DateTime(), nullable=False),
    sa.Column('stop_time', sa.DateTime(), nullable=False),
    sa.Column('bike_id', sa.Integer(), nullable=False),
    sa.Column('from_station_id', sa.Integer(), nullable=False),
    sa.Column('from_station_name', sa.String(length=100), nullable=False),
    sa.Column('to_station_id', sa.Integer(), nullable=False),
    sa.Column('to_station_name', sa.String(length=100), nullable=False),
    sa.Column('user_type', sa.String(length=40), nullable=False),
    sa.Column('gender', sa.String(length=30), nullable=True),
    sa.Column('birthday', sa.String(length=50), nullable=True),
    sa.Column('trip_duration', sa.Integer(), nullable=False),
    sa.PrimaryKeyConstraint('trip_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the `divvy` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('divvy')
    # ### end Alembic commands ###
| [
"william1992le@gmail.com"
] | william1992le@gmail.com |
ae91760c4985aeba9d571a6564528f4b4efec575 | f474177a0a233e24ef0f1e2e703c0bd8cbc5275c | /produce_html.py | d35e062b1699c415a7357c9886b9caff74b97f73 | [] | no_license | GeekSi/PythonAndroidPresureTest | ad27ea6643fd83fb623a0f5eccb2a250b41917ca | e350b6558e7577c3dccfb5c8be811fa831605af5 | refs/heads/master | 2023-03-23T16:38:46.070071 | 2021-03-15T07:47:43 | 2021-03-15T07:47:43 | 293,988,312 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | import create_chars
from utils import utils
from constant import constants
# Generate the charts page on disk before opening it below.
create_chars.createChars()
utils.open("file://" + constants.PATH_CHARS) | [
"siqing@baidu.com"
] | siqing@baidu.com |
1e89afd9a83db8cd06e1c65eddbaa5eba67c16bd | 082bb9ec05031022a7779afc3a819c857b3e582a | /routes/reply.py | e7be4d61f75502db2e292ec88d203e86d88d35ea | [] | no_license | yuanzhw/forum-server | 43e0357aeaa7796ebb669d854cc87e04e02a0072 | 863da7590da66f422258d3b03baa7927b3dc8409 | refs/heads/master | 2020-04-16T23:36:27.688666 | 2020-01-29T15:59:40 | 2020-01-29T15:59:40 | 166,018,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | from flask import (
request,
redirect,
url_for,
Blueprint,
)
from utils import response
import status
from models.reply import Reply
from routes.helper import current_user, login_required, csrf_required, new_csrf_token
main = Blueprint('reply', __name__)
@main.route('/new', methods=['POST'])
@login_required
@csrf_required
def add():
    """Create a reply from the JSON request body.

    The decorators enforce an authenticated user and a valid CSRF token;
    the reply is attributed to the current user. Responds with 201.
    """
    author = current_user()
    payload = request.get_json()
    Reply.new(payload, author.id)
    return response(data='reply create successfully',
                    status=status.HTTP_201_CREATED)
@main.route('/list/<int:topic_id>')
@login_required
def reply_list(topic_id):
    """Return every reply under the given topic as a JSON list."""
    replies = Reply.all(topic_id=topic_id)
    data = [
        dict(
            content=r.content,
            user_id=r.user_id,
            topic_id=r.topic_id,
            username=r.user().username,
            create_time=r.created_time,
        )
        for r in replies
    ]
    return response(data=data, status=status.HTTP_200_OK)
| [
"yuanzhw@vip.qq.com"
] | yuanzhw@vip.qq.com |
1b42aa6b93abad9f4afde19580c025c5be9f9167 | b792116b8f9b48b53464f55996338dcb451d6af9 | /tests/functional/services/transfer/endpoint_manager/test_endpoint_manager_task_successful_transfers.py | 528a2977c8d5f7dc1f356bd8dd042e1683c3e73c | [
"Apache-2.0"
] | permissive | globus/globus-sdk-python | 7884084dbe889300604d2ef1bb0a9ee351046fc2 | a5d7c02ede2b9cb794444305f3039a0cd82bab3a | refs/heads/main | 2023-09-01T15:15:17.499197 | 2023-08-30T21:47:08 | 2023-08-30T21:47:08 | 53,344,710 | 61 | 44 | Apache-2.0 | 2023-09-12T01:59:30 | 2016-03-07T17:26:28 | Python | UTF-8 | Python | false | false | 376 | py | from globus_sdk._testing import load_response
def test_endpoint_manager_task_successful_transfers(client):
    """The successful-transfers call returns a 200 with the expected doc type."""
    mocked = load_response(client.endpoint_manager_task_successful_transfers)
    task_id = mocked.metadata["task_id"]
    res = client.endpoint_manager_task_successful_transfers(task_id)
    assert res.http_status == 200
    assert res["DATA_TYPE"] == "successful_transfers"
| [
"noreply@github.com"
] | noreply@github.com |
847306585ef3a46f500482752ba79115adf8c449 | 9d454ae0d5dd1d7e96e904ced80ca502019bb659 | /1550_threeConsecutiveOdds.py | 94c3d3a5aabfa1262828a5bafb4860615440bd8a | [] | no_license | zzz686970/leetcode-2018 | dad2c3db3b6360662a90ea709e58d7facec5c797 | 16e4343922041929bc3021e152093425066620bb | refs/heads/master | 2021-08-18T08:11:10.153394 | 2021-07-22T15:58:52 | 2021-07-22T15:58:52 | 135,581,395 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | def threeConsecutiveOdds(arr):
return any([False if i < 2 else arr[i-2] & arr[i-1] & arr[i] & 1 for i in range(len(arr))]) | [
"1564256031@qq.com"
] | 1564256031@qq.com |
05665fcf64d3a5dce47280b02e07942c49888a5f | 065483e0cf041897f155cfe2b0de3e33bd55388a | /cellseg_models_pytorch/decoders/long_skips/merging/sum.py | ff2bc8791ce74453acd644cdca6e59dc330f499e | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | okunator/cellseg_models.pytorch | 234d2a66d2eebda8348487bcd26f4083a09f5dcc | 7f79405012eb934b419bbdba8de23f35e840ca85 | refs/heads/main | 2023-09-04T11:38:58.802570 | 2023-08-29T13:07:38 | 2023-08-29T13:07:38 | 450,787,123 | 43 | 0 | MIT | 2023-03-27T09:23:26 | 2022-01-22T10:39:50 | Python | UTF-8 | Python | false | false | 2,877 | py | from typing import Tuple
import torch
import torch.nn as nn
from cellseg_models_pytorch.modules import Conv, Norm
__all__ = ["SumBlock"]
class SumBlock(nn.ModuleDict):
    """Skip-merge block that adds all incoming feature maps together.

    Skip tensors whose channel count differs from ``in_channels`` are first
    projected through a 1x1 convolution + normalization so that every operand
    of the sum has the same number of channels.
    """

    def __init__(
        self,
        in_channels: int,
        skip_channels: Tuple[int, ...],
        convolution: str = "conv",
        normalization: str = "bn",
        **kwargs,
    ) -> None:
        """Set up the channel-projection layers needed for summation.

        Parameters
        ----------
        in_channels : int
            Number of input channels.
        skip_channels : Tuple[int, ...]:
            Number of channels in each skip tensor.
        convolution : str
            Name of the convolution method for the 1x1 projections.
        normalization : str
            Name of the normalization method for the 1x1 projections.
        """
        super().__init__()
        self.in_channels = in_channels
        self.skip_channels = skip_channels
        # One flag per skip tensor: True when its channel count clashes with
        # `in_channels` and a 1x1 projection is required before summing.
        self.pool = [channels != in_channels for channels in skip_channels]

        for ix, needs_projection in enumerate(self.pool):
            if not needs_projection:
                continue
            projection = nn.Sequential(
                Conv(
                    convolution,
                    in_channels=skip_channels[ix],
                    bias=False,
                    out_channels=in_channels,
                    kernel_size=1,
                    padding=0,
                ),
                Norm(normalization, num_features=in_channels),
            )
            # Key format must stay `downsample{ix+1}`; forward() looks it up.
            self.add_module(f"downsample{ix + 1}", projection)

    @property
    def out_channels(self) -> int:
        """Number of output channels (always equal to `in_channels`)."""
        return self.in_channels

    def forward(
        self,
        x: torch.Tensor,
        skips: Tuple[torch.Tensor, ...],
    ) -> torch.Tensor:
        """Sum `x` with all (channel-matched) skip tensors.

        Parameters
        ----------
        x : torch.Tensor
            Input tensor. Shape: (B, C, H, W).
        skips : Tuple[torch.Tensor, ...]
            All the skip features. Shapes: (B, C, H, W).

        Returns
        -------
        torch.Tensor:
            The summed output tensor. Shape (B, C, H, W).
        """
        if self.values():
            skips = list(skips)
            for ix, needs_projection in enumerate(self.pool):
                if needs_projection:
                    skips[ix] = self[f"downsample{ix + 1}"](skips[ix])

        return torch.stack([x, *skips], dim=0).sum(dim=0)

    def extra_repr(self) -> str:
        """Add extra info to print."""
        s = "in_channels={in_channels}, skip_channels={skip_channels}"
        return s.format(**self.__dict__) + f", out_channels={self.out_channels}"
| [
"noreply@github.com"
] | noreply@github.com |
d1151a3774182955baa6a2bb9d92711c09ca72db | dc5876f4f26bca276253985e5aadaaddafe476a7 | /books/models.py | ee6ec46f3b15a20409ff9add61de6ae5b4ee9086 | [] | no_license | SidAhmed01/Library-Management-System-With-Python-Django | 2805b01ad701869ca8a4ced8aa0e0865614f3533 | c0ec4da5dcca2300ee32f68674b7ee410b9be19b | refs/heads/main | 2023-07-07T16:10:19.661261 | 2021-07-31T10:34:52 | 2021-07-31T10:34:52 | 387,015,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | from django.db import models
# Create your models here.
class category(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Book(models.Model):
status_book = [
('availble','availble'),
('rental','rental'),
('sold','sold'),
]
title = models.CharField(max_length=250)
author = models.CharField(max_length=250, null=True, blank=True)
photo_book = models.ImageField(upload_to='photos', null=True, blank=True)
photo_auther = models.ImageField(upload_to='photos', null=True, blank=True)
pages = models.IntegerField(null=True, blank=True)
price = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)
ratal_price_day = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)
retal_period = models.IntegerField(null=True, blank=True)
total_rental = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)
active = models.BooleanField(default=True)
status = models.CharField(max_length=50, choices=status_book, null=True, blank=True)
category = models.ForeignKey(category, on_delete=models.PROTECT, null=True, blank=True)
def __str__(self):
return self.title
| [
"aitalisidou@gmail.com"
] | aitalisidou@gmail.com |
1457243d3f4ccfa460915b008bfdd848f9970fe5 | cf7b0ab779e273c3a553fa7e6ca4e98c524ec8f9 | /JKDatabaseSystem/predict.py | d55f3908698c089bdba163affcb10aea25be2673 | [] | no_license | zenmeder/JKDatabaseSystem | 369a40172420eb1f783467b5884e6e94f6fa9a71 | 146a552e549c9a1ef131bb475ecf5e8947696a6c | refs/heads/master | 2020-03-19T09:12:41.774587 | 2018-06-12T03:04:56 | 2018-06-12T03:04:56 | 136,268,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,410 | py | #!/usr/local/bin/ python3
# -*- coding:utf-8 -*-
# __author__ = "zenmeder"
from django.shortcuts import render
from django.http import HttpRequest, JsonResponse
from JKDatabaseSystem.TimeSeriesData import TimeSeriesData
from fbprophet import Prophet
import datetime
import pandas as pd
TEST_DATA = {0: {'2014-03-07 12:00:00': 1.2120997914767013, '2014-03-06 12:00:00': 1.2116781135882257, '2014-03-08 12:00:00': 1.212471194311644, '2014-03-09 12:00:00': 1.3844232263987608, '2014-03-05 12:00:00': 1.1638272230171982, '2014-03-04 12:00:00': 0.923545255041049, '2014-03-03 12:00:00': 0.9175520826485787}, 1: {'2014-03-07 12:00:00': 1.5332522218575242, '2014-03-06 12:00:00': 1.526531461767218, '2014-03-08 12:00:00': 1.5032414834593189, '2014-03-09 12:00:00': 1.746297246223891, '2014-03-05 12:00:00': 1.4674655977033975, '2014-03-04 12:00:00': 1.2717764396516358, '2014-03-03 12:00:00': 1.2615316161024095}, 2: {'2014-03-07 12:00:00': 1.9056879239927216, '2014-03-06 12:00:00': 1.9107812337316765, '2014-03-08 12:00:00': 1.8803181902752815, '2014-03-09 12:00:00': 2.2351995060468397, '2014-03-05 12:00:00': 1.8487757217452507, '2014-03-04 12:00:00': 1.6206211133231647, '2014-03-03 12:00:00': 1.6132048802520231}, 3: {'2014-03-07 12:00:00': 2.7648248924746555, '2014-03-06 12:00:00': 2.765424163206701, '2014-03-08 12:00:00': 2.7143788840420155, '2014-03-09 12:00:00': 3.2498322455157282, '2014-03-05 12:00:00': 2.6398536732133184, '2014-03-04 12:00:00': 2.2662173607694855, '2014-03-03 12:00:00': 2.327574529632627}, 4: {'2014-03-07 12:00:00': 2.942128586128699, '2014-03-06 12:00:00': 2.8785318972685685, '2014-03-08 12:00:00': 2.9828077878812005, '2014-03-09 12:00:00': 3.4763916714020855, '2014-03-05 12:00:00': 2.754313124906797, '2014-03-04 12:00:00': 2.481293746052308, '2014-03-03 12:00:00': 2.459704508317135}, 5: {'2014-03-07 12:00:00': 3.2908987516516612, '2014-03-06 12:00:00': 3.172392885446487, '2014-03-08 12:00:00': 3.375353720692149, '2014-03-09 12:00:00': 3.908655020842233, '2014-03-05 12:00:00': 3.0087293483987163, '2014-03-04 12:00:00': 2.7427631510741044, '2014-03-03 12:00:00': 2.725082364776283}, 6: {'2014-03-07 12:00:00': 4.016937766844582, '2014-03-06 12:00:00': 3.871594782494509, '2014-03-08 12:00:00': 4.192399547414983, '2014-03-09 12:00:00': 
4.759177257125063, '2014-03-05 12:00:00': 3.5857646753894894, '2014-03-04 12:00:00': 3.278758996200701, '2014-03-03 12:00:00': 3.2673703429609158}, 7: {'2014-03-07 12:00:00': 4.249328689335783, '2014-03-06 12:00:00': 4.117491829368469, '2014-03-08 12:00:00': 4.51199924642879, '2014-03-09 12:00:00': 5.0653542866016075, '2014-03-05 12:00:00': 3.7731758323126265, '2014-03-04 12:00:00': 3.467087436901849, '2014-03-03 12:00:00': 3.471268452445188}, 8: {'2014-03-07 12:00:00': 3.811902138323838, '2014-03-06 12:00:00': 3.6883455892241446, '2014-03-08 12:00:00': 4.0792415800205974, '2014-03-09 12:00:00': 4.59010842097665, '2014-03-05 12:00:00': 3.4181845389246015, '2014-03-04 12:00:00': 3.1682675709825032, '2014-03-03 12:00:00': 3.153356120237021}, 9: {'2014-03-07 12:00:00': 4.149166488628491, '2014-03-06 12:00:00': 3.999057043628336, '2014-03-08 12:00:00': 4.4594411031546635, '2014-03-09 12:00:00': 4.957426808876585, '2014-03-05 12:00:00': 3.6695771425118138, '2014-03-04 12:00:00': 3.3906903796106946, '2014-03-03 12:00:00': 3.3880598498533527}, 10: {'2014-03-07 12:00:00': 4.384639989493111, '2014-03-06 12:00:00': 4.204154127202213, '2014-03-08 12:00:00': 4.71488328203959, '2014-03-09 12:00:00': 5.209739875899027, '2014-03-05 12:00:00': 3.848877365526905, '2014-03-04 12:00:00': 3.5181726391119734, '2014-03-03 12:00:00': 3.512480025409349}, 11: {'2014-03-07 12:00:00': 4.517181669974732, '2014-03-06 12:00:00': 4.330482943290319, '2014-03-08 12:00:00': 4.86326621966086, '2014-03-09 12:00:00': 5.357100433873345, '2014-03-05 12:00:00': 3.9633065077069487, '2014-03-04 12:00:00': 3.6091081208469173, '2014-03-03 12:00:00': 3.603359593180855}, 12: {'2014-03-07 12:00:00': 4.557598715893092, '2014-03-06 12:00:00': 4.374477765462619, '2014-03-08 12:00:00': 4.933074080818161, '2014-03-09 12:00:00': 5.440007577777552, '2014-03-05 12:00:00': 3.9845184589823064, '2014-03-04 12:00:00': 3.6136473900928774, '2014-03-03 12:00:00': 3.609645412133984}, 13: {'2014-03-07 12:00:00': 
4.728690722889779, '2014-03-06 12:00:00': 4.52902885917943, '2014-03-08 12:00:00': 5.14273875705736, '2014-03-09 12:00:00': 5.680146825864299, '2014-03-05 12:00:00': 4.11998079263715, '2014-03-04 12:00:00': 3.758363880183923, '2014-03-03 12:00:00': 3.7521766772034013}, 14: {'2014-03-07 12:00:00': 4.8537859980512446, '2014-03-06 12:00:00': 4.638226404885125, '2014-03-08 12:00:00': 5.287801696414493, '2014-03-09 12:00:00': 5.842171521319935, '2014-03-05 12:00:00': 4.211573084684768, '2014-03-04 12:00:00': 3.8264791745104647, '2014-03-03 12:00:00': 3.8209117935860215}, 15: {'2014-03-07 12:00:00': 4.982300026547082, '2014-03-06 12:00:00': 4.752562880142724, '2014-03-08 12:00:00': 5.441753639660359, '2014-03-09 12:00:00': 6.0001587431620695, '2014-03-05 12:00:00': 4.30634085936811, '2014-03-04 12:00:00': 3.8705389877221936, '2014-03-03 12:00:00': 3.8746108763515266}, 16: {'2014-03-07 12:00:00': 4.995966460997572, '2014-03-06 12:00:00': 4.767340163151203, '2014-03-08 12:00:00': 5.464534708368071, '2014-03-09 12:00:00': 6.000740981011272, '2014-03-05 12:00:00': 4.3131606149311414, '2014-03-04 12:00:00': 3.880692107624679, '2014-03-03 12:00:00': 3.8788034848045116}, 17: {'2014-03-07 12:00:00': 5.154862121185602, '2014-03-06 12:00:00': 4.900485703428413, '2014-03-08 12:00:00': 5.6159678142401575, '2014-03-09 12:00:00': 6.167685862363228, '2014-03-05 12:00:00': 4.434083024428246, '2014-03-04 12:00:00': 3.9870079934175386, '2014-03-03 12:00:00': 3.986751311062997}, 18: {'2014-03-07 12:00:00': 5.202712863419298, '2014-03-06 12:00:00': 4.9177122107075375, '2014-03-08 12:00:00': 5.6475633199310735, '2014-03-09 12:00:00': 6.180290734451574, '2014-03-05 12:00:00': 4.448718125300234, '2014-03-04 12:00:00': 4.031285430809366, '2014-03-03 12:00:00': 4.023532920455256}, 19: {'2014-03-07 12:00:00': 5.253318618860409, '2014-03-06 12:00:00': 4.966696927687657, '2014-03-08 12:00:00': 5.698002915940261, '2014-03-09 12:00:00': 6.263456286256881, '2014-03-05 12:00:00': 4.482473823721926, 
'2014-03-04 12:00:00': 4.053803113702857, '2014-03-03 12:00:00': 4.058210624430443}, 20: {'2014-03-07 12:00:00': 5.24728564099078, '2014-03-06 12:00:00': 4.959103160468854, '2014-03-08 12:00:00': 5.678875114183265, '2014-03-09 12:00:00': 6.168254133484884, '2014-03-05 12:00:00': 4.487508693866284, '2014-03-04 12:00:00': 4.048830988196032, '2014-03-03 12:00:00': 4.046352222304918}, 21: {'2014-03-07 12:00:00': 5.399934304482368, '2014-03-06 12:00:00': 5.071273090286988, '2014-03-08 12:00:00': 5.836656729532166, '2014-03-09 12:00:00': 6.336812648989962, '2014-03-05 12:00:00': 4.581375114396962, '2014-03-04 12:00:00': 4.117327902284706, '2014-03-03 12:00:00': 4.125257924875781}, 22: {'2014-03-07 12:00:00': 5.482476339751947, '2014-03-06 12:00:00': 5.145488386787796, '2014-03-08 12:00:00': 5.861887387367247, '2014-03-09 12:00:00': 6.431849843337869, '2014-03-05 12:00:00': 4.6860891242276415, '2014-03-04 12:00:00': 4.179511984545777, '2014-03-03 12:00:00': 4.181885113130891}, 23: {'2014-03-07 12:00:00': 5.508324542459601, '2014-03-06 12:00:00': 5.156812443333906, '2014-03-08 12:00:00': 5.892224667559948, '2014-03-09 12:00:00': 6.468323308598551, '2014-03-05 12:00:00': 4.698952395122188, '2014-03-04 12:00:00': 4.20164204385288, '2014-03-03 12:00:00': 4.20179427998208}, 24: {'2014-03-07 12:00:00': 5.518712130696857, '2014-03-06 12:00:00': 5.12452657196658, '2014-03-08 12:00:00': 5.879235131673008, '2014-03-09 12:00:00': 6.408983936236088, '2014-03-05 12:00:00': 4.6622825389151314, '2014-03-04 12:00:00': 4.187350537621232, '2014-03-03 12:00:00': 4.18873768161252}, 25: {'2014-03-07 12:00:00': 5.483519440304063, '2014-03-06 12:00:00': 5.085689140913717, '2014-03-08 12:00:00': 5.868090271724241, '2014-03-09 12:00:00': 6.349295580382658, '2014-03-05 12:00:00': 4.621992838074203, '2014-03-04 12:00:00': 4.117896961897474, '2014-03-03 12:00:00': 4.114252565448722}, 26: {'2014-03-07 12:00:00': 5.400832510458932, '2014-03-06 12:00:00': 5.000707763975142, '2014-03-08 12:00:00': 
5.830254958406513, '2014-03-09 12:00:00': 6.229555465217554, '2014-03-05 12:00:00': 4.52860873984674, '2014-03-04 12:00:00': 4.02519080767602, '2014-03-03 12:00:00': 4.02155189156778}, 27: {'2014-03-07 12:00:00': 5.396701581825498, '2014-03-06 12:00:00': 4.944689302002762, '2014-03-08 12:00:00': 5.805081660153495, '2014-03-09 12:00:00': 6.1617271307163515, '2014-03-05 12:00:00': 4.47735376993166, '2014-03-04 12:00:00': 3.962130868829558, '2014-03-03 12:00:00': 3.9630323347542826}, 28: {'2014-03-07 12:00:00': 5.274278639508935, '2014-03-06 12:00:00': 4.804247104452619, '2014-03-08 12:00:00': 5.6771465566062105, '2014-03-09 12:00:00': 5.9712368416264185, '2014-03-05 12:00:00': 4.336216923153408, '2014-03-04 12:00:00': 3.8691702076971097, '2014-03-03 12:00:00': 3.870825983127008}, 29: {'2014-03-07 12:00:00': 5.199109754140114, '2014-03-06 12:00:00': 4.716282080136748, '2014-03-08 12:00:00': 5.607067151972428, '2014-03-09 12:00:00': 5.8788143957434675, '2014-03-05 12:00:00': 4.247884486710246, '2014-03-04 12:00:00': 3.7612487543675757, '2014-03-03 12:00:00': 3.7648823526557265}, 30: {'2014-03-07 12:00:00': 5.271053840531307, '2014-03-06 12:00:00': 4.797887808969085, '2014-03-08 12:00:00': 5.643992403475767, '2014-03-09 12:00:00': 5.932320112241504, '2014-03-05 12:00:00': 4.346921658803735, '2014-03-04 12:00:00': 3.778750297814985, '2014-03-03 12:00:00': 3.7772726749211363}, 31: {'2014-03-07 12:00:00': 5.240423021130306, '2014-03-06 12:00:00': 4.715502917068826, '2014-03-08 12:00:00': 5.582400428615877, '2014-03-09 12:00:00': 5.854627128196157, '2014-03-05 12:00:00': 4.274818483303973, '2014-03-04 12:00:00': 3.7217439050730996, '2014-03-03 12:00:00': 3.716154816756108}, 32: {'2014-03-07 12:00:00': 5.18289072462912, '2014-03-06 12:00:00': 4.645553502083477, '2014-03-08 12:00:00': 5.480516961719628, '2014-03-09 12:00:00': 5.7378115121666, '2014-03-05 12:00:00': 4.208476834539437, '2014-03-04 12:00:00': 3.642982519711558, '2014-03-03 12:00:00': 3.643744899219943}, 33: 
{'2014-03-07 12:00:00': 5.130409437994107, '2014-03-06 12:00:00': 4.5686108302584145, '2014-03-08 12:00:00': 5.39191728216167, '2014-03-09 12:00:00': 5.619774427409833, '2014-03-05 12:00:00': 4.128541491574849, '2014-03-04 12:00:00': 3.5746304151408648, '2014-03-03 12:00:00': 3.576261213636808}, 34: {'2014-03-07 12:00:00': 5.086585397766003, '2014-03-06 12:00:00': 4.496602278165531, '2014-03-08 12:00:00': 5.319639938918989, '2014-03-09 12:00:00': 5.55886693679318, '2014-03-05 12:00:00': 4.064980061303736, '2014-03-04 12:00:00': 3.5228021504478764, '2014-03-03 12:00:00': 3.5224349798612544}, 35: {'2014-03-07 12:00:00': 5.028361997867397, '2014-03-06 12:00:00': 4.409950137799052, '2014-03-08 12:00:00': 5.2389995881489995, '2014-03-09 12:00:00': 5.476440098861262, '2014-03-05 12:00:00': 3.9794852339419857, '2014-03-04 12:00:00': 3.4567251640387595, '2014-03-03 12:00:00': 3.4534883394862748}, 36: {'2014-03-07 12:00:00': 5.0379784870206175, '2014-03-06 12:00:00': 4.396828912193349, '2014-03-08 12:00:00': 5.265147690291491, '2014-03-09 12:00:00': 5.499412553762366, '2014-03-05 12:00:00': 3.9707819809952456, '2014-03-04 12:00:00': 3.437291031779273, '2014-03-03 12:00:00': 3.4316229777320513}, 37: {'2014-03-07 12:00:00': 4.946217175829068, '2014-03-06 12:00:00': 4.284179241750051, '2014-03-08 12:00:00': 5.159796357228574, '2014-03-09 12:00:00': 5.385869851295304, '2014-03-05 12:00:00': 3.8395437574284923, '2014-03-04 12:00:00': 3.3523735805982477, '2014-03-03 12:00:00': 3.3546857017207072}, 38: {'2014-03-07 12:00:00': 4.9272229281770645, '2014-03-06 12:00:00': 4.263086058793835, '2014-03-08 12:00:00': 5.15331118862941, '2014-03-09 12:00:00': 5.376322668551757, '2014-03-05 12:00:00': 3.830611878350488, '2014-03-04 12:00:00': 3.3333715322654056, '2014-03-03 12:00:00': 3.331142285092203}, 39: {'2014-03-07 12:00:00': 5.001043033926471, '2014-03-06 12:00:00': 4.34615914226643, '2014-03-08 12:00:00': 5.262890486780162, '2014-03-09 12:00:00': 5.4813972865257306, '2014-03-05 
12:00:00': 3.888828011754361, '2014-03-04 12:00:00': 3.392649961551692, '2014-03-03 12:00:00': 3.391103171497694}, 40: {'2014-03-07 12:00:00': 4.931225015976859, '2014-03-06 12:00:00': 4.288280407527408, '2014-03-08 12:00:00': 5.189620863077374, '2014-03-09 12:00:00': 5.41816932836655, '2014-03-05 12:00:00': 3.845434215460349, '2014-03-04 12:00:00': 3.319630319820673, '2014-03-03 12:00:00': 3.3193331568740456}, 41: {'2014-03-07 12:00:00': 4.72170511282164, '2014-03-06 12:00:00': 4.087335189822222, '2014-03-08 12:00:00': 4.986013226164268, '2014-03-09 12:00:00': 5.128355877623166, '2014-03-05 12:00:00': 3.654458975164452, '2014-03-04 12:00:00': 3.0831844948354608, '2014-03-03 12:00:00': 3.0876854111867145}, 42: {'2014-03-07 12:00:00': 4.649315653675834, '2014-03-06 12:00:00': 4.001886578959376, '2014-03-08 12:00:00': 4.885743663088561, '2014-03-09 12:00:00': 5.038286253942503, '2014-03-05 12:00:00': 3.585838771913737, '2014-03-04 12:00:00': 3.0045404901711406, '2014-03-03 12:00:00': 3.009574228382581}, 43: {'2014-03-07 12:00:00': 4.525094604165622, '2014-03-06 12:00:00': 3.8872751090946394, '2014-03-08 12:00:00': 4.764736002277631, '2014-03-09 12:00:00': 4.883926224821534, '2014-03-05 12:00:00': 3.493913172899678, '2014-03-04 12:00:00': 2.9034407483104463, '2014-03-03 12:00:00': 2.9031916291526683}, 44: {'2014-03-07 12:00:00': 4.363531781830997, '2014-03-06 12:00:00': 3.767719605924953, '2014-03-08 12:00:00': 4.542630093907058, '2014-03-09 12:00:00': 4.660758581395757, '2014-03-05 12:00:00': 3.403458986783429, '2014-03-04 12:00:00': 2.725840943567614, '2014-03-03 12:00:00': 2.7268491639044523}, 45: {'2014-03-07 12:00:00': 4.129644192259908, '2014-03-06 12:00:00': 3.55542458519538, '2014-03-08 12:00:00': 4.3102118781748215, '2014-03-09 12:00:00': 4.395509580233224, '2014-03-05 12:00:00': 3.2286407979240974, '2014-03-04 12:00:00': 2.569543174706597, '2014-03-03 12:00:00': 2.565669125622233}, 46: {'2014-03-07 12:00:00': 3.7064632656219767, '2014-03-06 12:00:00': 
3.224648383303685, '2014-03-08 12:00:00': 3.8615655712378025, '2014-03-09 12:00:00': 3.8799110388582716, '2014-03-05 12:00:00': 2.922499269647263, '2014-03-04 12:00:00': 2.242751027347426, '2014-03-03 12:00:00': 2.2487531325436865}, 47: {'2014-03-07 12:00:00': 3.490602681008634, '2014-03-06 12:00:00': 3.0557076486972874, '2014-03-08 12:00:00': 3.6031987996680894, '2014-03-09 12:00:00': 3.6785591115225795, '2014-03-05 12:00:00': 2.8107833874443253, '2014-03-04 12:00:00': 2.085721728688493, '2014-03-03 12:00:00': 2.084177167412027}, 48: {'2014-03-07 12:00:00': 3.6038140547645465, '2014-03-06 12:00:00': 3.220424246056145, '2014-03-08 12:00:00': 3.728237244783922, '2014-03-09 12:00:00': 3.9160348107383163, '2014-03-05 12:00:00': 2.9737210601156994, '2014-03-04 12:00:00': 2.2060286635295676, '2014-03-03 12:00:00': 2.2073041225326686}, 49: {'2014-03-07 12:00:00': 3.3837971844148096, '2014-03-06 12:00:00': 3.082605834640277, '2014-03-08 12:00:00': 3.458852892477709, '2014-03-09 12:00:00': 3.704077025252617, '2014-03-05 12:00:00': 2.87800928792968, '2014-03-04 12:00:00': 2.067805156954437, '2014-03-03 12:00:00': 2.067338137983966}, 50: {'2014-03-07 12:00:00': 3.1686168407267994, '2014-03-06 12:00:00': 2.837549444055821, '2014-03-08 12:00:00': 3.1918140794335033, '2014-03-09 12:00:00': 3.363103832725527, '2014-03-05 12:00:00': 2.6667598750896357, '2014-03-04 12:00:00': 1.9370303744812958, '2014-03-03 12:00:00': 1.9341458140437247}, 51: {'2014-03-07 12:00:00': 2.9822797026317005, '2014-03-06 12:00:00': 2.6581266981409937, '2014-03-08 12:00:00': 3.0199590088392614, '2014-03-09 12:00:00': 3.1200152133439514, '2014-03-05 12:00:00': 2.505892930815183, '2014-03-04 12:00:00': 1.7702449659644321, '2014-03-03 12:00:00': 1.7541490259938786}, 52: {'2014-03-07 12:00:00': 2.6316545276598, '2014-03-06 12:00:00': 2.375103107291915, '2014-03-08 12:00:00': 2.6602928266958377, '2014-03-09 12:00:00': 2.7545523624560686, '2014-03-05 12:00:00': 2.2771831827805733, '2014-03-04 12:00:00': 
1.5184577549530422, '2014-03-03 12:00:00': 1.5129648723161124}, 53: {'2014-03-07 12:00:00': 2.237260514821839, '2014-03-06 12:00:00': 2.033824410033967, '2014-03-08 12:00:00': 2.2116299184098738, '2014-03-09 12:00:00': 2.2866148794031687, '2014-03-05 12:00:00': 1.9565283890464709, '2014-03-04 12:00:00': 1.3050267643372306, '2014-03-03 12:00:00': 1.2985023263472173}, 54: {'2014-03-07 12:00:00': 2.0802503661950658, '2014-03-06 12:00:00': 1.9060303956986524, '2014-03-08 12:00:00': 2.008077186352988, '2014-03-09 12:00:00': 2.101590291382821, '2014-03-05 12:00:00': 1.858245646364947, '2014-03-04 12:00:00': 1.2099689495316581, '2014-03-03 12:00:00': 1.2018105715123615}, 55: {'2014-03-07 12:00:00': 2.203421976031889, '2014-03-06 12:00:00': 2.0627787691842907, '2014-03-08 12:00:00': 2.1437083292503205, '2014-03-09 12:00:00': 2.238881154180697, '2014-03-05 12:00:00': 1.967652734471553, '2014-03-04 12:00:00': 1.368572998915142, '2014-03-03 12:00:00': 1.3571067804812753}, 56: {'2014-03-07 12:00:00': 1.77206374184608, '2014-03-06 12:00:00': 1.6552438104135914, '2014-03-08 12:00:00': 1.6816807949681591, '2014-03-09 12:00:00': 1.763311443653312, '2014-03-05 12:00:00': 1.6303744808485012, '2014-03-04 12:00:00': 0.9956861183183104, '2014-03-03 12:00:00': 0.991776860831716}, 57: {'2014-03-07 12:00:00': 1.7884100272525068, '2014-03-06 12:00:00': 1.6853951371314213, '2014-03-08 12:00:00': 1.7079731184467726, '2014-03-09 12:00:00': 1.7635500233159416, '2014-03-05 12:00:00': 1.7016451507313495, '2014-03-04 12:00:00': 0.953223402459179, '2014-03-03 12:00:00': 0.9644723178180187}, 58: {'2014-03-07 12:00:00': 1.5351735480870006, '2014-03-06 12:00:00': 1.446146573625134, '2014-03-08 12:00:00': 1.4235742063614196, '2014-03-09 12:00:00': 1.5127122537341497, '2014-03-05 12:00:00': 1.4517422921637941, '2014-03-04 12:00:00': 0.7474742967042274, '2014-03-03 12:00:00': 0.7538298176558043}, 59: {'2014-03-07 12:00:00': 1.185532376558341, '2014-03-06 12:00:00': 1.1344791001578232, '2014-03-08 
12:00:00': 1.0669664753016765, '2014-03-09 12:00:00': 1.1399089194293284, '2014-03-05 12:00:00': 1.1351259200900714, '2014-03-04 12:00:00': 0.606652572623172, '2014-03-03 12:00:00': 0.5971856839396525}, 60: {'2014-03-07 12:00:00': 1.2984709007410555, '2014-03-06 12:00:00': 1.2492071106065472, '2014-03-08 12:00:00': 1.18444450394085, '2014-03-09 12:00:00': 1.285581470252333, '2014-03-05 12:00:00': 1.242789206832799, '2014-03-04 12:00:00': 0.6714456356452327, '2014-03-03 12:00:00': 0.6641353695322889}, 61: {'2014-03-07 12:00:00': 1.0357970578365452, '2014-03-06 12:00:00': 1.0163044476619172, '2014-03-08 12:00:00': 0.9177748242177008, '2014-03-09 12:00:00': 0.9640118426150013, '2014-03-05 12:00:00': 1.0167878889085087, '2014-03-04 12:00:00': 0.42772382890446015, '2014-03-03 12:00:00': 0.41910941640946814}, 62: {'2014-03-07 12:00:00': 0.9840562006583616, '2014-03-06 12:00:00': 0.9653443269283581, '2014-03-08 12:00:00': 0.8419394539988281, '2014-03-09 12:00:00': 0.8642292018700907, '2014-03-05 12:00:00': 0.9880948221074728, '2014-03-04 12:00:00': 0.3368284157898797, '2014-03-03 12:00:00': 0.3324913276369121}, 63: {'2014-03-07 12:00:00': 0.5414410503193048, '2014-03-06 12:00:00': 0.5746540285360042, '2014-03-08 12:00:00': 0.4492303425869086, '2014-03-09 12:00:00': 0.4133316538842644, '2014-03-05 12:00:00': 0.576422756954016, '2014-03-04 12:00:00': 0.1662644534665083, '2014-03-03 12:00:00': 0.15663172375869325}, 64: {'2014-03-07 12:00:00': 0.2728603532529036, '2014-03-06 12:00:00': 0.3335193884012214, '2014-03-08 12:00:00': 0.15156039846270827, '2014-03-09 12:00:00': 0.1337408586188209, '2014-03-05 12:00:00': 0.34406324806335725, '2014-03-04 12:00:00': -0.04449552594511327, '2014-03-03 12:00:00': -0.04857705121927225}, 65: {'2014-03-07 12:00:00': 0.1441313190088474, '2014-03-06 12:00:00': 0.2140233995786434, '2014-03-08 12:00:00': 0.014855136086431998, '2014-03-09 12:00:00': 0.020789792616794152, '2014-03-05 12:00:00': 0.22655513258305685, '2014-03-04 12:00:00': 
-0.09816837407792184, '2014-03-03 12:00:00': -0.09973887191710613}, 66: {'2014-03-07 12:00:00': -0.31726396398630946, '2014-03-06 12:00:00': -0.23264687651784335, '2014-03-08 12:00:00': -0.49607018048482027, '2014-03-09 12:00:00': -0.5996685342227505, '2014-03-05 12:00:00': -0.19408240523200407, '2014-03-04 12:00:00': -0.5716641799017685, '2014-03-03 12:00:00': -0.5629068319001435}, 67: {'2014-03-07 12:00:00': 0.26406091032608653, '2014-03-06 12:00:00': 0.32405567940966823, '2014-03-08 12:00:00': 0.13438596690284385, '2014-03-09 12:00:00': 0.1679027783170509, '2014-03-05 12:00:00': 0.3305543857663267, '2014-03-04 12:00:00': -0.06705375703185276, '2014-03-03 12:00:00': -0.07122394038869365}, 68: {'2014-03-07 12:00:00': 0.4818703940655362, '2014-03-06 12:00:00': 0.5304904257694811, '2014-03-08 12:00:00': 0.3120530298227146, '2014-03-09 12:00:00': 0.2705463897091246, '2014-03-05 12:00:00': 0.5590878502834743, '2014-03-04 12:00:00': -0.19239402282976759, '2014-03-03 12:00:00': -0.19577757326581557}, 69: {'2014-03-07 12:00:00': 0.4833350340193846, '2014-03-06 12:00:00': 0.49490926269590757, '2014-03-08 12:00:00': 0.41230429968959476, '2014-03-09 12:00:00': 0.4093498863474939, '2014-03-05 12:00:00': 0.48135065939324384, '2014-03-04 12:00:00': 0.10581870344725727, '2014-03-03 12:00:00': 0.09346387862242006}, 70: {'2014-03-07 12:00:00': 0.3687107752790827, '2014-03-06 12:00:00': 0.3972866316673671, '2014-03-08 12:00:00': 0.3351625103927471, '2014-03-09 12:00:00': 0.31211622181233795, '2014-03-05 12:00:00': 0.38020091390789135, '2014-03-04 12:00:00': -0.07361713184467097, '2014-03-03 12:00:00': -0.08184382170278101}, 71: {'2014-03-07 12:00:00': 0.6134144584592595, '2014-03-06 12:00:00': 0.6149252686746396, '2014-03-08 12:00:00': 0.61434477872488, '2014-03-09 12:00:00': 0.6314399231912046, '2014-03-05 12:00:00': 0.5860034567685071, '2014-03-04 12:00:00': 0.1918414757432364, '2014-03-03 12:00:00': 0.17531929574342883}, 72: {'2014-03-07 12:00:00': 0.7394305687465407, 
'2014-03-06 12:00:00': 0.7204856467283107, '2014-03-08 12:00:00': 0.749385558203481, '2014-03-09 12:00:00': 0.7915245971317283, '2014-03-05 12:00:00': 0.6835685625968877, '2014-03-04 12:00:00': 0.2946252794940849, '2014-03-03 12:00:00': 0.27425547723933846}, 73: {'2014-03-07 12:00:00': 0.3976378343898861, '2014-03-06 12:00:00': 0.3308848426786907, '2014-03-08 12:00:00': 0.31865526249595133, '2014-03-09 12:00:00': 0.2811646929563335, '2014-03-05 12:00:00': 0.3087447464098552, '2014-03-04 12:00:00': -0.038890979753760824, '2014-03-03 12:00:00': -0.04772528373753371}, 74: {'2014-03-07 12:00:00': 0.4902680761224601, '2014-03-06 12:00:00': 0.4234747277055335, '2014-03-08 12:00:00': 0.42397403607912226, '2014-03-09 12:00:00': 0.40561201073292813, '2014-03-05 12:00:00': 0.39923794901097354, '2014-03-04 12:00:00': 0.019547357812265048, '2014-03-03 12:00:00': 0.009125852646010069}, 75: {'2014-03-07 12:00:00': 0.5666916469180087, '2014-03-06 12:00:00': 0.49362672544944414, '2014-03-08 12:00:00': 0.48792165384193864, '2014-03-09 12:00:00': 0.5106496187420866, '2014-03-05 12:00:00': 0.46670706893386277, '2014-03-04 12:00:00': 0.08381092578033461, '2014-03-03 12:00:00': 0.07193573291538155}, 76: {'2014-03-07 12:00:00': 0.6732173175158089, '2014-03-06 12:00:00': 0.5711677528513498, '2014-03-08 12:00:00': 0.6166067705878218, '2014-03-09 12:00:00': 0.658487308162122, '2014-03-05 12:00:00': 0.5316698703870091, '2014-03-04 12:00:00': 0.17559622186346865, '2014-03-03 12:00:00': 0.16064976361976901}, 77: {'2014-03-07 12:00:00': 0.7018708573217332, '2014-03-06 12:00:00': 0.5813155662535714, '2014-03-08 12:00:00': 0.654906492873171, '2014-03-09 12:00:00': 0.6823053765362732, '2014-03-05 12:00:00': 0.5394719851562317, '2014-03-04 12:00:00': 0.2089569417341057, '2014-03-03 12:00:00': 0.19198187065677716}, 78: {'2014-03-07 12:00:00': 0.79497832739408, '2014-03-06 12:00:00': 0.6595665995099554, '2014-03-08 12:00:00': 0.7400488975220715, '2014-03-09 12:00:00': 0.8011666305822064, '2014-03-05 
12:00:00': 0.6098554552250026, '2014-03-04 12:00:00': 0.2464647264296698, '2014-03-03 12:00:00': 0.2291889813021834}, 79: {'2014-03-07 12:00:00': 0.8329354386809618, '2014-03-06 12:00:00': 0.6324667606527294, '2014-03-08 12:00:00': 0.7874400939145138, '2014-03-09 12:00:00': 0.8025480922624864, '2014-03-05 12:00:00': 0.5749597701239134, '2014-03-04 12:00:00': 0.2520548328140329, '2014-03-03 12:00:00': 0.23480301091660602}, 80: {'2014-03-07 12:00:00': 0.8067454923172355, '2014-03-06 12:00:00': 0.5625704465206198, '2014-03-08 12:00:00': 0.7662222483666211, '2014-03-09 12:00:00': 0.7459029006971558, '2014-03-05 12:00:00': 0.5026487189791736, '2014-03-04 12:00:00': 0.21795315154494418, '2014-03-03 12:00:00': 0.20046208495685142}, 81: {'2014-03-07 12:00:00': 0.8113384599668045, '2014-03-06 12:00:00': 0.48752391218220853, '2014-03-08 12:00:00': 0.7677518480482376, '2014-03-09 12:00:00': 0.6907594792957428, '2014-03-05 12:00:00': 0.3801293407944545, '2014-03-04 12:00:00': -0.12568974984868508, '2014-03-03 12:00:00': -0.12553573533139972}, 82: {'2014-03-07 12:00:00': 0.6283965640945607, '2014-03-06 12:00:00': 0.351798938165081, '2014-03-08 12:00:00': 0.6083835829647908, '2014-03-09 12:00:00': 0.5183837141551498, '2014-03-05 12:00:00': 0.26390872311342833, '2014-03-04 12:00:00': -0.24595663122112743, '2014-03-03 12:00:00': -0.24595328539227038}, 83: {'2014-03-07 12:00:00': 0.55478807192813, '2014-03-06 12:00:00': 0.29957397744688463, '2014-03-08 12:00:00': 0.5442427444506803, '2014-03-09 12:00:00': 0.4563244207194359, '2014-03-05 12:00:00': 0.22279030678454442, '2014-03-04 12:00:00': -0.31160751750928806, '2014-03-03 12:00:00': -0.31115568882876043}, 84: {'2014-03-07 12:00:00': 0.551101330980102, '2014-03-06 12:00:00': 0.3011347109362216, '2014-03-08 12:00:00': 0.5510709774248197, '2014-03-09 12:00:00': 0.4643750865417838, '2014-03-05 12:00:00': 0.22333242290187205, '2014-03-04 12:00:00': -0.31159155469354716, '2014-03-03 12:00:00': -0.3115743255359599}, 85: {'2014-03-07 
12:00:00': 0.3601862363382602, '2014-03-06 12:00:00': 0.18175120146161167, '2014-03-08 12:00:00': 0.3655817569410714, '2014-03-09 12:00:00': 0.271315495060216, '2014-03-05 12:00:00': 0.1375654343662917, '2014-03-04 12:00:00': -0.15837964472067964, '2014-03-03 12:00:00': -0.16540247659535726}, 86: {'2014-03-07 12:00:00': 0.3764890501390275, '2014-03-06 12:00:00': 0.21994971551131742, '2014-03-08 12:00:00': 0.35156682878016643, '2014-03-09 12:00:00': 0.31574271969701273, '2014-03-05 12:00:00': 0.17592974094926492, '2014-03-04 12:00:00': -0.19041990463760594, '2014-03-03 12:00:00': -0.19693033036705437}, 87: {'2014-03-07 12:00:00': 0.371958756462457, '2014-03-06 12:00:00': 0.30196474298318154, '2014-03-08 12:00:00': 0.3652942773787211, '2014-03-09 12:00:00': 0.44195424674174383, '2014-03-05 12:00:00': 0.2565051941503364, '2014-03-04 12:00:00': -0.19341881188262994, '2014-03-03 12:00:00': -0.19342156814200173}, 88: {'2014-03-07 12:00:00': 0.049003598337108246, '2014-03-06 12:00:00': 0.04105795737490717, '2014-03-08 12:00:00': 0.022083921329372473, '2014-03-09 12:00:00': 0.059699458732959704, '2014-03-05 12:00:00': 0.02162518456933677, '2014-03-04 12:00:00': -0.2565801982288831, '2014-03-03 12:00:00': -0.25781948553297795}, 89: {'2014-03-07 12:00:00': 0.07120991162939172, '2014-03-06 12:00:00': 0.07135522051885346, '2014-03-08 12:00:00': 0.0466718334131766, '2014-03-09 12:00:00': 0.10712028511350523, '2014-03-05 12:00:00': 0.05448156462223287, '2014-03-04 12:00:00': -0.18570795385090152, '2014-03-03 12:00:00': -0.1880665009781033}, 90: {'2014-03-07 12:00:00': 0.06441214301147341, '2014-03-06 12:00:00': 0.06966977365402965, '2014-03-08 12:00:00': 0.04869869175683612, '2014-03-09 12:00:00': 0.09962900156239665, '2014-03-05 12:00:00': 0.054205227646569126, '2014-03-04 12:00:00': -0.16599617276453182, '2014-03-03 12:00:00': -0.1682495052962475}, 91: {'2014-03-07 12:00:00': 0.00613512439209429, '2014-03-06 12:00:00': 0.04124414116476949, '2014-03-08 12:00:00': 
-0.0046611062498681105, '2014-03-09 12:00:00': 0.05440207263844933, '2014-03-05 12:00:00': 0.031111712108839472, '2014-03-04 12:00:00': -0.16944232291815442, '2014-03-03 12:00:00': -0.1707955962330114}, 92: {'2014-03-07 12:00:00': 0.012035642537377956, '2014-03-06 12:00:00': 0.05246568797117702, '2014-03-08 12:00:00': -0.008109427886085362, '2014-03-09 12:00:00': 0.0625105421316924, '2014-03-05 12:00:00': 0.04564699040058084, '2014-03-04 12:00:00': -0.1306308395671311, '2014-03-03 12:00:00': -0.13196806479634948}, 93: {'2014-03-07 12:00:00': -0.3364259986056006, '2014-03-06 12:00:00': -0.22111645556663387, '2014-03-08 12:00:00': -0.35262496003636484, '2014-03-09 12:00:00': -0.35334161191814717, '2014-03-05 12:00:00': -0.2054824787381681, '2014-03-04 12:00:00': -0.4202806650495371, '2014-03-03 12:00:00': -0.41683736988349845}, 94: {'2014-03-07 12:00:00': -0.5458897687117312, '2014-03-06 12:00:00': -0.43946413972471793, '2014-03-08 12:00:00': -0.5846113335376663, '2014-03-09 12:00:00': -0.6263606259147649, '2014-03-05 12:00:00': -0.4238628692323021, '2014-03-04 12:00:00': -0.6073037727766368, '2014-03-03 12:00:00': -0.6010284965949724}, 95: {'2014-03-07 12:00:00': -0.670460746514578, '2014-03-06 12:00:00': -0.6098797481781845, '2014-03-08 12:00:00': -0.7278562494472929, '2014-03-09 12:00:00': -0.8275954775311841, '2014-03-05 12:00:00': -0.5940658983912237, '2014-03-04 12:00:00': -0.7420468726859938, '2014-03-03 12:00:00': -0.7337159220107534}, 96: {'2014-03-07 12:00:00': -0.6020402516147566, '2014-03-06 12:00:00': -0.5204638397853713, '2014-03-08 12:00:00': -0.6319098308610852, '2014-03-09 12:00:00': -0.6700535363230398, '2014-03-05 12:00:00': -0.5184965469141745, '2014-03-04 12:00:00': -0.6455426099974056, '2014-03-03 12:00:00': -0.636510101170255}, 97: {'2014-03-07 12:00:00': -0.6937943665035677, '2014-03-06 12:00:00': -0.604334545171399, '2014-03-08 12:00:00': -0.694064794540567, '2014-03-09 12:00:00': -0.6691654971663674, '2014-03-05 12:00:00': 
-0.6127910220861091, '2014-03-04 12:00:00': -0.704271502349781, '2014-03-03 12:00:00': -0.6941062978695992}, 98: {'2014-03-07 12:00:00': -0.6914312740895392, '2014-03-06 12:00:00': -0.6114849643167969, '2014-03-08 12:00:00': -0.6641925775357975, '2014-03-09 12:00:00': -0.6340820378771184, '2014-03-05 12:00:00': -0.6288994997443805, '2014-03-04 12:00:00': -0.7144805525538082, '2014-03-03 12:00:00': -0.7044216411612443}, 99: {'2014-03-07 12:00:00': -0.921940340186195, '2014-03-06 12:00:00': -0.7983447890651834, '2014-03-08 12:00:00': -0.9077640282469562, '2014-03-09 12:00:00': -0.8760935738259852, '2014-03-05 12:00:00': -0.8087802012826655, '2014-03-04 12:00:00': -0.9498251101796926, '2014-03-03 12:00:00': -0.9376344022442387}, 100: {'2014-03-07 12:00:00': -0.6502811791490869, '2014-03-06 12:00:00': -0.5622896754701989, '2014-03-08 12:00:00': -0.6473804278381912, '2014-03-09 12:00:00': -0.6028920553188749, '2014-03-05 12:00:00': -0.5763592654755898, '2014-03-04 12:00:00': -0.6871377658340921, '2014-03-03 12:00:00': -0.6783114719115901}, 101: {'2014-03-07 12:00:00': -0.4136747172385559, '2014-03-06 12:00:00': -0.368554189022442, '2014-03-08 12:00:00': -0.3894316021032892, '2014-03-09 12:00:00': -0.2668161326264487, '2014-03-05 12:00:00': -0.39082091481578807, '2014-03-04 12:00:00': -0.4957795292399291, '2014-03-03 12:00:00': -0.4907511449217411}, 102: {'2014-03-07 12:00:00': -0.4701751453940642, '2014-03-06 12:00:00': -0.42091085615951657, '2014-03-08 12:00:00': -0.4311115899746283, '2014-03-09 12:00:00': -0.3219436882837803, '2014-03-05 12:00:00': -0.44506053476270147, '2014-03-04 12:00:00': -0.5422723824602041, '2014-03-03 12:00:00': -0.5364903830827352}, 103: {'2014-03-07 12:00:00': -0.3852862076196847, '2014-03-06 12:00:00': -0.3719256062209696, '2014-03-08 12:00:00': -0.3485496759624416, '2014-03-09 12:00:00': -0.22878887508411433, '2014-03-05 12:00:00': -0.3986534855865486, '2014-03-04 12:00:00': -0.4940729642002879, '2014-03-03 12:00:00': -0.48938415470182034}, 
104: {'2014-03-07 12:00:00': -0.4765366535544934, '2014-03-06 12:00:00': -0.4821486364684108, '2014-03-08 12:00:00': -0.4596008223880053, '2014-03-09 12:00:00': -0.39910048979970114, '2014-03-05 12:00:00': -0.5055294434432277, '2014-03-04 12:00:00': -0.6041808901584869, '2014-03-03 12:00:00': -0.5974969889159666}, 105: {'2014-03-07 12:00:00': -0.4465173444391115, '2014-03-06 12:00:00': -0.5043861474453069, '2014-03-08 12:00:00': -0.43106728022014756, '2014-03-09 12:00:00': -0.412475451399181, '2014-03-05 12:00:00': -0.5372682075455392, '2014-03-04 12:00:00': -0.6311454896119801, '2014-03-03 12:00:00': -0.6246301031101439}, 106: {'2014-03-07 12:00:00': -0.5137502060144477, '2014-03-06 12:00:00': -0.6185911370714507, '2014-03-08 12:00:00': -0.5143346447150555, '2014-03-09 12:00:00': -0.5544198483426842, '2014-03-05 12:00:00': -0.6502592886205524, '2014-03-04 12:00:00': -0.6951057556637562, '2014-03-03 12:00:00': -0.6866586931631931}, 107: {'2014-03-07 12:00:00': -0.6455855872631884, '2014-03-06 12:00:00': -0.7678174646153069, '2014-03-08 12:00:00': -0.6269859806526044, '2014-03-09 12:00:00': -0.6801430070909245, '2014-03-05 12:00:00': -0.8031711030506336, '2014-03-04 12:00:00': -0.8364778773117615, '2014-03-03 12:00:00': -0.825493353844011}, 108: {'2014-03-07 12:00:00': -0.5610260781429112, '2014-03-06 12:00:00': -0.66884732959063, '2014-03-08 12:00:00': -0.533960646472841, '2014-03-09 12:00:00': -0.5877961406505322, '2014-03-05 12:00:00': -0.7012238508069267, '2014-03-04 12:00:00': -0.7718349367193457, '2014-03-03 12:00:00': -0.7621935015356391}, 109: {'2014-03-07 12:00:00': -0.8010427366046018, '2014-03-06 12:00:00': -0.9148450603273686, '2014-03-08 12:00:00': -0.7773785389048666, '2014-03-09 12:00:00': -0.8733856389332111, '2014-03-05 12:00:00': -0.957441327495831, '2014-03-04 12:00:00': -0.9776535651811284, '2014-03-03 12:00:00': -0.9648310836519325}, 110: {'2014-03-07 12:00:00': -0.718961548040229, '2014-03-06 12:00:00': -0.7508079260985062, '2014-03-08 
12:00:00': -0.6782747659911649, '2014-03-09 12:00:00': -0.6890674496186321, '2014-03-05 12:00:00': -0.7872691845896785, '2014-03-04 12:00:00': -0.8607746687931026, '2014-03-03 12:00:00': -0.8501142561051339}, 111: {'2014-03-07 12:00:00': -0.8496908232126043, '2014-03-06 12:00:00': -0.8696926402970174, '2014-03-08 12:00:00': -0.7663727491324865, '2014-03-09 12:00:00': -0.6430455632254302, '2014-03-05 12:00:00': -0.9172715293152394, '2014-03-04 12:00:00': -1.042282159052625, '2014-03-03 12:00:00': -1.0422588675082292}, 112: {'2014-03-07 12:00:00': -0.785794455536578, '2014-03-06 12:00:00': -0.8090893359911652, '2014-03-08 12:00:00': -0.6924777364417269, '2014-03-09 12:00:00': -0.5624847517855038, '2014-03-05 12:00:00': -0.8644613927368486, '2014-03-04 12:00:00': -0.9344363783666516, '2014-03-03 12:00:00': -0.9343966650524885}, 113: {'2014-03-07 12:00:00': -0.6944199404106433, '2014-03-06 12:00:00': -0.7610796369940018, '2014-03-08 12:00:00': -0.604427496111255, '2014-03-09 12:00:00': -0.4744360556862908, '2014-03-05 12:00:00': -0.8133073038046794, '2014-03-04 12:00:00': -0.9132915688794969, '2014-03-03 12:00:00': -0.9132843840133228}, 114: {'2014-03-07 12:00:00': -0.796261945969981, '2014-03-06 12:00:00': -0.819621636255575, '2014-03-08 12:00:00': -0.7228209944527726, '2014-03-09 12:00:00': -0.5392014238849501, '2014-03-05 12:00:00': -0.8676907185159071, '2014-03-04 12:00:00': -0.9578208733508333, '2014-03-03 12:00:00': -0.9578098471612002}, 115: {'2014-03-07 12:00:00': -0.7847010915545979, '2014-03-06 12:00:00': -0.8113306126874311, '2014-03-08 12:00:00': -0.698057610569589, '2014-03-09 12:00:00': -0.4980505454321248, '2014-03-05 12:00:00': -0.8686203963890118, '2014-03-04 12:00:00': -0.9485948263137156, '2014-03-03 12:00:00': -0.9485568766938955}, 116: {'2014-03-07 12:00:00': -0.7861369837966065, '2014-03-06 12:00:00': -0.8061301445732948, '2014-03-08 12:00:00': -0.6828163859002748, '2014-03-09 12:00:00': -0.4895021559214109, '2014-03-05 12:00:00': 
-0.862079178024816, '2014-03-04 12:00:00': -0.9720565583545961, '2014-03-03 12:00:00': -0.9720489596642032}, 117: {'2014-03-07 12:00:00': -0.7568089287994229, '2014-03-06 12:00:00': -0.83679627876044, '2014-03-08 12:00:00': -0.6501574319338538, '2014-03-09 12:00:00': -0.5201748065249561, '2014-03-05 12:00:00': -0.9025676902260925, '2014-03-04 12:00:00': -0.9975452480934477, '2014-03-03 12:00:00': -0.9975379702639766}, 118: {'2014-03-07 12:00:00': -0.7650160829763369, '2014-03-06 12:00:00': -0.855080016030069, '2014-03-08 12:00:00': -0.6282435153901746, '2014-03-09 12:00:00': -0.4814612492023555, '2014-03-05 12:00:00': -0.9290194989203092, '2014-03-04 12:00:00': -1.0240918797702916, '2014-03-03 12:00:00': -1.0240920028037588}, 119: {'2014-03-07 12:00:00': -0.7232482688639832, '2014-03-06 12:00:00': -0.8365230525124815, '2014-03-08 12:00:00': -0.5833137253599714, '2014-03-09 12:00:00': -0.44337613332529946, '2014-03-05 12:00:00': -0.9198824354734073, '2014-03-04 12:00:00': -0.9998369301253762, '2014-03-03 12:00:00': -0.9998086716352285}, 120: {'2014-03-07 12:00:00': -0.7524949779491814, '2014-03-06 12:00:00': -0.8524910255767493, '2014-03-08 12:00:00': -0.5925045336437524, '2014-03-09 12:00:00': -0.4425083377338669, '2014-03-05 12:00:00': -0.9367503625979765, '2014-03-04 12:00:00': -0.986764123770921, '2014-03-03 12:00:00': -0.9867865170209971}, 121: {'2014-03-07 12:00:00': -0.7524184344980247, '2014-03-06 12:00:00': -0.8690855184284105, '2014-03-08 12:00:00': -0.5623912735819394, '2014-03-09 12:00:00': -0.4757224844467808, '2014-03-05 12:00:00': -0.9643753304454576, '2014-03-04 12:00:00': -1.0043782486115522, '2014-03-03 12:00:00': -1.0043557851614746}, 122: {'2014-03-07 12:00:00': -0.8046197261838617, '2014-03-06 12:00:00': -0.9412931546167175, '2014-03-08 12:00:00': -0.6212763867233788, '2014-03-09 12:00:00': -0.5579391652107071, '2014-03-05 12:00:00': -1.0409939423460022, '2014-03-04 12:00:00': -1.0659923432846614, '2014-03-03 12:00:00': -1.0659876907834054}, 
123: {'2014-03-07 12:00:00': -0.7836986371898685, '2014-03-06 12:00:00': -0.9436043730988743, '2014-03-08 12:00:00': -0.6137945996607728, '2014-03-09 12:00:00': -0.5804322573472035, '2014-03-05 12:00:00': -1.0471477756722996, '2014-03-04 12:00:00': -1.0172448118387063, '2014-03-03 12:00:00': -1.017309273248691}, 124: {'2014-03-07 12:00:00': -0.6950214182340423, '2014-03-06 12:00:00': -0.8650169170840974, '2014-03-08 12:00:00': -0.5183616063772665, '2014-03-09 12:00:00': -0.47508129496456775, '2014-03-05 12:00:00': -0.9814311907021701, '2014-03-04 12:00:00': -0.9563684480747451, '2014-03-03 12:00:00': -0.9563179463873037}, 125: {'2014-03-07 12:00:00': -0.7323402217710943, '2014-03-06 12:00:00': -0.8890411371364562, '2014-03-08 12:00:00': -0.5423102076730464, '2014-03-09 12:00:00': -0.4955985851387619, '2014-03-05 12:00:00': -1.00427774657555, '2014-03-04 12:00:00': -0.9642710510038891, '2014-03-03 12:00:00': -0.964257634225308}, 126: {'2014-03-07 12:00:00': -0.5767385502168696, '2014-03-06 12:00:00': -0.7599934980716333, '2014-03-08 12:00:00': -0.40016019155684396, '2014-03-09 12:00:00': -0.3969296041494819, '2014-03-05 12:00:00': -0.8701110507310228, '2014-03-04 12:00:00': -0.8450321541564761, '2014-03-03 12:00:00': -0.8449605237962361}}
def predict(request):
    """Serve the canned forecast data as JSON.

    The original Prophet-based forecasting pipeline is disabled; when the
    request carries POST data the view responds with the module-level
    TEST_DATA, reshaped so that each JSON key is a column position and each
    value is the list of that column's values.
    """
    res = {}
    if request.POST:
        # Real forecasting (Prophet fit/predict per sensor) is bypassed here.
        res = TEST_DATA
    # Transpose so sensor ids become rows and timestamps become columns.
    frame = pd.DataFrame(res).T
    payload = {position: frame.iloc[:, position].tolist()
               for position in range(len(frame.columns))}
    return JsonResponse(payload)
| [
"zenmeder@gmail.com"
] | zenmeder@gmail.com |
c40982d331906201733f34caceac7c488ca33a41 | a5486f0330a55340cf94c919019434ea0112c0f7 | /viewer/app/__init__.py | 3af0746464e3e75a22223a7c1958c4f77e2981e8 | [] | no_license | CPJKU/audio-tagger | a0194e8faa57ea16a298f35428c614a097d29671 | 08c12af04d887f6a671e7019f83274b360489722 | refs/heads/master | 2020-05-20T03:09:37.010832 | 2019-09-04T06:14:51 | 2019-09-04T06:14:51 | 185,350,579 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | """This package contains the functional component
of the kivy app which covers event functions, initializations
and periodic updates of visualization and view via
clocking mechanism.
""" | [
"alex@jku.at"
] | alex@jku.at |
5909b8a429dde3c3db85365a4a2fcafe8504a73c | 6febd920ced70cbb19695801a163c437e7be44d4 | /leetcode_oj/string/strStr.py | b0bc9a66ec3e9adf67f316f37ee2da101b6c25ef | [] | no_license | AngryBird3/gotta_code | b0ab47e846b424107dbd3b03e0c0f3afbd239c60 | b9975fef5fa4843bf95d067bea6d064723484289 | refs/heads/master | 2021-01-20T16:47:35.098125 | 2018-03-24T21:31:01 | 2018-03-24T21:31:01 | 53,180,336 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | class Solution(object):
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
if not haystack and not needle:
return 0
for i in range(len(haystack) - len(needle) + 1):
match = True
for j in range(len(needle)):
if haystack[i+j] != needle[j]:
match = False
break
if match:
return i
return -1
| [
"dhaaraa.darji@gmail.com"
] | dhaaraa.darji@gmail.com |
543d885088808f62594479bc296f3be23612b33d | 42fdf741bf64ea2e63d1546bb08356286f994505 | /test_20160921_macroblk_generation/rasp30_vmm_gen7.py | 5a540d9b8d4bc1419081b28fb7ceb4a4349b1771 | [] | no_license | skim819/RASP_Workspace_sihwan | 7e3cd403dc3965b8306ec203007490e3ea911e3b | 0799e146586595577c8efa05c647b8cb92b962f4 | refs/heads/master | 2020-12-24T05:22:25.775823 | 2017-04-01T22:15:18 | 2017-04-01T22:15:18 | 41,511,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | self.dev_types =['fgota']*2 + ['ota_buf']*1 + ['ota']*1 + ['ota_vmm']*1 + ['cap']*4+ ['nfet']*2 + ['pfet']*2 + ['tgate']*4 + ['nmirror_vmm']*2+['ladder_blk']*1+ ['c4_blk']*1+ ['speech']*1+ ['INFneuron']*1+ ['lpf']*1+['nfet_i2v']*1+['pfet_i2v']*1+['nmirror_w_bias']*1+['fgswc_nmirror_w_bias']*1+['i2v_pfet_gatefgota']*1+['mismatch_meas']*1+['peak_detector']*1+['ramp_fe']*1+['sigma_delta_fe']*1+['vmm_senseamp1']*1+['vmm_senseamp2']*1+['wta']*1+['wta_primary']*1+['common_source']*1+['gnd_out']*1+['vdd_out']*1+['in2in_x1']*1+['in2in_x6']*1+['volt_div']*1+['volt_div_fgota']*1+['integrator']*1+['integrator_nmirror']*1+['fgswitch']*1+['tgate_so']*1+['vmm4x4_SR']*1+['vmm8x4_SR']*1+['SR4']*1+['vmm4x4_SR2']*1+['vmm4x4']*1+['sftreg']*1+['DAC_sftreg']*1 +['sftreg2']*1+['sftreg3']*1+['sftreg4']*1+['mmap_local_swc']*1+['th_logic']*1+['vmm8x4']*1+['vmm8inx8in']*1+['vmm8x4_in']*1+['vmm12x1']*1+['fg_io']*1+['ladder_filter']*1+['vmm12x1_wowta']*1+['TIA_blk']*1+['Adaptive_receptor']*1
+['testtemp']*1
| [
"ubuntu@ubuntu-VirtualBox.(none)"
] | ubuntu@ubuntu-VirtualBox.(none) |
a7e448139f2bd614be72df1a7ece9dde49e3be0f | 2a7e44adc8744c55a25e3cafcc2fa19a1607e697 | /settings_inspector/management/commands/inspect_settings.py | 6f7437af5ca147fdd91a75feddb2467cdbec5bf7 | [
"MIT"
] | permissive | fcurella/django-settings_inspector | 45529288dc8dde264383739c55abe6a9d2077ded | 69a6295de865f540d024e79aab4d211ce3c1d847 | refs/heads/master | 2020-06-04T01:57:17.216783 | 2012-01-05T19:05:12 | 2012-01-05T19:05:12 | 2,989,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | from settings_inspector.parser import Setting
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
    """Management command that loads the settings tree and stops in a debugger.

    NOTE(review): this is a development aid -- it drops into an ipdb
    breakpoint instead of printing a report; remove before production use.
    """
    def handle(self, *args, **options):
        # Root the settings inspection at Django's own settings module.
        root_setting = Setting('django.conf')
        # Interactive debugger with `root_setting` in scope for exploration.
        import ipdb; ipdb.set_trace()
| [
"flavio.curella@gmail.com"
] | flavio.curella@gmail.com |
f940e17f31240e7c8c9de2238a058a19ddc44b9f | 3e622c2428ef8ccf8b8e16a7f2dfeb5f5d1b4c09 | /meiduo_mall/apps/areas/urls.py | d11b42b1bc351d90748479a576309e3a7ce95cbc | [] | no_license | Frankie379350068/meiduo_project_0417 | 24a0b8ec2972f50bff87ee77ddc91519020a4207 | f4b52ae623ce03fc2b91f1dfeaa75c5245a51742 | refs/heads/master | 2022-10-11T23:27:43.940643 | 2020-06-04T13:39:44 | 2020-06-04T13:39:44 | 266,492,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from django.urls import path
from . import views
urlpatterns = [
# 查询省份地址: http://www.meiduo.site:8000/areas/
path('areas/', views.ProvinceAreasView.as_view()),
# 查询父级下属地址: http://www.meiduo.site:8000/areas/(?P<pk>[1-9]\d+)/
path('areas/<int:parentid>/', views.SubsAreasView.as_view()),
] | [
"379350068@qq.com"
] | 379350068@qq.com |
bf6038797d7302d368104f829deeb64bcbc334af | 5a34aff166990d6bc0a17ce15a123122b28844af | /highestProd3Ele.py | ce59746d6e5980f0f28d06454e6323cac0846837 | [] | no_license | monkeydunkey/interviewCakeProblems | 6ba01f5a54309b4af87c35314441b59734401e75 | a4b17026a812e88ae6888c997a254353de4980c1 | refs/heads/master | 2021-04-26T23:50:45.668819 | 2018-05-28T06:57:22 | 2018-05-28T06:57:22 | 123,868,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | import heapq
def highestProd(li):
    """Return the highest product obtainable from any three elements of li.

    The best triple is either the three largest values, or the two smallest
    (possibly large-magnitude negatives) times the single largest value.

    :param li: sequence of at least three numbers
    :return: maximum product of any three elements
    """
    # heapq.nlargest/nsmallest replace the hand-maintained heaps of the
    # original (which also left an unused enumerate index behind).
    top3 = heapq.nlargest(3, li)
    bottom2 = heapq.nsmallest(2, li)
    return max(top3[0] * top3[1] * top3[2],
               bottom2[0] * bottom2[1] * top3[0])
print highestProd([1, 2, 3, 4, 5])
print highestProd([-10, -10, 1, 2, 3])
print highestProd([-10, 1, 0, 0])
| [
"bhushan.shashank93@gmail.com"
] | bhushan.shashank93@gmail.com |
88b8a76d8eea00920d813fef2ae4105f8208a9db | 14bbbb474765cfa43f73d52e68ce66b4b9f04f7e | /lab02/lab02example01/scripts/subscriber_node.py | 918f029842c6bb8fbc85fc5af45327f81293b04f | [] | no_license | ucabwl2/robotics_system_hw | 267a6d209cb9df37f68261092d0c4f41c4b3e40e | 5da51ce01498ba1f4693522ac51467858e268cae | refs/heads/main | 2023-01-31T18:44:16.006437 | 2020-12-08T19:33:21 | 2020-12-08T19:33:21 | 319,704,116 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | #!/usr/bin/env python
import rospy
from std_msgs.msg import Float64MultiArray
def callback(msg):
    # Print the payload of each received Float64MultiArray message.
    # (Python 2 print statement -- this ROS node targets Python 2.)
    print 'Incoming message: ', msg.data
def listener():
    # Initialise a ROS node and print every message seen on the 'chatter'
    # topic until the node is shut down.
    # anonymous=True appends a random suffix so several listeners can coexist.
    rospy.init_node('listener', anonymous=True)
    sub = rospy.Subscriber("chatter", Float64MultiArray, callback)
    # Block this thread; callbacks run until rospy is shut down.
    rospy.spin()
if __name__ == '__main__':
listener() | [
"krittin.pachtrachai.13@ucl.ac.uk"
] | krittin.pachtrachai.13@ucl.ac.uk |
0481df519f330fc8063968e7d91da735c221868c | 7a38ae563ff3ab0496a7b2df8ff809170a26b88d | /Data_cuation_and_publiction_application/data_curation_correlation/models/correlation_analysis_model.py | 741f7653f35053e1e5022457155a886950b65263 | [] | no_license | walkerzjs/COMP9321_Group_Project_TwoTier_DataAnalysis_Platform | 8e83b109387caa9ba02614e560413357dbf1d43f | 22e0802ebbc44824ca70df9ca25b460ba1900f64 | refs/heads/master | 2022-12-10T07:06:35.978480 | 2019-09-24T11:56:39 | 2019-09-24T11:56:39 | 210,589,702 | 0 | 0 | null | 2022-12-08T02:35:01 | 2019-09-24T11:48:44 | Python | UTF-8 | Python | false | false | 268 | py | class CorrelationAnalytics:
def __init__(self, data_list, cols, pr, p_value, features, year):
self.data_list = data_list
self.cols = cols
self.pr = pr
self.p_value = p_value
self.features = features
self.year = year
| [
"jacobjszha"
] | jacobjszha |
ac2b2da7079dfa4652f071d55af373cacb3c74b6 | d09ca00f6618ce6f93f64565cc41e0e524d54e06 | /图像处理/读取图片.py | f7c6224ef328b98d7d15ff64945e482e785f4dea | [] | no_license | hzhhs/repoStudy | a1dab1058b5d6407188b1cbb9e01d95a6341b992 | 8784b824e214afa37e661501b0f68eea896c5970 | refs/heads/main | 2023-02-24T08:56:54.955313 | 2021-01-29T01:01:33 | 2021-01-29T01:01:33 | 333,996,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,180 | py | from PIL import Image
# pic_Im=Image.open("校园.jpg")
# pic_Im.show()
# 生成新的125*125的黑色图片
# newIm=Image.new('RGB',(125,125))
# newIm.show()
# newIm2=Image.new('RGB',(125,125),"red")
# newIm2=Image.new('RGB',(125,125),(255, 255, 0))
# newIm2.show()
# from PIL import Image
# im = Image.open("校园.jpg")
# print(im.format) ## 打印出格式信息输出是JPEG
# im.show()
# from PIL import Image
# im = Image.open("校园2.jpg")
# print(im.mode) ## 打印出模式信息
# im.show()
# 另存为其它格式图片
# pic_Im.save("校园3.png")
# 转为灰度图片
# Im=pic_Im.convert('L')
# Im.save("校园2.jpg")
# print(pic_Im.getbands())
# from PIL import Image
# im = Image.open("校园.jpg")
# new_im = im.convert('p')
# print(new_im.mode)
# new_im.save("校园4.jpg")
# new_im.show()
# from PIL import Image
# im = Image.open("校园3.jpg")
# print(im.size) ## 打印出尺寸信息
# print(im.palette)
# im.show()
# 对图像进行convert操作,转换成“P”模式,返回值为ImagePalette类
# from PIL import Image
# im = Image.open("校园.jpg")
# new_im = im.convert('P')
# print(new_im.mode)
# print(new_im.palette)
# from PIL import Image
# im = Image.open('timg.gif') # 读入一个GIF文件
# try:
# im.save('picframe{:02d}.png'.format(im.tell()))
# while True:
# im.seek(im.tell()+1)
# im.save('picframe{:02d}.png'.format(im.tell()))
# except:
# print("处理结束")
# 打开一个jpg图像文件,注意是当前路径:
# im = Image.open('校园.jpg')
# # 获得图像尺寸:
# w, h = im.size
# # 缩放到50%:
# im.thumbnail((w/8, h/8))
# # 把缩放后的图像用jpeg格式保存:
# im.save('校园thumbnail.jpg', 'jpeg')
#
# from PIL import Image
# im = Image.open("定妆.jpg")
# region = im.resize((400, 400)) ##重新设定大小
# region.save("定妆2.jpg",'jpeg')
# region.show()
# from PIL import Image
# im = Image.open("定妆.jpg")
# im_45 = im.rotate(45)
# im_30 = im.rotate(30, Image.NEAREST,1)
# print(im_45.size,im_30.size)
# im_45.show()
# im_30.show()
#
# from PIL import Image
# im1 = Image.open("校园.jpg")
# im2 = Image.open("职业.jpg")
# r1,g1,b1 = im1.split()
# r2,g2,b2 = im2.split()
# print(r1.mode,r1.size,g1.mode,g1.size)
# print(r2.mode,r2.size,g2.mode,g2.size)
# new_im=[r1,g2,b2]
# print(len(new_im))
# im_merge = Image.merge("RGB",new_im)
# im_merge.show()
#
# from PIL import Image
# im1 = Image.open("校园.jpg")
# im2 = Image.open("职业.jpg")
# print(im1.mode,im1.size)
# print(im2.mode,im2.size)
# im = Image.blend(im1, im2, 0.2)
# im.show()
# from PIL import Image, ImageFilter
# # 打开一个jpg图像文件,注意是当前路径:
# im = Image.open('校园.jpg')
# # 应用模糊滤镜:
# im2 = im.filter(ImageFilter.BLUR)
# im2.save('校园blur.jpg', 'jpeg')
#
from PIL import Image
from PIL import ImageFilter ## bring in the ImageFilter module
# Open the sample image and apply three built-in filters to it.
imgF = Image.open("校园.jpg")
bluF = imgF.filter(ImageFilter.BLUR) ## mean (blur) filter
conF = imgF.filter(ImageFilter.CONTOUR) ## contour extraction
edgeF = imgF.filter(ImageFilter.FIND_EDGES) ## edge detection
# Display the original and each filtered result in turn.
imgF.show()
bluF.show()
conF.show()
edgeF.show()
"32220021@qq.com"
] | 32220021@qq.com |
7e4bc63ff0c99cb0211fe28f6037acca5e125649 | 4383ca5e60ce9786aa6db8c4f9063740d38f65ed | /replaceClass.py | 011ce1ccc3853e9243b9b0f2bdbb3358d3227670 | [] | no_license | sahajamatya/replace-classes | f10cb4289b179e4dd4443452ee987903e0fe50c5 | aaa3c0459d50117ebea53b4dcb75a869c7d5755b | refs/heads/main | 2023-08-18T17:29:50.139440 | 2021-09-29T16:23:29 | 2021-09-29T16:23:29 | 411,743,259 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,109 | py | # Sahaj Amatya, 1001661825
import sys
# listifyLines() returns the file line as a list
def listifyLines(line):
    """Split a whitespace-separated file line into its non-empty tokens.

    str.split() with no argument discards empty strings and strips the
    trailing newline in one pass, replacing the original O(n^2)
    remove-in-a-loop cleanup (which could also leave an empty token behind
    when the line ended in a space before the newline).

    :param line: one raw line read from the input file
    :return: list of the line's fields
    """
    return line.split()
# getFileData() returns the file data as a 2D list
def getFileData(fileName):
    """Read fileName and return its contents as a 2D list of tokens.

    :param fileName: path of the input data file
    :return: one token list (via listifyLines) per line of the file
    """
    # `with` guarantees the handle is closed even if reading fails;
    # the original left the file open on exceptions.
    with open(fileName, 'r') as inputFile:
        return [listifyLines(line) for line in inputFile]
# getClasses() returns a sorted list of all unique classes in the file
def getClasses(fileName):
    """Return the sorted unique class labels found in fileName.

    The class label is the integer in the last column of every line.

    :param fileName: path of the input data file
    :return: sorted list of distinct integer class labels
    """
    # A set gives O(1) membership instead of the original O(n) list scan,
    # and `with` closes the file deterministically.
    classes = set()
    with open(fileName, 'r') as inputFile:
        for line in inputFile:
            tokens = listifyLines(line)
            classes.add(int(tokens[len(tokens) - 1]))
    return sorted(classes)
# generateOutputFile() writes out a file with the specified substitutions
def generateOutputFile(fileData, fileName):
    """Append the rows of fileData to fileName as space-padded columns.

    Each field is left-justified to the width of the first field of the
    first row plus four, and followed by a single separating space.

    :param fileData: 2D list of string fields to write
    :param fileName: path of the output file (opened in append mode)
    """
    width = len(fileData[0][0]) + 4
    # Append mode is preserved from the original: repeated runs accumulate.
    # `with` closes the handle even on write errors (the original did not).
    with open(fileName, 'a') as outputFile:
        for row in fileData:
            outputFile.write("".join(field.ljust(width) + " " for field in row) + "\n")
def main():
    """Replace each numeric class label in a dataset file with a
    user-supplied string and append the result to an output file.

    Usage: script <input file> <output file>
    """
    inputFileName = sys.argv[1]
    outputFileName = sys.argv[2]
    fileData = getFileData(inputFileName)
    classes = getClasses(inputFileName)
    print("\nThese are all the unique classes in this dataset:", classes)
    print("\n")
    # Ask the user for one replacement string per distinct class label.
    substitutions = {}
    for c in classes:
        substitutions[c] = input(
            "Enter string substitution for class %d: " % c)
    print("\n")
    # The class label is the last field of every row.
    for data in fileData:
        data[len(data) - 1] = substitutions[int(data[len(data) - 1])]
    print("Generating output file...\n")
    generateOutputFile(fileData, outputFileName)
    print("Output file %s generated successfully." % outputFileName)
if __name__ == "__main__":
    main()
| [
"sahajamatya1@gmail.com"
] | sahajamatya1@gmail.com |
6f7b66a092a1ba668bb12f1f51f409beabc05222 | 613789c1ed6425600194e0a98177a0a5a213d63b | /Utility.py | 42fcfe694419e1489a99ec7a942730579280ba58 | [] | no_license | hadeelayoub/Just_Flex_Copy | 0f10aac0453509f13e3665f68a8725a7c5b04ec7 | e0c6ae47280646172e166d55fbb0503c53172b0d | refs/heads/master | 2020-03-16T11:26:21.465678 | 2018-08-16T23:53:40 | 2018-08-16T23:53:40 | 132,648,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | import math
class Utility:
    """Class of useful utility functions"""
    def __init__(self):
        return
    @staticmethod
    def euc(a, b):
        """Calculate Euclidean distance between two vectors

        Parameters `a` and `b` should be of identical dimension.

        :param a: First input vector
        :type a: [float]
        :param b: Second input vector
        :type b: [float]
        :return: Euclidean distance between input vectors
        :rtype: float
        """
        assert len(a) == len(b)
        # Sum of squared per-dimension differences, then square root.
        # (Also fixes the copy-pasted ":param b: First input vector" doc.)
        return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))
"edhill@daedalusai.com"
] | edhill@daedalusai.com |
daaac2f7da0023e63824e4f3ecceb5eb722fa571 | 59a4ffb20a830aaa63c471a324cf660dbad13c1c | /DeepNodes/activations.py | b80e015b417986f151dd16955af92da6d4f64874 | [
"MIT"
] | permissive | kyuunin/DeepNodes | d5c678bbe54a548e8c529fa085ac1656434b5487 | 17747b20b8bb803d3472dc0e31ba6833d724bb35 | refs/heads/master | 2022-11-11T03:59:54.877205 | 2020-07-02T21:30:03 | 2020-07-02T21:30:03 | 276,690,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | import torch.nn.functional as F
# Name -> activation-callable lookup table over torch.nn.functional.
# NOTE: many entries use the in-place variants (trailing underscore or
# inplace=True) and therefore mutate their input tensor; the softmax-family
# entries all operate along dim=1.
# NOTE(review): availability of some underscore variants (e.g. relu6_,
# rrelu_) depends on the installed torch version -- confirm before use.
activations = {
    "id": (lambda x: x),
    "relu": F.relu_,
    "hardtanh": F.hardtanh_,
    "relu6": (lambda x:F.relu6_(x,inplace=True)),
    "elu": F.elu_,
    "selu": (lambda x:F.selu(x,inplace=True)),
    "celu": (lambda x:F.celu(x,inplace=True)),
    "leaky_relu": F.leaky_relu_,
    "rrelu":F.rrelu_,
    "gelu":F.gelu,
    "logsigmoid":F.logsigmoid,
    "hardshrink":F.hardshrink,
    "tanhshrink":F.tanhshrink,
    "softsign":F.softsign,
    "softplus":F.softplus,
    "softmin":(lambda x:F.softmin(x,1)),
    "softmax":(lambda x:F.softmax(x,1)),
    "softshrink":F.softshrink,
    "gumbel_softmax":F.gumbel_softmax,
    "log_softmax":(lambda x:F.log_softmax(x,1)),
    "tanh":F.tanh,
    "sigmoid":F.sigmoid,
    }
| [
"cneumann@students.uni-mainz.de"
] | cneumann@students.uni-mainz.de |
bec6fd80e3ea2f7a84ac32ccb92ea92a81ec74c5 | 9efe6f966b23371edb6eaa5f91a7f4928094ce70 | /server/backend/machine/serializer.py | b5f2d74851e6c91fa85743b0daf879aac6004535 | [] | no_license | bmotevalli/condition-monitoring | 309c1a54ed80379ce5e26bc686ff63fd1ca45ea0 | 7816445546dcf9e81cceabb1acb654f65eb85cbf | refs/heads/master | 2023-08-15T19:52:15.112264 | 2020-08-10T11:58:47 | 2020-08-10T11:58:47 | 282,407,148 | 0 | 0 | null | 2021-09-22T19:37:25 | 2020-07-25T08:54:31 | JavaScript | UTF-8 | Python | false | false | 455 | py | from rest_framework import serializers
from machine.models import Machine, Sensor, TimeSeries
class MachineSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Machine model."""
    class Meta:
        model=Machine
        fields="__all__"
class SensorSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Sensor model."""
    class Meta:
        model=Sensor
        fields="__all__"
class TimeSeriesSerializer(serializers.ModelSerializer):
class Meta:
model=TimeSeries
fields="__all__" | [
"n.taherifar@gmail.com"
] | n.taherifar@gmail.com |
c8e02b9b5c879e0b86c644cc5d67238be6fee662 | 176497ba1cea7233f249a5f439a65f7c472b267f | /06_blog_detail/01_fix_capturing_path_components/portfolio/urls.py | 4a04f504ac61f76660e661e918f5599fa68f8d02 | [] | no_license | CodingNomads/django-web-dev | 79a3a94707489ca0d5f0bf49193b7ffdf6270f4a | e03b8ed130f100afb0296c0d76a84206fbbf789d | refs/heads/master | 2023-05-02T05:12:21.427462 | 2022-11-06T17:56:14 | 2022-11-06T17:56:14 | 235,174,521 | 1 | 7 | null | 2023-04-21T20:54:10 | 2020-01-20T18:53:31 | Python | UTF-8 | Python | false | false | 809 | py | """portfolio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Top-level URL routes: the Django admin plus the projects app's URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('projects/', include('projects.urls')),
]
| [
"breuss.martin@gmail.com"
] | breuss.martin@gmail.com |
8c73a626279a8279f58605d57986fed262257c4e | a2d231ebc1ca11f974613870aef66cb615ce7120 | /Lista2/subset.py | 70f203cbfea6431338bd023c9e4e778c3dd284a3 | [] | no_license | CristianoSouza/Doutorado-PAA | ab3eb19bdf54b4a397a9ce134c03662154a615ff | 9208002cf91f057a537717623c228254e3cdd97c | refs/heads/master | 2020-03-15T23:37:01.361800 | 2018-06-26T04:38:54 | 2018-06-26T04:38:54 | 132,397,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | import numpy as n
import Queue as q
import numpy as np
class Subset:
    """Per-element record for a disjoint-set (union-find) structure.

    NOTE(review): the field names are Portuguese -- `pai` ("father") looks
    like the parent pointer and `rank` the union-by-rank value; confirm
    against the code that builds these objects.
    """
    numero = None  # element identifier
    pai = None  # parent element in the set tree (presumably; see note)
    rank = 0  # rank used for union by rank (presumably; see note)
| [
"cristianoantonio.souza10@gmail.com"
] | cristianoantonio.souza10@gmail.com |
3aee28344db5bb9ef5e6a6c277e7f3977ee8a468 | 48bdb4a2981e61b81bd64b9fb8489a6623cfe7f8 | /020_evaluate.py | 0064c2ced3c687f16e1a891111ed919374371f26 | [] | no_license | makama-md/Predict-Lung-Disease | 2a83eaab6d2ed70bf899f41638f75252fe857e01 | 5adb6862f5ffdc7c3bc31d245bd896736e3c35fb | refs/heads/master | 2022-01-06T06:17:41.412768 | 2019-05-16T03:18:50 | 2019-05-16T03:18:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,123 | py | import sys, os
import azure_chestxray_utils
import azure_chestxray_keras_utils
from keras.models import load_model
import os
import pickle
import cv2
import numpy as np
import pandas as pd
from keras.models import load_model
from keras.utils import Sequence
from sklearn import metrics
from tensorflow.python.client import device_lib
import keras_contrib
path = os.getcwd()+r'\azure-share'
amlWBSharedDir = path
prj_consts = azure_chestxray_utils.chestxray_consts()
data_base_input_dir=os.path.join(amlWBSharedDir,
os.path.join(*(prj_consts.BASE_INPUT_DIR_list)))
data_base_output_dir=os.path.join(amlWBSharedDir,
os.path.join(*(prj_consts.BASE_OUTPUT_DIR_list)))
weights_dir = os.path.join(data_base_output_dir, os.path.join(*(prj_consts.MODEL_WEIGHTS_DIR_list)))
fully_trained_weights_dir = os.path.join(data_base_output_dir, os.path.join(*(prj_consts.FULLY_PRETRAINED_MODEL_DIR_list)))
nih_chest_xray_data_dir = os.path.join(data_base_input_dir,
os.path.join(*(prj_consts.ChestXray_IMAGES_DIR_list)))
data_partitions_dir = os.path.join(data_base_output_dir,
os.path.join(*(prj_consts.DATA_PARTITIONS_DIR_list)))
label_path = os.path.join(data_partitions_dir,'labels14_unormalized_cleaned.pickle')
partition_path = os.path.join(data_partitions_dir, 'partition14_unormalized_cleaned.pickle')
model_file_name = 'azure_chest_xray_14_weights_712split_epoch_054_val_loss_191.2588.hdf5'
model = load_model(os.path.join(fully_trained_weights_dir, model_file_name))
model.save_weights(os.path.join(fully_trained_weights_dir, 'weights_only_'+model_file_name))
models_file_name= [os.path.join(fully_trained_weights_dir,
'weights_only_azure_chest_xray_14_weights_712split_epoch_054_val_loss_191.2588.hdf5')]
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
resized_height = 224
resized_width = 224
num_channel = 3
num_classes = 14
batch_size = 100 #512
def get_available_gpus():
    """
    Returns: list of device names for the GPUs available in the system
    (the original docstring said "number of GPUs", but the function
    returns the names, not a count)
    """
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
get_available_gpus()
# get number of available GPUs
print("num of GPUs:", len(get_available_gpus()))
num_gpu = get_available_gpus()
# get number of available GPUs
print("num of GPUs:", len(get_available_gpus()))
pathologies_name_list = prj_consts.DISEASE_list
pathologies_name_list
stanford_result = [0.8094, 0.9248, 0.8638, 0.7345, 0.8676, 0.7802, 0.7680, 0.8887, 0.7901, 0.8878, 0.9371, 0.8047,
0.8062, 0.9164]
with open(label_path, 'rb') as f:
labels = pickle.load(f)
with open(partition_path, 'rb') as f:
partition = pickle.load(f)
class DataGenSequence(Sequence):
    """Keras Sequence yielding (image batch, label batch) pairs.

    Images are read from nih_chest_xray_data_dir, resized to
    (resized_height, resized_width), and returned as float16 arrays; labels
    come from the `labels` mapping keyed by image file name. Relies on the
    module-level configuration values batch_size / resized_* / num_*.
    """
    def __init__(self, labels, image_file_index, current_state):
        self.batch_size = batch_size
        self.labels = labels
        self.img_file_index = image_file_index
        self.current_state = current_state
        # Number of full batches; remainder images beyond the last full
        # batch are never served.
        self.len = len(self.img_file_index) // self.batch_size
        print("for DataGenSequence", current_state, "total rows are:", len(self.img_file_index), ", len is", self.len)
    def __len__(self):
        return self.len
    def __getitem__(self, idx):
        # print("loading data segmentation", idx)
        # make sure each batch size has the same amount of data
        current_batch = self.img_file_index[idx * self.batch_size: (idx + 1) * self.batch_size]
        X = np.empty((self.batch_size, resized_height, resized_width, num_channel))
        y = np.empty((self.batch_size, num_classes))
        for i, image_name in enumerate(current_batch):
            path = os.path.join(nih_chest_xray_data_dir, image_name)
            # loading data
            img = cv2.resize(cv2.imread(path), (resized_height, resized_width)).astype(np.float16)
            X[i, :, :, :] = img
            y[i, :] = labels[image_name]
        # only do random flipping in training status
        # NOTE(review): both branches are identical -- no augmentation is
        # actually applied here, unlike the training code.
        if self.current_state == 'train':
            # this is different from the training code
            x_augmented = X
        else:
            x_augmented = X
        return x_augmented, y
# load test data
# NOTE(review): X_test is allocated but never used below -- predictions come
# from DataGenSequence, which reloads the images itself.
X_test = np.empty((len(partition['test']), 224, 224, 3), dtype=np.float16)
# y_test is truncated to a multiple of batch_size to line up with the
# generator, which only serves full batches.
y_test = np.empty((len(partition['test']) - len(partition['test']) % batch_size, 14), dtype=np.float16)
for i, npy in enumerate(partition['test']):
    if (i < len(y_test)):
        # round to batch_size
        y_test[i, :] = labels[npy]
print("len of result is", len(y_test))
y_pred_list = np.empty((len(models_file_name), len(partition['test']), 14), dtype=np.float16)
# individual models
for index, current_model_file in enumerate(models_file_name):
    print(current_model_file)
    # model = load_model(current_model_file)
    # Rebuild the architecture and load only the weights (weights-only file).
    model = azure_chestxray_keras_utils.build_model(keras_contrib.applications.densenet.DenseNetImageNet121); model.load_weights(current_model_file)
    print('evaluation for model', current_model_file)
    # y_pred = model.predict(X_test)
    y_pred = model.predict_generator(generator=DataGenSequence(labels, partition['test'], current_state='test'),
                                     workers=32, verbose=1, max_queue_size=1)
    print("result shape", y_pred.shape)
    # add one fake row of ones in both test and pred values to avoid:
    # ValueError: Only one class present in y_true. ROC AUC score is not defined in that case.
    # NOTE(review): the insert happens inside the model loop, so y_test grows
    # by one extra all-ones row for every model evaluated.
    y_test = np.insert(y_test, 0, np.ones((y_test.shape[1],)), 0)
    y_pred = np.insert(y_pred, 0, np.ones((y_pred.shape[1],)), 0)
    # Per-disease AUC compared against the published Stanford baseline values.
    df = pd.DataFrame(columns=['Disease', 'Our AUC Score', 'Stanford AUC Score'])
    for d in range(14):
        df.loc[d] = [pathologies_name_list[d],
                     metrics.roc_auc_score(y_test[:, d], y_pred[:, d]),
                     stanford_result[d]]
    df['Delta'] = df['Stanford AUC Score'] - df['Our AUC Score']
    df.to_csv(current_model_file + ".csv", index=False)
    print(df)
"yht18801169870@gmail.com"
] | yht18801169870@gmail.com |
0abaa1f067fce6941cc52d9f663902655fa72690 | 59976302a73b91a4e539428ec09cc6e419fb77ec | /TP_9_Matrices/TP3_EJ2A.py | 40ea87c97f77ae07122469acb617ce74c2d90d9a | [] | no_license | aguscoppe/ejercicios-python | 5f8e839cb4cb7d789555c30e1de37da7308f01b5 | cc001b520225e623a577ea6fce63c49eeee4803b | refs/heads/master | 2023-01-13T07:08:16.203962 | 2020-11-16T21:42:21 | 2020-11-16T21:42:21 | 302,476,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | def imprimirMatriz(matriz):
filas = len(matriz)
columnas = len(matriz[0])
for f in range(filas):
for c in range(columnas):
print("%3d" %matriz[f][c], end="")
print()
def rellenarMatriz(matriz):
    """Fill the main diagonal of `matriz` in place with odd numbers 1, 3, 5...

    Cells off the diagonal are left untouched.

    :param matriz: rectangular 2D list (must have at least one row)
    """
    filas = len(matriz)
    columnas = len(matriz[0])
    # Walk the diagonal directly instead of scanning every cell:
    # O(min(n, m)) instead of the original O(n*m), with identical results.
    # The diagonal value at (i, i) is the i-th odd number, 2*i + 1.
    for i in range(min(filas, columnas)):
        matriz[i][i] = 2 * i + 1
def crearMatriz():
    """Prompt the user for a size n and return an n x n matrix of zeros."""
    # NOTE(review): the prompt says "tamño" -- likely a typo for "tamaño".
    n = int(input("Ingrese el tamño de la matriz: "))
    filas = n
    columnas = n
    # Build each row independently so rows do not share storage.
    matriz = [[0] * columnas for i in range(filas)]
    return matriz
# PROGRAMA PRINCIPAL
matriz = crearMatriz()
rellenarMatriz(matriz)
print()
imprimirMatriz(matriz) | [
"noreply@github.com"
] | noreply@github.com |
09f59f6939f829eb7698d0e10156b86e1d594a15 | 1c829b9f786a544f4cd715072af49b05f2aca545 | /test/candleIterator.py | 364c5af80257d81dbde53eb8e76686c23b70ddc3 | [] | no_license | Colman/Trading | 05f5a32b85ee927151b6a1948e5cac61021c48ac | 71cd5da318d3c56b763605b91a456b3cfaacd479 | refs/heads/master | 2020-12-26T23:50:30.702411 | 2020-02-01T23:56:50 | 2020-02-01T23:56:50 | 237,694,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | from .tools import getNested
from trading import Chart
import math
import copy
import time
class CandleIterator:
    """Step through a section's candle history at the section's iterate width.

    Each iteration yields a dict shaped ``{pair: {width: [candles]}}`` where
    every candle list is a trailing window (sized by the section's buffer for
    that pair/width) whose final element is the section's "last candle" as of
    the current time step.
    """
    # Class-level defaults; every one is overwritten per instance in __init__.
    _section = None
    _startIndex = 0
    _endIndex = -1
    _index = 0
    _offsets = None
    _factors = None
    def __init__(self, section):
        self._section = section
        # Start one buffer-length in so a full window precedes the first index.
        self._startIndex = getNested(section.buffers, section.iterateKey) - 1
        # iterateKey[1] is the candle width component; converted to seconds.
        self._iterateWidth = Chart.widthToSec(section.iterateKey[1])
        self._endIndex = len(getNested(section.candles, section.iterateKey))
        self._index = self._startIndex
        self._getOffsets()
    def __iter__(self):
        return self
    def __next__(self):
        # Stop once the index walks off the end of the iterate series.
        if self._index >= self._endIndex:
            raise StopIteration
        candles = self._section.candles
        newCandles = {}
        for pair in candles:
            newCandles[pair] = {}
            for width in candles[pair]:
                # Translate the iterate-series index into this series' index
                # using the precomputed time offset and width ratio.
                offset = self._offsets[pair][width]
                factor = self._factors[pair][width]
                index = math.floor((self._index + offset) / factor)
                buff = self._section.buffers[pair][width]
                # NOTE(review): window is buff-1 historical candles plus the
                # synthetic last candle appended below — confirm intended.
                temp = candles[pair][width][index - buff + 1: index]
                # candle[0] appears to be the candle's start timestamp.
                startTime = getNested(candles, self._section.iterateKey)[self._index][0]
                endTime = startTime + self._iterateWidth
                lastCandle = self._section.getLastCandle(pair, width, endTime)
                temp.append(lastCandle)
                newCandles[pair][width] = temp
        self._index += 1
        return newCandles
    def _getOffsets(self):
        """Precompute, per pair/width, the index offset and width ratio
        relative to the iterate series."""
        candles = self._section.candles
        iterateWidth = Chart.widthToSec(self._section.iterateKey[1])
        iterate = getNested(candles, self._section.iterateKey)
        # Nothing to iterate: leave _offsets/_factors as None.
        if len(iterate) == 0:
            return
        iterateTime = iterate[0][0]
        self._offsets = {}
        self._factors = {}
        for key in candles:
            self._offsets[key] = {}
            self._factors[key] = {}
            for width in candles[key]:
                otherTime = candles[key][width][0][0]
                # How many iterate-width steps fit in one candle of this width.
                factor = Chart.widthToSec(width) / iterateWidth
                self._factors[key][width] = factor
                self._offsets[key][width] = int((iterateTime - otherTime) / iterateWidth)
"noreply@github.com"
] | noreply@github.com |
5d2d82c183cdb19c50d16e09693e41fbb6537d84 | 29ff276739ccb2af3a2c19f811d8d45a4cbf7ce9 | /ConditionalProcessing2/conditionalProcessing2.py | 66ab96e7926d80cbcbb12d18873735461a730148 | [] | no_license | eruditehassan/fundamentals-of-programming-python-practice | 02fd11ec49b152e8b4d09dcf06e51a90e1b9d65c | e57a5bb631c7cfd73d3d4459b1000c008dfd972e | refs/heads/master | 2022-01-10T06:31:47.793646 | 2019-06-11T18:43:34 | 2019-06-11T18:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,236 | py | def starHalfTriangle():
"""A program that prints half star triangle"""
no_of_stars = 0
for stars in range(1,13):
no_of_stars+=1
print(no_of_stars * '*')
def customStarFormat():
    """Read a number between 1 and 30 and print that many adjacent asterisks."""
    count = int(input("How many asterisks you want to print? Enter a number between 1 and 30 \n"))
    if not 1 <= count <= 30:
        print("Number is not between 1 and 30")
    else:
        print(count * '*')
def primeNumber():
    """Read a number from the user and report whether it is prime.

    Uses trial division up to sqrt(n) with an early break instead of the
    original full 1..n scan, and explicitly reports numbers below 2 as
    non-prime (the original printed nothing for them).
    """
    number = int(input("Enter any number"))
    if number < 2:
        # 0, 1 and negatives are not prime by definition.
        print("No, it is not a prime number")
        return
    is_prime = True
    divisor = 2
    while divisor * divisor <= number:
        if number % divisor == 0:
            is_prime = False
            break
        divisor += 1
    if is_prime:
        print("Yes, it is a prime number")
    else:
        print("No, it is not a prime number")
def customNumPrintLoop():
    """Print every number from 6 to 30 that is not divisible by 2, 3, or 5."""
    for value in range(6, 31):
        if value % 2 and value % 3 and value % 5:
            print(value)
def cityListPrinter():
    """Print a numbered list of city names, one per line ("1 . Karachi")."""
    city_names = ['Karachi', 'Lahore', 'Islamabad', 'Istanbul', 'Melbourne', 'Paris', 'Berlin', 'London']
    # enumerate replaces the original hand-maintained counter.
    for num, city in enumerate(city_names, start=1):
        print(num, ".", city)
def interestCalculator():
    """Compute the future value of an investment with annual compounding.

    Reads the principal, annual rate, and number of years from the user and
    prints the compounded value.
    """
    print("This program calculates the future value of investment for any number of years.")
    # float() replaces eval(): eval executes arbitrary code typed by the
    # user, which is unsafe on untrusted input; the prompts expect numbers.
    principal = float(input("Enter the initial principal amount: "))
    apr = float(input("Enter the annual interest rate: "))
    years = int(input("Enter the number of years to calculate investment"))
    for _ in range(years):
        principal = principal * (1 + apr)
    print("The value in", years, "years is:", principal)
print ("The value in", years, "years is:", principal)
def powerSequenceGenerator():
    """Print n, n^2, n^3 and n^4 for every n from 1 up to a user-given limit."""
    print("This program will calculate square, cube and fourth power of numbers from 1 to any number you give")
    number = int(input("Enter the number until which you want to calculate"))
    print("n \t n^2 \t n^3 \t n^4")
    # range() supplies the row value directly; the original kept a redundant
    # manual counter alongside the loop variable.
    for n in range(1, number + 1):
        print(n, '\t', n**2, '\t', n**3, '\t', n**4)
"51364060+eruditehassan@users.noreply.github.com"
] | 51364060+eruditehassan@users.noreply.github.com |
35a79b4fdf4e0ea3e84ae6fc05370aca40fda015 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/eventarc/v1/eventarc-v1-py/google/cloud/eventarc_v1/services/eventarc/transports/grpc.py | f6847c60fe490f76ea1ee075399c4f7fea0ff76a | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,587 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.eventarc_v1.types import eventarc
from google.cloud.eventarc_v1.types import trigger
from google.longrunning import operations_pb2 # type: ignore
from .base import EventarcTransport, DEFAULT_CLIENT_INFO
class EventarcGrpcTransport(EventarcTransport):
    """gRPC backend transport for Eventarc.
    Eventarc allows users to subscribe to various events that are
    provided by Google Cloud services and forward them to supported
    destinations.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # NOTE(review): this module follows the GAPIC generator template
    # (repo path suggests googleapis-gen) — edits may be overwritten on
    # regeneration; confirm before hand-modifying.
    # Cache of lazily-created gRPC stub callables, keyed by method name.
    _stubs: Dict[str, Callable]
    def __init__(self, *,
            host: str = 'eventarc.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            channel: grpc.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            # No explicit channel: build one from the resolved credentials,
            # with unlimited message sizes in both directions.
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(cls,
                       host: str = 'eventarc.googleapis.com',
                       credentials: ga_credentials.Credentials = None,
                       credentials_file: str = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Create the client designed to process long-running operations.
        This property caches on the instance; repeated calls return the same
        client.
        """
        # Sanity check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(
                self.grpc_channel
            )
        # Return the client from cache.
        return self._operations_client
    @property
    def get_trigger(self) -> Callable[
            [eventarc.GetTriggerRequest],
            trigger.Trigger]:
        r"""Return a callable for the get trigger method over gRPC.
        Get a single trigger.
        Returns:
            Callable[[~.GetTriggerRequest],
                    ~.Trigger]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_trigger' not in self._stubs:
            self._stubs['get_trigger'] = self.grpc_channel.unary_unary(
                '/google.cloud.eventarc.v1.Eventarc/GetTrigger',
                request_serializer=eventarc.GetTriggerRequest.serialize,
                response_deserializer=trigger.Trigger.deserialize,
            )
        return self._stubs['get_trigger']
    @property
    def list_triggers(self) -> Callable[
            [eventarc.ListTriggersRequest],
            eventarc.ListTriggersResponse]:
        r"""Return a callable for the list triggers method over gRPC.
        List triggers.
        Returns:
            Callable[[~.ListTriggersRequest],
                    ~.ListTriggersResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_triggers' not in self._stubs:
            self._stubs['list_triggers'] = self.grpc_channel.unary_unary(
                '/google.cloud.eventarc.v1.Eventarc/ListTriggers',
                request_serializer=eventarc.ListTriggersRequest.serialize,
                response_deserializer=eventarc.ListTriggersResponse.deserialize,
            )
        return self._stubs['list_triggers']
    @property
    def create_trigger(self) -> Callable[
            [eventarc.CreateTriggerRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the create trigger method over gRPC.
        Create a new trigger in a particular project and
        location.
        Returns:
            Callable[[~.CreateTriggerRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'create_trigger' not in self._stubs:
            self._stubs['create_trigger'] = self.grpc_channel.unary_unary(
                '/google.cloud.eventarc.v1.Eventarc/CreateTrigger',
                request_serializer=eventarc.CreateTriggerRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs['create_trigger']
    @property
    def update_trigger(self) -> Callable[
            [eventarc.UpdateTriggerRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the update trigger method over gRPC.
        Update a single trigger.
        Returns:
            Callable[[~.UpdateTriggerRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'update_trigger' not in self._stubs:
            self._stubs['update_trigger'] = self.grpc_channel.unary_unary(
                '/google.cloud.eventarc.v1.Eventarc/UpdateTrigger',
                request_serializer=eventarc.UpdateTriggerRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs['update_trigger']
    @property
    def delete_trigger(self) -> Callable[
            [eventarc.DeleteTriggerRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the delete trigger method over gRPC.
        Delete a single trigger.
        Returns:
            Callable[[~.DeleteTriggerRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'delete_trigger' not in self._stubs:
            self._stubs['delete_trigger'] = self.grpc_channel.unary_unary(
                '/google.cloud.eventarc.v1.Eventarc/DeleteTrigger',
                request_serializer=eventarc.DeleteTriggerRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs['delete_trigger']
# Explicit public API of this module.
__all__ = (
    'EventarcGrpcTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
6c0e4ea1a74613b04f657d103905ed557e74cd28 | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/Test002/数据类型_20201205183212.py | 0498acedcb8af7250b1f43f9d0736323a1f60b37 | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,787 | py | # fruits = ['orange', 'apple', 'pear', 'banana', 'kiwi', 'apple', 'banana']
# print(fruits.count("apple"))
# a = fruits.index("banana",4)
# print(a)
# fruits.reverse()
# print(fruits)
# fruits.append("daka")
# print(fruits)
# print(fruits.sort)
# a = fruits.pop(0)
# print(a)
# print(fruits)
# number = [1,2,45,3,7,24,3]
# print(number.sort(reverse=True))
# from collections import deque
# queue = deque(["Eric", "John", "Michael"])
# queue.append("Terry")
# queue.append("Graham")
# a= queue.popleft()
# print(a)
# b = queue.popleft()
# print(b)
# print(queue)
# number = [1,2,3,4]
# number.append(5)
# number.append(6)
# print(number)
# number.pop()
# number.pop()
# print(number)
# lista = []
# for i in range(1,10):
# lista.append(i**2)
# print(lista)
# number = list(map(lambda x: x**2, range(1,10)))
# print(number)
# number = [i**2 for i in range(1,10)]
# print(number)
# number1= [(x,y) for x in [1,2,3] for y in [3,1,4] if x != y]
# print(number1)
# lis2 = []
# for x in [1,2,3]:
# for y in [3,1,4]:
# if x != y:
# lis2.append(x,y)
# print(number1)
# ver = [1,2,3]
# lista = [i**2 for i in ver]
# print(lista)
# ver1 = [-1,-2,3,4,-5]
# list2 = [i**2 for i in ver1 if i>0]
# print(list2)
# list3 = [abs(i) for i in ver1]
# print(list3)
# freshfruit = [' banana', ' loganberry ', 'passion fruit ']
# ab = [i.strip() for i in freshfruit]
# print(ab)
# list4 =[(x,x**2) for x in range(10)]
# print(list4)
# ver =[[1,2,3],[4,5,6],[7,8,9]]
# list5 = [y for i in ver for y in i]
# print(list5)
# from math import pi
# pia = 1.1323123
# for i in range(6):
# print(round(pia,i))
# list6 = [round(pia,i) for i in range(6)]
# print(list6)
#交换行和列
row_col = [
[1,4,7],
[2,5,6],
[3,6,9]
] | [
"zhangyingxbba@gmail.com"
] | zhangyingxbba@gmail.com |
c1d6cc614de179239cd85b1aff00551fe5a70de7 | 9130bdbd90b7a70ac4ae491ddd0d6564c1c733e0 | /venv/lib/python3.8/site-packages/numpy/lib/tests/test_function_base.py | 26214a10c602d958990ec1631e13077862370069 | [] | no_license | baruwaa12/Projects | 6ca92561fb440c63eb48c9d1114b3fc8fa43f593 | 0d9a7b833f24729095308332b28c1cde63e9414d | refs/heads/main | 2022-10-21T14:13:47.551218 | 2022-10-09T11:03:49 | 2022-10-09T11:03:49 | 160,078,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/da/24/8d/ec9cb0de3f3cfb086257743551eecf0a43e5ea4e63881af9e8d6632865 | [
"45532744+baruwaa12@users.noreply.github.com"
] | 45532744+baruwaa12@users.noreply.github.com |
da080bc3ffe0ad4f0d4461acf3bf439970b3713b | d706f83450d32256e568ea2e279649b9d85ddb94 | /accounts/views.py | 8cd59810b95abf689b8f6bdf3151729484d2fb7d | [] | no_license | celord/advacneddjango | 146d3d4ae351803b37e8599225b38b948e42a8b7 | 044d172fb10556cdeede6888dcec5f466097754d | refs/heads/main | 2023-08-18T19:26:07.230821 | 2021-09-26T17:58:45 | 2021-09-26T17:58:45 | 406,921,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # accounts/views.py
from django.urls import reverse_lazy
from django.views import generic
from .forms import CustomUserCreationForm
class SignupPageView(generic.CreateView):
    """Display and process the user registration (sign-up) form."""
    form_class = CustomUserCreationForm
    # reverse_lazy (not reverse) is required here: the URLConf is not yet
    # loaded when this module-level class body is evaluated.
    success_url = reverse_lazy('login')
    template_name = 'registration/signup.html'
| [
"celord@gmail.com"
] | celord@gmail.com |
135a875898921530dc0d9ed13b5bd02d13a96cbc | ee2af8c0fdc65f44ed9a4295806d75fb09257b58 | /saif/google_api_integreation/__manifest__.py | db20488330aedd7f71c7ecfb68f2ce9990548508 | [] | no_license | sc4you/odoo-project | 02b81ff4920a69d3e79c5dcc605a794779c5a77c | 2ef439ef54f1165c3569a1047cd5cb6a0b50572e | refs/heads/master | 2020-03-21T13:34:52.555402 | 2018-03-19T12:26:39 | 2018-03-19T12:26:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # -*- coding: utf-8 -*-
{
'name': 'Google Docs Integration',
'category': 'Extra Tools',
'summary': 'Spreadsheet, Document, Presentation',
'description': 'Google Docs Integration: This Module lets you to develop,'\
'read and modify Spreadsheet, Document, Presentation',
'author': 'Muhammad Awais',
'depends':['base','project','sale'],
'application': True,
'data': ['views/template.xml','security/security.xml','security/ir.model.access.csv'],
} | [
"ta.awaisajaz@gmail.com"
] | ta.awaisajaz@gmail.com |
972b653377c522b689ef5122c760bb3750ffce76 | 6578b86952bdfc50a1d13ac8ae271d43695d02ed | /setup.py | 85513e416a83639ce1d9950eeb55eb8d263452da | [
"MIT"
] | permissive | BotDevGroup/grammar_plugin | caa9dcfebaf78d153d1520cd27e282b39de8e63a | 9da7b50164912cb955dda25e71da6e6551e933e0 | refs/heads/master | 2020-03-20T20:05:10.170906 | 2018-06-17T17:55:04 | 2018-06-17T17:55:04 | 137,669,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# Long description shown on PyPI comes straight from the README.
with open('README.rst') as readme_file:
    readme = readme_file.read()

requirements = [
    'marvinbot'
    # TODO: put package requirements here
]

test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='grammar_plugin',
    version='0.1.0',
    description="A plugin for marvinbot to check grammar using Languagetool",
    long_description=readme,
    author="Ricardo Cabral",
    author_email='ricardo.arturo.cabral@gmail.com',
    url='https://github.com/Cameri/grammar_plugin',
    packages=[
        'grammar_plugin',
    ],
    package_dir={'grammar_plugin':
                 'grammar_plugin'},
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='grammar_plugin',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    # NOTE: dependency_links is deprecated and ignored by modern pip;
    # consider a direct URL requirement (PEP 508) instead.
    dependency_links=[
        # Fixed: pip's git+ssh URL form requires "host/path", not the
        # scp-style "host:path" the original used.
        'git+ssh://git@github.com/BotDevGroup/marvin.git#egg=marvinbot',
    ],
)
| [
"ricardo.arturo.cabral@gmail.com"
] | ricardo.arturo.cabral@gmail.com |
6f87d67a78dea8a63ec6ae22d12f0f19ea1f3156 | d72e96389c479f67033931a010c954905d61b31d | /python/crashcourse/chap2/name_practice.py | 079baeac6efe22bb7e25c3568b4fb48b79192f6e | [] | no_license | kalima32/hello-world | 256c3aa9e996a7fb750aeaeaa2cf09591712a0ae | 214be215ab50602b694725fca882f95728ff03a8 | refs/heads/master | 2021-07-20T00:53:14.287219 | 2020-04-26T01:03:06 | 2020-04-26T01:03:06 | 140,461,871 | 0 | 0 | null | 2018-07-10T17:15:44 | 2018-07-10T16:45:32 | null | UTF-8 | Python | false | false | 739 | py | first_name = "bob"
last_name = "johnson"
whole_name = first_name.title() + " " + last_name.title()
famous_first_name = "groucho"
famous_last_name = "marx"
famous_whole_name = famous_first_name.title() + " " + famous_last_name.title()
print(f"This is practice with names!")
print(first_name.title() + " " + last_name.title())
print(first_name.upper() + " " + last_name.upper())
print(first_name.lower() + " " + last_name.lower())
print(f"Very nice to meet you {whole_name}! Welcome to my python world.")
print(f"\nNow I'll share one of my favorite quotes:\n")
print(f'\t{famous_whole_name} said, "Please accept my resignation.')
print(f"\tI don't want to belong to any club that accepts people.")
print(f'\tpeople like me as members."')
| [
"josh@osx-jcasto.local"
] | josh@osx-jcasto.local |
a04179ec631fa9ee2c77775b4b950d00ead1cff3 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_container/get.py | c8b1ce0c2a7ca85de612a46ed698cd5daf7180dc | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,674 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-machinelearningservices
# USAGE
python get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Sample: fetch one registry code container and print it.

    Credentials are resolved from the environment (AZURE_CLIENT_ID,
    AZURE_TENANT_ID, AZURE_CLIENT_SECRET) as described in the module
    docstring; the subscription id below is a placeholder.
    """
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )

    # Sample resource names matching the original REST example payload.
    response = client.registry_code_containers.get(
        resource_group_name="testrg123",
        registry_name="testregistry",
        code_name="testContainer",
    )
    print(response)


# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeContainer/get.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
91067497b8fa73992688003dd7a5316582a8352d | 6c6719d07123034f3f18e3dd87a3e7acb1a7c374 | /UniqueOrder.py | 398251a7686ba39f744823d90c975292f56d4cd1 | [] | no_license | HighlySupreme/CodeWars | bd7266c1b23f0bc38c453efa89933754ae090db8 | b61c567268ab63f3fed6b2b4d5eb6ff7789e8f9f | refs/heads/master | 2023-06-08T10:57:07.900201 | 2021-06-22T09:09:26 | 2021-06-22T09:09:26 | 375,947,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | # Implement the function unique_in_order which takes as argument a sequence and returns a list of items without any elements with the same value next to each other and preserving the original order of elements.
# For example:
# unique_in_order('AAAABBBCCDAABBB') == ['A', 'B', 'C', 'D', 'A', 'B']
# unique_in_order('ABBCcAD') == ['A', 'B', 'C', 'c', 'A', 'D']
# unique_in_order([1,2,2,3,3]) == [1,2,3]
# test.assert_equals(unique_in_order('AAAABBBCCDAABBB'), ['A','B','C','D','A','B'])
def unique_in_order(iterable):
    """Return the items of *iterable* with consecutive duplicates collapsed.

    Preserves the original order; only runs of equal adjacent items are
    reduced to a single item. Works on any iterable (strings, lists,
    generators), unlike the original which required len() and slicing.

    >>> unique_in_order('AAAABBBCCDAABBB')
    ['A', 'B', 'C', 'D', 'A', 'B']
    """
    result = []
    for item in iterable:
        # Append only when the item differs from the last one kept.
        if not result or result[-1] != item:
            result.append(item)
    return result
| [
"gal.1999k@gmail.com"
] | gal.1999k@gmail.com |
db62acc5b5c6704db566b47448faeaed2132e6ba | bb64d7194d9f7e8ef6fc2dbfdbc0569713d1079c | /FocalLoss.py | 74a05c5aa62338c5c30e91a1981482671095182f | [] | no_license | scott-mao/Top-Related-Meta-Learning-Method-for-Few-Shot-Detection | 471e7d6e71255333d9b4c929023d7e43ef19fdd2 | 49bfd702f41deaec60fa95314436f69b4e217e6f | refs/heads/main | 2023-04-11T13:00:13.358560 | 2021-04-27T02:24:23 | 2021-04-27T02:24:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,767 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# Written by Chao CHEN (chaochancs@gmail.com)
# Created On: 2017-08-11
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
    r"""Focal Loss, as proposed in "Focal Loss for Dense Object Detection".

        Loss(x, class) = - \alpha (1 - softmax(x)[class])^\gamma \log(softmax(x)[class])

    Device-agnostic rewrite: the original hard-coded ``.cuda()`` calls in
    ``forward`` and crashed on CPU tensors; all intermediates now follow the
    device of ``inputs``.

    Args:
        class_num (int): number of classes C.
        alpha (Tensor or sequence, optional): per-class weighting factor of
            shape (C, 1). Defaults to 0.25 for every class.
        gamma (float): gamma > 0; reduces the relative loss for
            well-classified examples (p > .5), putting more focus on hard,
            misclassified examples.
        size_average (bool): if True the losses are averaged over the
            minibatch; otherwise they are summed (the default).
    """

    def __init__(self, class_num, alpha=None, gamma=2, size_average=False):
        super(FocalLoss, self).__init__()
        if alpha is None:
            alpha = torch.full((class_num, 1), 0.25)
        elif not torch.is_tensor(alpha):
            alpha = torch.as_tensor(alpha, dtype=torch.float32)
        # Buffer (not Parameter): moves with .to()/.cuda() but is not trained.
        self.register_buffer("alpha", alpha.float())
        self.gamma = gamma
        self.class_num = class_num
        self.size_average = size_average

    def forward(self, inputs, targets):
        """Compute the focal loss.

        Args:
            inputs: raw class scores (logits) of shape (N, C).
            targets: integer class indices of shape (N,).

        Returns:
            A 0-dim tensor: the summed (or averaged) focal loss.
        """
        P = F.softmax(inputs, dim=1)

        ids = targets.view(-1, 1)
        # One-hot mask built on the same device/dtype as the logits
        # (fixes the original's unconditional .cuda()).
        class_mask = torch.zeros_like(inputs).scatter_(1, ids, 1.0)

        # Per-sample alpha, shape (N, 1).
        alpha = self.alpha.to(inputs.device)[ids.view(-1)]

        # Probability of the true class for each sample, shape (N, 1).
        probs = (P * class_mask).sum(1).view(-1, 1)
        log_p = probs.log()

        batch_loss = -alpha * torch.pow(1 - probs, self.gamma) * log_p

        if self.size_average:
            return batch_loss.mean()
        return batch_loss.sum()
if __name__ == "__main__":
    # Smoke test: with gamma=0 the focal loss is a (scaled) cross-entropy, so
    # both losses should be finite and move together on the same inputs.
    alpha = torch.rand(21, 1)  # NOTE(review): printed but unused; FL below gets the default alpha
    print(alpha)
    FL = FocalLoss(class_num=5, gamma=0)
    CE = nn.CrossEntropyLoss()
    N = 4
    C = 5
    inputs = torch.rand(N, C)
    targets = torch.LongTensor(N).random_(C)

    inputs_fl = Variable(inputs.clone(), requires_grad=True)
    targets_fl = Variable(targets.clone())

    inputs_ce = Variable(inputs.clone(), requires_grad=True)
    targets_ce = Variable(targets.clone())
    print('----inputs----')
    print(inputs)
    print('---target-----')
    print(targets)

    fl_loss = FL(inputs_fl, targets_fl)
    ce_loss = CE(inputs_ce, targets_ce)
    # BUG FIX: `.data[0]` indexing of a 0-dim tensor raises IndexError on
    # PyTorch >= 0.5; `.item()` is the supported scalar accessor.
    print('ce = {}, fl ={}'.format(ce_loss.item(), fl_loss.item()))
    fl_loss.backward()
    ce_loss.backward()
    #print(inputs_fl.grad.data)
    print(inputs_ce.grad.data)
| [
"noreply@github.com"
] | noreply@github.com |
a508b0946ef1eb6e66bea0da2040124e9cd0befb | d2b51283be8d71e9eab19124b9f8900d6724a0ef | /netmiko/linux/linux_ssh.py | 7527cd959551511159278f80cdbf05cbeff79bb0 | [
"MIT"
] | permissive | patel26jay/CONFIG | 0aa41467adb35ca59ffd637841630cad01ef1ae2 | 6cd139415f18df3e6e41f12fa0e38d239f14d6b8 | refs/heads/master | 2021-05-06T19:25:42.729045 | 2017-11-27T01:28:38 | 2017-11-27T01:28:38 | 112,130,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,367 | py | from __future__ import unicode_literals
import re
import socket
import time
from netmiko.cisco_base_connection import CiscoSSHConnection
from netmiko.ssh_exception import NetMikoTimeoutException
class LinuxSSH(CiscoSSHConnection):
    """Netmiko driver for plain Linux shells over SSH.

    Reuses the Cisco SSH plumbing, but maps "enable"/"config" mode onto
    becoming root via ``sudo su`` (prompt '#' means root, '$' means a
    regular user).
    """

    def session_preparation(self):
        """Prepare the session after the connection has been established."""
        # Linux shells commonly emit ANSI escape sequences; have the base
        # class strip them from channel output.
        self.ansi_escape_codes = True
        return super(LinuxSSH, self).session_preparation()

    def disable_paging(self, *args, **kwargs):
        """Linux doesn't have paging by default."""
        return ""

    def set_base_prompt(self, pri_prompt_terminator='$',
                        alt_prompt_terminator='#', delay_factor=1):
        """Determine base prompt."""
        # '$' for a normal user, '#' once root.
        return super(LinuxSSH, self).set_base_prompt(
            pri_prompt_terminator=pri_prompt_terminator,
            alt_prompt_terminator=alt_prompt_terminator,
            delay_factor=delay_factor)

    def send_config_set(self, config_commands=None, exit_config_mode=True, **kwargs):
        """Can't exit from root (if root)"""
        # When already logged in as root there is no unprivileged shell to
        # drop back into, so never try to exit "config" (root) mode.
        if self.username == "root":
            exit_config_mode = False
        return super(LinuxSSH, self).send_config_set(config_commands=config_commands,
                                                     exit_config_mode=exit_config_mode,
                                                     **kwargs)

    def check_config_mode(self, check_string='#'):
        """Verify root"""
        return self.check_enable_mode(check_string=check_string)

    def config_mode(self, config_command='sudo su'):
        """Attempt to become root."""
        return self.enable(cmd=config_command)

    def exit_config_mode(self, exit_config='exit'):
        """Leave "config" (root) mode; alias for exit_enable_mode."""
        return self.exit_enable_mode(exit_command=exit_config)

    def check_enable_mode(self, check_string='#'):
        """Verify root"""
        return super(LinuxSSH, self).check_enable_mode(check_string=check_string)

    def exit_enable_mode(self, exit_command='exit'):
        """Exit enable mode."""
        delay_factor = self.select_delay_factor(delay_factor=0)
        output = ""
        if self.check_enable_mode():
            # Send 'exit', give the remote shell a moment, then re-learn the
            # prompt; if we still look like root, the exit failed.
            self.write_channel(self.normalize_cmd(exit_command))
            time.sleep(.3 * delay_factor)
            self.set_base_prompt()
            if self.check_enable_mode():
                raise ValueError("Failed to exit enable mode.")
        return output

    def enable(self, cmd='sudo su', pattern='ssword', re_flags=re.IGNORECASE):
        """Attempt to become root."""
        delay_factor = self.select_delay_factor(delay_factor=0)
        output = ""
        if not self.check_enable_mode():
            self.write_channel(self.normalize_cmd(cmd))
            time.sleep(.3 * delay_factor)
            try:
                output += self.read_channel()
                # 'ssword' matches both "Password:" and "password:" sudo prompts;
                # answer it with the configured secret.
                if re.search(pattern, output, flags=re_flags):
                    self.write_channel(self.normalize_cmd(self.secret))
                self.set_base_prompt()
            except socket.timeout:
                raise NetMikoTimeoutException("Timed-out reading channel, data not available.")
            if not self.check_enable_mode():
                msg = "Failed to enter enable mode. Please ensure you pass " \
                      "the 'secret' argument to ConnectHandler."
                raise ValueError(msg)
        return output
| [
"patel26jay@gmail.com"
] | patel26jay@gmail.com |
9a2de85db36568de7db8a1ae872a1bf8e2e28c9e | 4470e03f1e2843f67a36c89c9c92b4b98b903d30 | /Artificial Intelligence/multiagent/try/multiAgents2.py | 10225b7a5977de44f5ca549c6d102270cb3f4f3f | [] | no_license | pkgishere/Academic-Projects | 2a7a3120179af4709a95b4445f7ae33e1ba14dea | c5aeb81190bab6f1be1e0da0fe2beb33f903af97 | refs/heads/master | 2021-08-16T05:03:10.932407 | 2017-11-19T01:02:08 | 2017-11-19T01:02:08 | 110,476,914 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,218 | py | # multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from util import manhattanDistance
from util import EuclideanDistance
from game import Directions
import random, util
from game import Agent
class ReflexAgent(Agent):
    """
    A reflex agent chooses an action at each choice point by examining
    its alternatives via a state evaluation function.
    """

    def getAction(self, gameState):
        """Choose among the best-scoring legal actions (ties broken randomly).

        Returns some Directions.X for X in {North, South, West, East, Stop}.
        """
        # Collect legal moves, score each resulting successor state, and pick
        # uniformly at random among the top scorers.
        legalMoves = gameState.getLegalActions()
        scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
        bestScore = max(scores)
        bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
        chosenIndex = random.choice(bestIndices)
        return legalMoves[chosenIndex]

    def evaluationFunction(self, currentGameState, action):
        """Score the successor state reached by taking `action`.

        Combines: proximity of a capsule (small bonus when closer than the
        nearest ghost), a penalty for being near ghosts (inverted while they
        are scared), the amount of remaining food (less is better) and the
        distance to the closest pellet (closer is better), on top of the
        game's own score.  Winning states score +infinity.
        """
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()
        newFood = successorGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()
        newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]

        # Remaining food cells and their Manhattan distances to Pacman.
        List = []
        manHattanList = []
        for row, value in enumerate(newFood):
            for column, Value in enumerate(value):
                if Value:
                    Tuple = (row, column)
                    List.append(Tuple)
                    manHattanList.append(manhattanDistance(newPos, Tuple))
        if len(manHattanList) == 0:
            manHattanList.append(0)
        FoodDistance = min(manHattanList)

        score = 0.0
        # Closest-ghost distance under both metrics (Euclidean and Manhattan).
        EuclideanList = []
        GhostManhattanList = []
        for Ghost in newGhostStates:
            GhostManhattanList.append(manhattanDistance(Ghost.getPosition(), newPos))
            EuclideanList.append(EuclideanDistance(Ghost.getPosition(), newPos))
        GhostDistance1 = min(EuclideanList)
        GhostDistance2 = min(GhostManhattanList)

        # Small bonus for a capsule being closer than the nearest ghost.
        CapsuleDistance = float("inf")
        CapsulesList = []
        for capsules in successorGameState.getCapsules():
            CapsulesList.append(EuclideanDistance(capsules, newPos))
        if successorGameState.getCapsules():
            CapsuleDistance = min(CapsulesList)
        if CapsuleDistance < GhostDistance2:
            score += 1

        if successorGameState.isWin():
            return float("inf")

        # Penalize ghost proximity, harder the closer the ghost; while all
        # ghosts are scared for a while longer, flip the sign to chase them.
        Value = 0
        if GhostDistance1 <= 2:
            if GhostDistance2 == 1:
                Value = 100
            elif GhostDistance2 < 2:
                Value = 10
            else:
                Value = 5
            if min(newScaredTimes) > 4:
                Value = -Value
        score = score - Value

        # Prefer states with fewer pellets left and closer food.
        score += float(1) / float(len(List))
        score += float(1.0) / float(FoodDistance)
        return successorGameState.getScore() + score
def scoreEvaluationFunction(currentGameState):
    """
      This default evaluation function just returns the score of the state.
      The score is the same one displayed in the Pacman GUI.

      This evaluation function is meant for use with adversarial search agents
      (not reflex agents).
    """
    # Delegates entirely to the game's own running score counter.
    return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
    """
    This class provides some common elements to all of your
    multi-agent searchers. Any methods defined here will be available
    to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.

    You *do not* need to make any changes here, but you can if you want to
    add functionality to all your adversarial search agents. Please do not
    remove anything, however.

    Note: this is an abstract class: one that should not be instantiated. It's
    only partially specified, and designed to be extended. Agent (game.py)
    is another abstract class.
    """

    def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
        self.index = 0 # Pacman is always agent index 0
        # Resolve the evaluation function by name (values arrive as strings
        # from the command line), and cast the search depth likewise.
        self.evaluationFunction = util.lookup(evalFn, globals())
        self.depth = int(depth)
class MinimaxAgent(MultiAgentSearchAgent):
    """Adversarial agent running plain minimax search down to self.depth."""

    def Max(self, gameState, depth, AgentNo):
        # Maximizing (Pacman) node: best value over all legal moves.
        if depth == self.depth or gameState.isWin() or gameState.isLose():
            return self.evaluationFunction(gameState)
        best = float("-inf")
        for move in gameState.getLegalActions(AgentNo):
            child = gameState.generateSuccessor(AgentNo, move)
            candidate = self.Min(child, depth, AgentNo + 1)
            if candidate > best:
                best = candidate
        return best

    def Min(self, gameState, depth, AgentNo):
        # Minimizing (ghost) node; the last ghost hands the turn back to
        # Pacman one ply deeper.
        if depth == self.depth or gameState.isWin() or gameState.isLose():
            return self.evaluationFunction(gameState)
        worst = float("inf")
        last_ghost = gameState.getNumAgents() - 1
        for move in gameState.getLegalActions(AgentNo):
            child = gameState.generateSuccessor(AgentNo, move)
            if AgentNo == last_ghost:
                candidate = self.Max(child, depth + 1, 0)
            else:
                candidate = self.Min(child, depth, AgentNo + 1)
            if candidate < worst:
                worst = candidate
        return worst

    def getAction(self, gameState):
        """Return the minimax-optimal root action (first best wins on ties)."""
        bestScore = float("-inf")
        bestMove = Directions.STOP
        for move in gameState.getLegalActions(0):
            score = self.Min(gameState.generateSuccessor(0, move), 0, 1)
            if score > bestScore:
                bestScore = score
                bestMove = move
        return bestMove
class AlphaBetaAgent(MultiAgentSearchAgent):
    """Minimax agent with alpha-beta pruning.

    Alpha is the best value the maximizer can guarantee so far, Beta the best
    the minimizer can; branches that cannot affect the result are cut.  Note
    the pruning tests use strict inequalities, so ties are never pruned.
    """

    def getAction(self, gameState):
        """
          Returns the minimax action using self.depth and self.evaluationFunction
        """
        "*** YOUR CODE HERE ***"
        Value=float('-inf')
        Alpha=float('-inf')
        Beta=float('inf')
        Action=Directions.STOP
        for action in gameState.getLegalActions(0):
            Val=self.Min(gameState.generateSuccessor(0,action),0,1,Alpha,Beta)
            if(Val > Value):
                Value = Val
                Action=action
            # Root beta is +inf, so this cutoff can never fire here; kept as-is.
            if(Value > Beta):
                return Action
            Alpha=max(Value,Alpha)
        return Action

    def Max(self, gameState, depth, AgentNo, Alpha,Beta ):
        # Maximizing (Pacman) node with beta-cutoff.
        if depth==self.depth or gameState.isWin() or gameState.isLose():
            return self.evaluationFunction(gameState)
        MaxEval=float('-inf')
        for action in gameState.getLegalActions(AgentNo):
            # NOTE(review): `action` is a direction string, so len(action) is
            # never 0 — this guard looks unreachable; confirm before removing.
            if(len(action))==0:
                return self.evaluationFunction(gameState)
            successor = gameState.generateSuccessor(AgentNo, action)
            MaxEval=max(MaxEval,self.Min(successor, depth, AgentNo+1,Alpha,Beta))
            # Beta cutoff: the minimizer above will never allow this branch.
            if (MaxEval > Beta):
                return MaxEval
            Alpha=max(Alpha,MaxEval)
        return MaxEval

    def Min(self, gameState, depth, AgentNo,Alpha,Beta):
        # Minimizing (ghost) node with alpha-cutoff; the last ghost hands the
        # turn back to Pacman one ply deeper.
        if depth==self.depth or gameState.isWin() or gameState.isLose():
            return self.evaluationFunction(gameState)
        MinEval=float('inf')
        for action in gameState.getLegalActions(AgentNo):
            # NOTE(review): same apparently-unreachable guard as in Max.
            if(len(action)==0):
                return self.evaluationFunction(gameState)
            if AgentNo== gameState.getNumAgents() -1 :
                Value=self.Max(gameState.generateSuccessor(AgentNo,action),depth + 1 ,0,Alpha,Beta)
            else:
                Value=self.Min(gameState.generateSuccessor(AgentNo,action),depth,AgentNo+1,Alpha,Beta)
            MinEval=min(Value,MinEval)
            # Alpha cutoff: the maximizer above will never allow this branch.
            if(MinEval< Alpha):
                return MinEval
            Beta=min(Beta,MinEval)
        return MinEval
class ExpectimaxAgent(MultiAgentSearchAgent):
    """Expectimax agent: ghosts are modeled as choosing uniformly at random
    among their legal moves (chance nodes) instead of playing optimally."""

    def Expectation(self, gameState, depth, AgentNo):
        """Chance node: average value over the ghost's legal moves.

        The last ghost hands the turn back to Pacman one ply deeper; earlier
        ghosts chain into the next ghost's chance node.
        """
        if depth == self.depth or gameState.isWin() or gameState.isLose():
            return self.evaluationFunction(gameState)
        Eval = 0
        for action in gameState.getLegalActions(AgentNo):
            if AgentNo == gameState.getNumAgents() - 1:
                Eval += self.Max(gameState.generateSuccessor(AgentNo, action), depth + 1, 0)
            else:
                Eval = Eval + self.Expectation(gameState.generateSuccessor(AgentNo, action), depth, AgentNo + 1)
        # Uniform distribution over legal moves.
        Eval = float(Eval) / float(len(gameState.getLegalActions(AgentNo)))
        return Eval

    def Max(self, gameState, depth, AgentNo):
        """Maximizing (Pacman) node: best value over all legal moves."""
        if depth == self.depth or gameState.isWin() or gameState.isLose():
            return self.evaluationFunction(gameState)
        MaxEval = float("-inf")
        for action in gameState.getLegalActions(AgentNo):
            successor = gameState.generateSuccessor(AgentNo, action)
            # BUG FIX: successors of a Max node are chance (ghost) nodes in
            # expectimax.  The original recursed into a minimizing `Min`
            # helper here, which computed minimax instead of expectimax; the
            # now-unused Min helper has been removed.
            MaxEval = max(MaxEval, self.Expectation(successor, depth, AgentNo + 1))
        return MaxEval

    def getAction(self, gameState):
        """
          Returns the expectimax action using self.depth and self.evaluationFunction

          All ghosts should be modeled as choosing uniformly at random from their
          legal moves.
        """
        actions = gameState.getLegalActions(0)
        Eval = float('-inf')
        nextAction = Directions.STOP
        for action in actions:
            VAL = self.Expectation(gameState.generateSuccessor(0, action), 0, 1)
            if VAL > Eval:
                Eval = VAL
                nextAction = action
        return nextAction
def betterEvaluationFunction(currentGameState):
    """Evaluate a state (not an action) for the adversarial search agents.

    Combines the game's own score with: a bonus for having fewer pellets
    left, a bonus for being close to the nearest pellet, and a per-ghost
    proximity penalty (inverted while the ghosts are scared).  The dead
    capsule-distance computation of the original (its result was never used)
    has been removed.
    """
    newPos = currentGameState.getPacmanPosition()
    newGhostStates = currentGameState.getGhostStates()
    newFood = currentGameState.getFood()
    newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]

    score = 0.0

    # Remaining food cells and their Manhattan distances to Pacman.
    List = []
    manHattanList = []
    for row, value in enumerate(newFood):
        for column, Value in enumerate(value):
            if Value:
                Tuple = (row, column)
                List.append(Tuple)
                manHattanList.append(manhattanDistance(newPos, Tuple))
    if len(manHattanList) == 0:
        manHattanList.append(0)
    FoodDistance = min(manHattanList)

    # Per-ghost proximity penalty, harsher the closer the ghost; while all
    # ghosts are scared for a while longer, flip the sign to reward chasing.
    for Ghost in newGhostStates:
        GhostDistance1 = manhattanDistance(Ghost.getPosition(), newPos)
        GhostDistance2 = EuclideanDistance(Ghost.getPosition(), newPos)
        Value = 0
        if GhostDistance1 <= 2:
            if GhostDistance2 == 1:
                Value = 100
            elif GhostDistance2 < 2:
                Value = 10
            else:
                Value = 5
            if min(newScaredTimes) > 4:
                Value = -Value
        score = score - Value

    # Prefer states with fewer pellets left and closer food (+1 guards the
    # all-eaten / zero-distance cases against division by zero).
    score += float(1) / float(len(List) + 1)
    score += float(1.0) / float(FoodDistance + 1)
    return score + currentGameState.getScore()


# Abbreviation
better = betterEvaluationFunction
| [
"pkgishere@gmail.com"
] | pkgishere@gmail.com |
601405d16c40a5be1781f422a484d84140e8ee3e | 17f842c91b005ec5f04873863e83c3cbe2cc0808 | /node_modules/socket.io/node_modules/socket.io-client/node_modules/ws/build/config.gypi | c3a0a323c3949ecf80aa088aa8889b5a2926fcd3 | [
"MIT"
] | permissive | JeffKGabriel/Chatts | e04085d73dd1ffd9e991d4d16bfe25a499a3c4d5 | bcea9576f646dec1fa776601df6903d94bfd02c1 | refs/heads/master | 2016-09-05T11:55:50.436788 | 2013-04-05T19:32:51 | 2013-04-05T19:32:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,090 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 42,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "out/dist-osx/usr/local",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/Users/jeffreygabriel/.node-gyp/0.10.2",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"userignorefile": "/Users/jeffreygabriel/.npmignore",
"cache_max": "null",
"init_author_email": "",
"sign_git_tag": "",
"ignore": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"coverage": "",
"json": "",
"pre": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/jeffreygabriel/.npm-init.js",
"userconfig": "/Users/jeffreygabriel/.npmrc",
"npaturl": "http://npat.npmjs.org/",
"node_version": "v0.10.2",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "null",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"yes": "",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/jeffreygabriel/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.10.2 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "0.0.0",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/9g/9gomdDC0HIW3tD4od8ipIk+++TI/-Tmp-/",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"jeffreygabriel@Jeffrey-Gabriels-MacBook-Pro.local"
] | jeffreygabriel@Jeffrey-Gabriels-MacBook-Pro.local |
766945fca1dafdff946a66682bf3cdc66afaa281 | fa774cfc17462e469aaa977f2b8841cc542e5c31 | /interview-prep/trees&graphs/excs12/pathsWithSum.py | 6d548c455c966ca862eaee6fe9f0abe778d88e04 | [] | no_license | kristopherrollert/daily-coding | dc5319da272d1de3350bc862a6539e9214a3c28a | b1e500722919cc9571f7b2edf81bcee1a8dc32e3 | refs/heads/master | 2020-09-01T22:27:50.968008 | 2020-01-02T19:27:47 | 2020-01-02T19:27:47 | 219,074,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,073 | py | # Paths with Sum: You are given a binary tree in which each node contains an
# integer value (which might be positive or negative). Design an algorithm to
# count the number of paths that sum to a given value. The path does not need to
# start or end at the root or leaf, but it must go downwards (traveling only
# from parent nodes to child nodes).
# I have no idea how to solve this. Right now it seems like brute force is the
# move. Once again, they say binary tree but DONT say if it is a binary search
# tree. Sometimes when they say binary tree it is and sometimes it isn't it is
# annoying.
class Node:
    """A binary-tree node holding an integer value."""

    def __init__(self, val):
        self.val = val
        self.left = self.right = None
def pathsWithSum():
    """Build a small demo tree and print the number of downward paths
    summing to 5 (expected: 2 -> [1, 4] and [5])."""
    #        1
    #      /   \
    #    -2     4
    #    /
    #   5
    root = Node(1)
    root.left = Node(-2)
    root.right = Node(4)
    root.left.left = Node(5)
    count = helper2(root, 5, 0, {})
    print(count)
def helper(pastSum, node, succ, val, dict):
    """Brute-force count of downward paths summing to `val`.

    Counts every path that starts at `node` or at any of its descendants and
    travels only downward (parent to child).

    Args:
        pastSum: initial running sum applied to paths starting at `node`.
        node: root of the subtree to search (or None).
        succ: accumulator the count is added to.
        val: target sum.
        dict: unused; kept for signature parity with helper2.

    The original implementation referenced an undefined name (`pathSum`),
    tuple-unpacked its own scalar return value (crashing on any right child),
    and double-counted fresh starts; this version is a correct O(n^2)
    brute force.
    """
    if node is None:
        return succ
    # Paths that begin at this node.
    succ = _countFrom(node, val - pastSum, succ)
    # Let paths begin at every descendant (fresh running sum).
    succ = helper(0, node.left, succ, val, dict)
    succ = helper(0, node.right, succ, val, dict)
    return succ


def _countFrom(node, remaining, succ):
    # Count downward paths starting exactly at `node` that sum to `remaining`.
    if node is None:
        return succ
    if node.val == remaining:
        succ += 1
    succ = _countFrom(node.left, remaining - node.val, succ)
    succ = _countFrom(node.right, remaining - node.val, succ)
    return succ
def helper2(node, targetSum, runningSum, pathCount):
    """Count downward paths summing to `targetSum` in O(nodes) time.

    Uses the running prefix-sum technique: `pathCount` maps a prefix sum
    (root -> some ancestor) to how many times it occurs along the current
    root-to-node path.  A path ending at `node` sums to `targetSum` exactly
    when some ancestor prefix equals runningSum - targetSum.

    Args:
        node: current tree node (has .val/.left/.right) or None.
        targetSum: the sum being counted.
        runningSum: sum of values from the root down to node's parent.
        pathCount: prefix-sum -> occurrence-count dict (pass {} at the root).

    Returns:
        Number of downward paths in node's subtree (possibly starting at an
        ancestor) that sum to targetSum.
    """
    if node is None:
        return 0
    runningSum += node.val

    # Paths ending here that start just below some ancestor...
    totalPaths = pathCount.get(runningSum - targetSum, 0)
    # ...plus the path that starts at the root itself.
    if runningSum == targetSum:
        totalPaths += 1

    # Record this prefix sum while exploring the subtree.  (The old `inc`
    # helper is inlined here, and a leftover debug print was removed.)
    pathCount[runningSum] = pathCount.get(runningSum, 0) + 1
    totalPaths += helper2(node.left, targetSum, runningSum, pathCount)
    totalPaths += helper2(node.right, targetSum, runningSum, pathCount)

    # Backtrack: un-record this prefix sum, deleting the key when it reaches
    # zero so the dict stays small.
    remaining = pathCount[runningSum] - 1
    if remaining == 0:
        del pathCount[runningSum]
    else:
        pathCount[runningSum] = remaining

    return totalPaths
def inc(dict, key, delta):
    """Add `delta` to dict[key] (treating a missing key as 0), deleting the
    key when the count reaches zero so the dict never stores zeros.

    The original raised KeyError when the result was 0 for a key that was
    never present (e.g. inc(d, k, 0)); pop(key, None) handles that safely.
    """
    newCount = dict.get(key, 0) + delta
    if newCount == 0:
        dict.pop(key, None)
    else:
        dict[key] = newCount
# Script entry point: run the small demo driver.
if __name__ == "__main__":
    pathsWithSum()
| [
"krollert@ucsc.edu"
] | krollert@ucsc.edu |
b96de974ca34505ea68a7002e1eaca1fdf7e1661 | 076e0ebd618ed406808e9009a70d886e8bdb1bbf | /grafeas/grafeas_v1/__init__.py | 98e1ad1c8c28a4c25705f3c56a2ad03ad7d539b0 | [
"Apache-2.0"
] | permissive | isabella232/python-grafeas | 8edb1c3b79e51292f1612489775b51a96033049c | a806330d0f344eb0b97e351d7e5ba34b8ae9b740 | refs/heads/master | 2022-12-15T09:53:51.979968 | 2020-09-22T22:15:19 | 2020-09-22T22:15:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,916 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.grafeas import GrafeasClient
from .types.attestation import AttestationNote
from .types.attestation import AttestationOccurrence
from .types.build import BuildNote
from .types.build import BuildOccurrence
from .types.common import NoteKind
from .types.common import RelatedUrl
from .types.common import Signature
from .types.cvss import CVSSv3
from .types.deployment import DeploymentNote
from .types.deployment import DeploymentOccurrence
from .types.discovery import DiscoveryNote
from .types.discovery import DiscoveryOccurrence
from .types.grafeas import BatchCreateNotesRequest
from .types.grafeas import BatchCreateNotesResponse
from .types.grafeas import BatchCreateOccurrencesRequest
from .types.grafeas import BatchCreateOccurrencesResponse
from .types.grafeas import CreateNoteRequest
from .types.grafeas import CreateOccurrenceRequest
from .types.grafeas import DeleteNoteRequest
from .types.grafeas import DeleteOccurrenceRequest
from .types.grafeas import GetNoteRequest
from .types.grafeas import GetOccurrenceNoteRequest
from .types.grafeas import GetOccurrenceRequest
from .types.grafeas import ListNoteOccurrencesRequest
from .types.grafeas import ListNoteOccurrencesResponse
from .types.grafeas import ListNotesRequest
from .types.grafeas import ListNotesResponse
from .types.grafeas import ListOccurrencesRequest
from .types.grafeas import ListOccurrencesResponse
from .types.grafeas import Note
from .types.grafeas import Occurrence
from .types.grafeas import UpdateNoteRequest
from .types.grafeas import UpdateOccurrenceRequest
from .types.image import Fingerprint
from .types.image import ImageNote
from .types.image import ImageOccurrence
from .types.image import Layer
from .types.package import Architecture
from .types.package import Distribution
from .types.package import Location
from .types.package import PackageNote
from .types.package import PackageOccurrence
from .types.package import Version
from .types.provenance import AliasContext
from .types.provenance import Artifact
from .types.provenance import BuildProvenance
from .types.provenance import CloudRepoSourceContext
from .types.provenance import Command
from .types.provenance import FileHashes
from .types.provenance import GerritSourceContext
from .types.provenance import GitSourceContext
from .types.provenance import Hash
from .types.provenance import ProjectRepoId
from .types.provenance import RepoId
from .types.provenance import Source
from .types.provenance import SourceContext
from .types.upgrade import UpgradeDistribution
from .types.upgrade import UpgradeNote
from .types.upgrade import UpgradeOccurrence
from .types.upgrade import WindowsUpdate
from .types.vulnerability import Severity
from .types.vulnerability import VulnerabilityNote
from .types.vulnerability import VulnerabilityOccurrence
# Explicit public API of this package: generated message/enum types
# re-exported from .types, plus the GrafeasClient service wrapper.
__all__ = (
    "AliasContext",
    "Architecture",
    "Artifact",
    "AttestationNote",
    "AttestationOccurrence",
    "BatchCreateNotesRequest",
    "BatchCreateNotesResponse",
    "BatchCreateOccurrencesRequest",
    "BatchCreateOccurrencesResponse",
    "BuildNote",
    "BuildOccurrence",
    "BuildProvenance",
    "CVSSv3",
    "CloudRepoSourceContext",
    "Command",
    "CreateNoteRequest",
    "CreateOccurrenceRequest",
    "DeleteNoteRequest",
    "DeleteOccurrenceRequest",
    "DeploymentNote",
    "DeploymentOccurrence",
    "DiscoveryNote",
    "DiscoveryOccurrence",
    "Distribution",
    "FileHashes",
    "Fingerprint",
    "GerritSourceContext",
    "GetNoteRequest",
    "GetOccurrenceNoteRequest",
    "GetOccurrenceRequest",
    "GitSourceContext",
    "Hash",
    "ImageNote",
    "ImageOccurrence",
    "Layer",
    "ListNoteOccurrencesRequest",
    "ListNoteOccurrencesResponse",
    "ListNotesRequest",
    "ListNotesResponse",
    "ListOccurrencesRequest",
    "ListOccurrencesResponse",
    "Location",
    "Note",
    "NoteKind",
    "Occurrence",
    "PackageNote",
    "PackageOccurrence",
    "ProjectRepoId",
    "RelatedUrl",
    "RepoId",
    "Severity",
    "Signature",
    "Source",
    "SourceContext",
    "UpdateNoteRequest",
    "UpdateOccurrenceRequest",
    "UpgradeDistribution",
    "UpgradeNote",
    "UpgradeOccurrence",
    "Version",
    "VulnerabilityNote",
    "VulnerabilityOccurrence",
    "WindowsUpdate",
    "GrafeasClient",
)
| [
"noreply@github.com"
] | noreply@github.com |
e371120a4587f7edeed803eaedf3fa2de529f2e3 | 26ac73a3295abcd41d6124e05a62a775dc4111e9 | /src/ccl_malaria/logregs_fit.py | 047bdf03f160b3523ea921a3ac68ca6a19e38dc2 | [] | no_license | sdvillal/ccl-malaria | 78ed74740076981a51a301c2b6f2747eb18526dd | a28f7ef8f172c1374f5c079fdab8366333b2d56b | refs/heads/master | 2021-01-13T01:55:22.486971 | 2018-02-05T18:10:42 | 2018-02-05T18:10:42 | 17,605,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,959 | py | # coding=utf-8
"""Experiments with Morgan fingerprints and logistic regression (sklearn and vowpal wabbit)."""
from __future__ import print_function, division
from collections import OrderedDict
from copy import copy
import hashlib
from itertools import product
import os.path as op
import json
from time import time
import argh
import h5py
import joblib
from sklearn.base import clone
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.metrics import roc_auc_score
import numpy as np
from ccl_malaria import MALARIA_EXPS_ROOT, info
from minioscail.common.eval import cv_splits, enrichment_at
from ccl_malaria.features import MurmurFolder, MalariaFingerprintsExampleSet
from ccl_malaria.results import predict_malaria_unlabelled, save_molids
from minioscail.common.config import mlexp_info_helper
from minioscail.common.misc import ensure_dir
# Root directory under which all logistic-regression experiment artifacts live.
MALARIA_LOGREGS_EXPERIMENT_ROOT = op.join(MALARIA_EXPS_ROOT, 'logregs')
#######################################
# The data version we will work with
#######################################
def malaria_logreg_fpt_providers(folder):
    """Returns a tuple (rf_lab, rf_amb, rf_unl, rf_scr) with the example-sets
    used in the logreg experiments."""
    # Labelled set: ambiguous molecules removed, duplicate features zeroed.
    rf_lab = MalariaFingerprintsExampleSet(
        dset='lab', remove_ambiguous=True, zero_dupes='all', folder=folder)
    # Ambiguously-labelled molecules, kept as their own set.
    rf_amb = MalariaFingerprintsExampleSet(
        dset='amb', zero_dupes='all', folder=folder)
    # Unlabelled set.
    rf_unl = MalariaFingerprintsExampleSet(
        dset='unl', remove_ambiguous=False, zero_dupes='all', folder=folder)
    # Screening set.  N.B. dupes do not matter with logreg, so it is faster
    # not to zero them (at least when streaming, per the original author).
    rf_scr = MalariaFingerprintsExampleSet(
        dset='scr', remove_ambiguous=False, zero_dupes=None, folder=folder)
    return rf_lab, rf_amb, rf_unl, rf_scr
#######################################
# FIT the logistic regression models
#######################################
@argh.arg('--cv-seeds', nargs='+', type=int)
def fit(dest_dir=MALARIA_LOGREGS_EXPERIMENT_ROOT,
        # Logreg params
        penalty='l1',
        C=1.0,
        class_weight_auto=False,
        dual=False,
        tol=1e-4,
        fit_intercept=True,
        intercept_scaling=1,
        # CV params
        num_cv_folds=10,
        cv_seeds=(0,),
        save_unlabelled_predictions=False,
        save_fold_model=False,
        min_fold_auc=0.88,
        # Fingerprint folding params
        fingerprint_folder_seed=0,
        fingerprint_fold_size=1023,
        # Computational requirements params
        force=False,
        chunksize=1000000,
        max_logreg_tol=1E-5):
    """Logistic regression experiment using the liblinear wrapper in sklearn.

    Generates cross-val results: for each cv seed, fits one model per fold on
    the labelled malaria set, and writes per-fold scores/coefficients (HDF5)
    and per-fold / per-CV metadata (JSON) into a directory hierarchy
    ``dest_dir/data_id/model_id/eval_id``.

    Notable behaviours (all visible below):
      - experiments with ``tol`` stricter than ``max_logreg_tol`` are skipped;
      - an existing ``info.json`` makes the whole function return early unless
        ``force`` (yes, this also skips the remaining cv seeds — see comment);
      - folds with AUC below ``min_fold_auc`` write a STOP file that aborts
        the remaining folds of the cross-validation.
    """
    if max_logreg_tol is not None and tol < max_logreg_tol:
        info('Ignoring long intolerant experiments')
        return
    info('Malaria logregs experiment')
    # Command line type inference is rotten, so coerce everything explicitly...
    C = float(C)
    tol = float(tol)
    intercept_scaling = float(intercept_scaling)
    num_cv_folds = int(num_cv_folds)
    min_fold_auc = float(min_fold_auc)
    fingerprint_folder_seed = int(fingerprint_folder_seed)
    fingerprint_fold_size = int(fingerprint_fold_size)
    chunksize = int(chunksize)
    # Example providers; fold size < 1 means "no fingerprint folding"
    folder = None if fingerprint_fold_size < 1 else MurmurFolder(seed=fingerprint_folder_seed,
                                                                 fold_size=fingerprint_fold_size)
    rf_lab, rf_amb, rf_unl, rf_scr = malaria_logreg_fpt_providers(folder)
    info('Data description: %s' % rf_lab.configuration().id(nonids_too=True))
    # Experiment context: data
    data_id = rf_lab.configuration().id(nonids_too=True)
    data_dir = op.join(dest_dir, data_id)
    ensure_dir(data_dir)
    for cv_seed in cv_seeds:
        # Command line type inference is rotten...
        cv_seed = int(cv_seed)
        # Deterministic randomness
        my_rng = np.random.RandomState(seed=cv_seed)
        # Experiment context: model (ordered so the model id string is stable)
        logreg_params = OrderedDict((
            ('penalty', penalty),
            ('C', C),
            ('class_weight', 'auto' if class_weight_auto else None),
            ('dual', dual),
            ('tol', tol),
            ('fit_intercept', fit_intercept),
            ('intercept_scaling', intercept_scaling),
            ('random_state', my_rng.randint(low=0, high=4294967294)),
            # Changed, from original 1000**4, to make liblinear happy
        ))
        model_setup = LogisticRegression(**logreg_params)
        model_id = 'skllogreg__%s' % '__'.join(['%s=%s' % (k, str(v)) for k, v in logreg_params.items()])
        model_dir = op.join(data_dir, model_id)
        ensure_dir(model_dir)
        info('Model: %s' % model_id)
        # Experiment context: eval
        eval_id = 'cv__cv_seed=%d__num_folds=%d' % (cv_seed, num_cv_folds)
        eval_dir = op.join(model_dir, eval_id)
        ensure_dir(eval_dir)
        info('Eval: %d-fold cross validation (seed=%d)' % (num_cv_folds, cv_seed))
        # Already done?
        info_file = op.join(eval_dir, 'info.json')
        if op.isfile(info_file) and not force:
            info('\tAlready done, skipping...')
            return  # Oh well, a lot have been done up to here... rework somehow
        # Anytime we see this file, we know we need to stop
        stop_computing_file = op.join(eval_dir, 'STOP_BAD_FOLD')
        # --- Time to work!
        # Save model config
        joblib.dump(model_setup, op.join(model_dir, 'model_setup.pkl'), compress=3)
        # Read labelled data in
        info('Reading data...')
        X, y = rf_lab.Xy()
        info('ne=%d; nf=%d' % rf_lab.X().shape)
        # Save molids... a bit too ad-hoc...
        save_molids(data_dir, 'lab', rf_lab.ids())
        if save_unlabelled_predictions:
            save_molids(data_dir, 'unl', rf_unl.ids())
            save_molids(data_dir, 'scr', rf_scr.ids())
            save_molids(data_dir, 'amb', rf_amb.ids())
        # Save folding information.
        # By now, all the folds have already been computed:
        #   - because we cached X
        #   - and in this case we are warranted that no new unfolded features will appear at test time
        if folder is not None:
            info('Saving the map folded_features -> unfolded_feature...')
            folded2unfolded_file = op.join(data_dir, 'folded2unfolded.h5')
            if not op.isfile(folded2unfolded_file):
                # NOTE(review): h5py.File is opened with the default mode here;
                # presumably it creates the file — confirm for the pinned h5py.
                with h5py.File(folded2unfolded_file) as h5:
                    h5['f2u'] = folder.folded2unfolded()
            folder_light_file = op.join(data_dir, 'folder.pkl')
            if not op.isfile(folder_light_file):
                folder_light = copy(folder)  # Shallow copy
                folder_light.clear_cache()
                joblib.dump(folder_light, folder_light_file, compress=3)
        # Cross-val splitter
        cver = cv_splits(num_points=len(y),
                         Y=y,
                         num_folds=num_cv_folds,
                         rng=my_rng,
                         stratify=True)
        # Fit and classify
        for cv_fold_num in range(num_cv_folds):
            fold_info_file = op.join(eval_dir, 'fold=%d__info.json' % cv_fold_num)
            if op.isfile(fold_info_file):
                info('Fold %d already done, skipping' % cv_fold_num)
                continue
            if op.isfile(stop_computing_file):
                info('Bad fold detected, no more computations required')
                break
            # Split into train/test
            train_i, test_i = cver(cv_fold_num)
            Xtrain, ytrain = X[train_i, :], y[train_i]
            Xtest, ytest = X[test_i, :], y[test_i]
            assert len(set(train_i) & set(test_i)) == 0
            # Copy the model...
            model = clone(model_setup)
            start = time()
            info('Training...')
            model.fit(Xtrain, ytrain)
            train_time = time() - start
            info('Model fitting has taken %.2f seconds' % train_time)
            if save_fold_model:
                info('Saving trained model')
                joblib.dump(model, op.join(eval_dir, 'fold=%d__fitmodel.pkl' % cv_fold_num), compress=3)
            info('Predicting and saving results...')
            with h5py.File(op.join(eval_dir, 'fold=%d__scores.h5' % cv_fold_num), 'w') as h5:
                start = time()
                # Test indices
                h5['test_indices'] = test_i
                # Model
                h5['logreg_coef'] = model.coef_
                h5['logreg_intercept'] = model.intercept_
                # Test examples
                info('Scoring test...')
                scores_test = model.predict_proba(Xtest)
                fold_auc = roc_auc_score(ytest, scores_test[:, 1])
                fold_enrichment5 = enrichment_at(ytest, scores_test[:, 1], percentage=0.05)
                info('Fold %d ROCAUC: %.3f' % (cv_fold_num, fold_auc))
                info('Fold %d Enrichment at 5%%: %.3f' % (cv_fold_num, fold_enrichment5))
                h5['test'] = scores_test.astype(np.float32)
                if save_unlabelled_predictions:
                    predict_malaria_unlabelled(model,
                                               h5,
                                               rf_amb=rf_amb,
                                               rf_scr=rf_scr,
                                               rf_unl=rf_unl,
                                               chunksize=chunksize)
                test_time = time() - start
                info('Predicting has taken %.2f seconds' % test_time)
            # Finally save meta-information for the fold
            # NOTE(review): the title says 'malaria-trees-oob' in this logreg
            # experiment — looks like a copy-paste from the trees script; any
            # downstream consumer matching on title should be checked.
            metainfo = mlexp_info_helper(
                title='malaria-trees-oob',
                data_setup=data_id,
                model_setup=model_id,
                exp_function=fit,
            )
            metainfo.update((
                ('train_time', train_time),
                ('test_time', test_time),
                ('auc', fold_auc),
                ('enrichment5', fold_enrichment5),
            ))
            with open(fold_info_file, 'w') as writer:
                json.dump(metainfo, writer, indent=2, sort_keys=False)
            # One last thing, should we stop now?
            if fold_auc < min_fold_auc:
                stop_message = 'The fold %d was bad (auc %.3f < %.3f), skipping the rest of the folds' % \
                               (cv_fold_num, fold_auc, min_fold_auc)
                info(stop_message)
                with open(stop_computing_file, 'w') as writer:
                    writer.write(stop_message)
        # Summarize cross-val in the info file
        metainfo = mlexp_info_helper(
            title='malaria-logregs-cv',
            data_setup=data_id,
            model_setup=model_id,
            exp_function=fit,
        )
        metainfo.update((
            ('num_cv_folds', num_cv_folds),
            ('cv_seed', cv_seed),
        ))
        metainfo.update(logreg_params.items())
        with open(info_file, 'w') as writer:
            json.dump(metainfo, writer, indent=2, sort_keys=False)
#######################################
# Generate command lines for many logreg experiments
#######################################
def sha_for_cl(cl):
    """Return the SHA256 hex digest of the parameter part of a fit command line.

    The parameters are whatever sits between the 'fit-logregs ' token and the
    trailing ' &>' output redirection; this is the string that identifies an
    experiment configuration (and names its log file).

    NOTE(review): the command lines generated by `cl()` invoke
    'ccl-malaria logregs fit ' (not 'fit-logregs'), so for those lines the
    partition yields an empty parameter string — confirm which token the
    historical logs actually used before changing it.

    :param cl: a full command line string.
    :return: hex digest string of the SHA256 hash of the parameter substring.
    """
    params = cl.partition('fit-logregs ')[2].partition(' &>')[0]
    # hashlib requires bytes; encode explicitly so this also works on python 3
    # (on python 2 encoding the ascii parameter string is a no-op).
    return hashlib.sha256(params.encode('utf-8')).hexdigest()
def cl(with_time=False):
    """Generate command lines for the different logreg experiments and write
    them, randomly balanced across a fixed set of machines, to files next to
    this package (one file per machine).

    :param with_time: if True, prefix each command with `/usr/bin/time -v`.
    """
    all_commands = []
    def gen_cl(num_foldss=(10,),
               cv_seedsss=((0, 1, 2, 3, 4),),  # FIXME: do not use a special case, it breaks parameters shas
               penalties=('l1', 'l2'),
               Cs=(0.001, 0.01, 0.1, 0.5, 1, 5, 10, 100, 1000),
               class_autos=(True, False),
               tols=(1E-4,),
               duals=(False,),
               fingerprint_fold_sizes=(0, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767, 65537, 131073),
               fingerprint_folder_seeds=(0, 1)):
        """
        Generates command lines (one per point of the cartesian product of
        all the parameter collections) for the logistic regression tasks,
        appending them to the enclosing `all_commands` list.

        The default params are these used for the "not so crazy" experiment
        (stopped on Sunday morning).
        """
        # NOTE: the enumerate index `i` is unused; kept for byte-compatibility.
        for i, (num_folds, cv_seeds, penalty, C, class_auto, tol, dual, ff_size, ff_seed) in \
                enumerate(product(num_foldss, cv_seedsss, penalties, Cs, class_autos, tols, duals,
                                  fingerprint_fold_sizes, fingerprint_folder_seeds)):
            params = (
                '--num-cv-folds %d' % num_folds,
                '--cv-seeds %s' % ' '.join(map(str, cv_seeds)),
                '--penalty %s' % penalty,
                '--C %g' % C,
                '--class-weight-auto' if class_auto else None,
                '--tol %g' % tol,
                '--dual' if dual else None,
                '--fingerprint-fold-size %d' % ff_size,
                '--fingerprint-folder-seed %d' % ff_seed
            )
            params = ' '.join(filter(lambda x: x is not None, params))
            # NOTE: the loop variable `cl` shadows this function's own name.
            cl = 'PYTHONUNBUFFERED=1 '
            if with_time:
                cl += '/usr/bin/time -v '
            cl += 'ccl-malaria logregs fit '
            cl += params
            # NOTE(review): hashlib.sha256 needs bytes under python 3; this
            # call works on python 2 only (see sha_for_cl for the encoded form).
            cl += ' &>~/logreg-%s.log' % hashlib.sha256(params).hexdigest()
            all_commands.append(cl)
    #########################
    #
    # There are three basic tasks we want to do:
    #
    #   1- Logreg param selection:
    #      For this we would only need 1 cv seeds and 1 fpt seed, as the results are clearly consistent
    #      across folds. Probably no need to do together with fp exploration (can reduce the number
    #      of fold sizes greatly). We would like to explore also at least tolerances and duals.
    #      We might want to use a smaller number of folds (e.g. just 5 --> from 90% to 75% train size).
    #
    #   2- Fingerprint strategy exploration:
    #      We would just stick with what is done in the previous. An alternative that would be faster
    #      is to do parameter selection just with unfolded fingerprints (as anyway that is what we
    #      plan to do) and then apply the best logreg parameters to this phase. We could miss
    #      interactions but, oh well, that is life. This second faster way is what Flo planned.
    #
    #   3- Final model exploitation and interpretation:
    #      For this we would need (a) unfolded feature vectors only (b) maybe more cvseeds (c) maybe boosting.
    #      This phase only depends on phase 1 and it is what we need to generate the predictions and interpretations.
    #      We could stick with Flo's insights and just use a few big Cs, l1 and class weights.
    #
    #########################
    #
    # From sklearn implementation:
    #   dual : boolean
    #     Dual or primal formulation. Dual formulation is only implemented for l2 penalty.
    #     Prefer dual=False when n_samples > n_features.
    # So we should use dual when not using folding and regularizing via l2.
    #
    #########################
    # Task 1: logreg parameter selection
    gen_cl(num_foldss=(10,),
           cv_seedsss=((0,),),
           penalties=('l1', 'l2'),
           Cs=(0.001, 0.01, 0.1, 0.5, 1, 5, 10, 100, 1000),
           class_autos=(True, False),
           tols=(1E-2, 1E-4),  # 1E-6 Takes really long
           duals=(False,),
           fingerprint_fold_sizes=(0, 1023, 2047, 4095, 8191, 16383,),
           fingerprint_folder_seeds=(0,))
    # Task 2: fingerprint strategy exploration
    gen_cl(num_foldss=(10,),
           cv_seedsss=((0,),),
           penalties=('l1',),
           Cs=(1,),
           class_autos=(True,),
           tols=(1E-4,),
           duals=(False,),
           fingerprint_fold_sizes=(0, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767, 65537, 131073),
           fingerprint_folder_seeds=(0, 1, 2, 3))
    # Task 3: deployment classifiers computation - only one long job...
    gen_cl(num_foldss=(3, 5, 7, 10,),
           cv_seedsss=((0,), (1,), (2,), (3,), (4,)),
           penalties=('l1', 'l2',),
           Cs=(1, 5,),
           class_autos=(True,),
           tols=(1E-4,),
           duals=(False,),
           fingerprint_fold_sizes=(0,),
           fingerprint_folder_seeds=(0,))
    # ---- Save the cls to files
    all_commands = list(set(all_commands))  # Remove duplicates
    # Proper balance of workloads between machines (probabilities sum to 1)
    destinies = (
        ('galileo', [], 0.30196078),  # machine, cls, probability to be picked
        ('zeus', [], 0.25882353),
        ('str22', [], 0.18431373),
        ('strz', [], 0.25490196),
    )
    p_choice = [p for _, _, p in destinies]
    # Fixed seed so the machine assignment is reproducible.
    rng = np.random.RandomState(2147483647)
    for cl in all_commands:
        _, your_destiny, _ = destinies[rng.choice(len(destinies), p=p_choice)]
        your_destiny.append(cl)
    # Save the selections, one file per machine, next to the package root
    for name, cls, _ in destinies:
        with open(op.join(op.dirname(__file__), '..', name), 'w') as writer:
            writer.write('\n'.join(cls))
    # Summary
    total_cls = sum(len(cl) for _, cl, _ in destinies)
    print('Total number of commands: %d' % total_cls)
    for name, cls, p in destinies:
        print('\t%s\t%d %g %g' % (name.ljust(30), len(cls), p, len(cls) / (total_cls + 1.)))
if __name__ == '__main__':
    # Expose `cl` (command-line generation) and `fit` (model training)
    # as argh subcommands.
    parser = argh.ArghParser()
    parser.add_commands([cl, fit])
    parser.dispatch()
# TODO: bring back from oscail configurable to model (urgent!) and eval (unnecessary, but good for consistency)
# TODO: use SGDClassifier to be able to use elastic net
# TODO: vowpal wabbit back to scene - it was the original idea for the tutorial!
| [
"sdvillal@gmail.com"
] | sdvillal@gmail.com |
2fc9220228c1f7be5a72a6931698bff8c43fa069 | f43d1ed4a7a0a89f011e21a380974f7aa94caad2 | /src/vgg19net.py | e935644faa019cadb09d034a59bae8c36f3b9866 | [
"MIT"
] | permissive | nirmorgo/vae-photo-masher | 60b62f60cef832381eb9d353a56ed85612e5f63e | 251d87d65d23ebca8a6fd4a0cc0249c310974540 | refs/heads/master | 2020-03-30T14:15:43.866076 | 2018-11-04T21:03:28 | 2018-11-04T21:03:28 | 151,309,559 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,336 | py | import numpy as np
import scipy.misc
import scipy.io
import tensorflow as tf
# Path to the pre-trained VGG-19 weights (MatConvNet .mat format).
VGG_MODEL = 'saved_models/VGG19/imagenet-vgg-verydeep-19.mat'
# The per-channel (RGB) mean to subtract from the input to the VGG model.
# This is the mean the VGG network was trained with; minor changes to these
# values will make a lot of difference to the performance of the model.
MEAN_VALUES = np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
class VGG19():
    """
    Wrapper around the pre-trained VGG-19 network (MatConvNet weights).

    Takes only the convolution layer weights and wraps them with TensorFlow
    Conv2d, Relu and AveragePooling layers, for the purpose of 'painting'
    the picture.  VGG actually uses maxpool, but the paper indicates that
    using AveragePooling yields better results.  The last few fully
    connected layers are not used.
    """
    def __init__(self, data_path=VGG_MODEL):
        # Path to the MatConvNet .mat file with the pre-trained weights.
        self.data_path = data_path
        self.data = scipy.io.loadmat(data_path)
        self.mean_pixel = MEAN_VALUES
        # Per-layer weight structures of the network (MatConvNet cell array).
        self.layers = self.data['layers']
    def _weights(self, layer, expected_layer_name):
        """
        Return the (W, b) weights and bias from the VGG model for layer index
        `layer`.  The stored layer name is asserted against
        `expected_layer_name` to catch off-by-one layer indices early.
        """
        # Parsing the Mat file to get the pre-trained weights values;
        # the chained indexing unpacks MatConvNet's nested cell-array layout.
        wb = self.layers[0][layer][0][0][2]
        W = wb[0][0]
        b = wb[0][1]
        layer_name = self.layers[0][layer][0][0][0][0]
        assert layer_name == expected_layer_name
        return W, b
    def _conv2d_relu(self, prev_layer, layer, layer_name):
        """
        Return a Conv2D + RELU layer using the weights and biases from the
        VGG model at index `layer`.  The weights are frozen (tf.constant),
        so only the input image can be optimized downstream.
        """
        with tf.variable_scope(layer_name):
            W, b = self._weights(layer, layer_name)
            with tf.variable_scope('weights'):
                W = tf.constant(W)
                b = tf.constant(np.reshape(b, (b.size)))
            out = tf.nn.conv2d(prev_layer, filter=W, strides=[1, 1, 1, 1], padding='SAME') + b
            out = tf.nn.relu(out)
        return out
    def _avgpool(self, prev_layer, layer_name):
        """
        Return a 2x2, stride-2 AveragePooling layer.
        """
        with tf.variable_scope(layer_name):
            out = tf.nn.avg_pool(prev_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        return out
    def preprocess(self, image):
        '''
        Transfer an image from [-1, 1] range to [0, 255] and subtract the
        VGG19 per-channel mean.
        '''
        return (255/2) * image + (255/2) - self.mean_pixel
    def unprocess(self, image):
        # Only adds the mean back; the [-1,1] -> [0,255] rescaling done in
        # `preprocess` is NOT inverted here, so the result stays in the
        # 0-255 range.  TODO(review): confirm this asymmetry is intentional.
        return image + self.mean_pixel
    def net(self, img):
        """
        Build and return the VGG-19 feature graph as a dict mapping layer
        names ('input', 'conv1_1' ... 'avgpool5') to TensorFlow tensors.
        """
        # Constructs the graph model.
        graph = {}
        graph['input'] = self.preprocess(img)
        graph['conv1_1'] = self._conv2d_relu(graph['input'], 0, 'conv1_1')
        graph['conv1_2'] = self._conv2d_relu(graph['conv1_1'], 2, 'conv1_2')
        graph['avgpool1'] = self._avgpool(graph['conv1_2'], 'avgpool1')
        graph['conv2_1'] = self._conv2d_relu(graph['avgpool1'], 5, 'conv2_1')
        graph['conv2_2'] = self._conv2d_relu(graph['conv2_1'], 7, 'conv2_2')
        graph['avgpool2'] = self._avgpool(graph['conv2_2'], 'avgpool2')
        graph['conv3_1'] = self._conv2d_relu(graph['avgpool2'], 10, 'conv3_1')
        graph['conv3_2'] = self._conv2d_relu(graph['conv3_1'], 12, 'conv3_2')
        graph['conv3_3'] = self._conv2d_relu(graph['conv3_2'], 14, 'conv3_3')
        graph['conv3_4'] = self._conv2d_relu(graph['conv3_3'], 16, 'conv3_4')
        graph['avgpool3'] = self._avgpool(graph['conv3_4'],'avgpool3')
        graph['conv4_1'] = self._conv2d_relu(graph['avgpool3'], 19, 'conv4_1')
        graph['conv4_2'] = self._conv2d_relu(graph['conv4_1'], 21, 'conv4_2')
        graph['conv4_3'] = self._conv2d_relu(graph['conv4_2'], 23, 'conv4_3')
        graph['conv4_4'] = self._conv2d_relu(graph['conv4_3'], 25, 'conv4_4')
        graph['avgpool4'] = self._avgpool(graph['conv4_4'], 'avgpool4')
        graph['conv5_1'] = self._conv2d_relu(graph['avgpool4'], 28, 'conv5_1')
        graph['conv5_2'] = self._conv2d_relu(graph['conv5_1'], 30, 'conv5_2')
        graph['conv5_3'] = self._conv2d_relu(graph['conv5_2'], 32, 'conv5_3')
        graph['conv5_4'] = self._conv2d_relu(graph['conv5_3'], 34, 'conv5_4')
        graph['avgpool5'] = self._avgpool(graph['conv5_4'], 'avgpool5')
        return graph
"nirmorgo@gmail.com"
] | nirmorgo@gmail.com |
db9830c43f9f8310d1aa07d4442177ea58f86be6 | a6a211fb59f01a81f39ea9a8b3b2758eb27b54ee | /scientific_computing/boilerplate-polygon-area-calculator/shape_calculator.py | 35562df082d8582a2c3cc46a80a71c0a79308f32 | [
"MIT"
] | permissive | gantro/FreeCodeCampPython | a77985ce1ae280fffd3fe1c451cc9f575a148866 | e1a0d215051a302368953c97e9e7210bc35c98e8 | refs/heads/main | 2023-01-21T10:14:09.275656 | 2020-12-06T16:40:14 | 2020-12-06T16:40:14 | 318,668,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
def __repr__(self):
return ('Rectangle(width=%d, height=%d)' % (self.width, self.height))
def set_width(self, val):
self.width = val
def set_height(self, val):
self.height = val
def get_area(self):
return (self.width * self.height)
def get_perimeter(self):
return (2 * self.width + 2 * self.height)
def get_diagonal(self):
return ((self.width ** 2 + self.height ** 2) ** 0.5)
def get_picture(self):
if self.width > 50 or self.height > 50:
return 'Too big for picture.'
else:
return (('*' * self.width + '\n') * self.height)
def get_amount_inside(self, shape):
return ((self.width // shape.width) * (self.height // shape.height))
class Square(Rectangle):
    """A Rectangle constrained to equal width and height."""

    def __init__(self, side):
        super().__init__(side, side)

    def __repr__(self):
        # e.g. 'Square(side=5)'
        return 'Square(side=%d)' % self.width

    def set_side(self, val):
        """Resize the square: both dimensions become `val`."""
        self.width = self.height = val

    def set_width(self, val):
        # Width and height must stay equal, so delegate to set_side.
        self.set_side(val)

    def set_height(self, val):
        # Same invariant as set_width.
        self.set_side(val)
| [
"noreply@github.com"
] | noreply@github.com |
fa05b68b68103da6eba41cb3eace31abf9f4ba74 | 4a5a39858bab54d9fe06364ecfe8edc2747b87f6 | /Code Jam 2018/Round 1C/ant-stack.py | ddc1e8535674f7ccbaa880e66e31d6b637f53b28 | [] | no_license | gsakkas/code-jam | d85a63c11d13ba405b9df4be1e6739ef5c5394ae | 8e81a4d9b2ea11d9bbb9b3e206951a2261798458 | refs/heads/master | 2021-01-22T19:54:24.732574 | 2018-05-17T12:14:06 | 2018-05-17T12:14:06 | 85,257,349 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | def read_int():
return int(raw_input())
def read_many_ints():
    """Read one whitespace-separated line of ints from stdin (python 2: raw_input)."""
    return map(int, raw_input().split())
def solve(n, ws=None):
    """Return the maximal height of a valid ant stack built from n ant weights.

    DP where sums[i][j] is the minimal total weight of a stack of j+1 ants
    chosen from the first i+1 ants (order preserved).  An ant can carry a
    stack weighing at most 6x its own weight, hence the
    ``sums[i - 1][j - 1] <= 6 * ws[i]`` feasibility test.

    The inner dimension is capped at 139 — presumably a precomputed upper
    bound on any achievable stack height for the contest weight limits
    (TODO confirm against the Code Jam analysis).

    :param n: number of ants.
    :param ws: optional list of the n weights; read from stdin via
        read_many_ints() when omitted (the original behaviour).
    :return: the largest feasible stack size.
    """
    if ws is None:
        ws = read_many_ints()
    large = 10 ** 100  # sentinel: "no stack of this size is feasible"
    width = min(139, n)
    sums = [[large] * width for _ in range(n)]
    for i in range(n):
        sums[i][0] = ws[i]
    for i in range(1, n):
        for j in range(1, min(width, i + 1)):
            if sums[i - 1][j - 1] <= 6 * ws[i]:
                sums[i][j] = min(sums[i - 1][j - 1] + ws[i], sums[i - 1][j])
            else:
                sums[i][j] = sums[i - 1][j]
    # The answer is the largest j with a feasible (non-sentinel) stack.
    # Bug fix: the scan used to start at j = n - 1, which raised IndexError
    # whenever n > 139 (rows are only min(139, n) wide).
    j = width - 1
    while j >= 0 and sums[n - 1][j] == large:
        j -= 1
    return j + 1
if __name__ == "__main__":
t = read_int()
for test in xrange(1, t + 1):
n = read_int()
print "Case #{}: {}".format(test, solve(n))
exit(0)
| [
"george.p.sakkas@gmail.com"
] | george.p.sakkas@gmail.com |
bb144a2611bdf1bcc08cdf8a0c080015a129f699 | 02569e805e7191d21212631d8e6818cb396702d5 | /scripts/DimRed.py | 4842f5d3db4519a6cfbbba064506e3f2a80828ce | [] | no_license | asstergi/Big-Data-and-Business-Analytics-Thesis-AUEB | f390a17f6dcf377b494d8da4c1c89538981986d2 | a6504c4b1c940bb1f4b9a0caa93f0212241f575a | refs/heads/master | 2020-12-24T14:00:39.085196 | 2018-04-07T14:45:42 | 2018-04-07T14:45:42 | 27,644,214 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,889 | py | #!/usr/bin/env python
from __future__ import division
import re
import numpy as np
import pandas as pd
from scipy.sparse import vstack, csr_matrix
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.decomposition import TruncatedSVD, ProjectedGradientNMF
from sklearn.random_projection import SparseRandomProjection, GaussianRandomProjection
from sklearn import svm
from sklearn.lda import LDA
from nltk import stem
from nltk.corpus import stopwords
from SamplingBasedSelection import WBS, SS
class StemTokenizer(object):
    """
    Callable tokenizer for CountVectorizer with lemmatization support.

    Lowercases the document, splits on runs of non-word characters and
    lemmatizes every token with the WordNet lemmatizer.
    (The previously stored, never-used compiled pattern `self.word` was
    dead code and has been removed.)
    """
    def __init__(self):
        self.wnl = stem.WordNetLemmatizer()

    def __call__(self, doc):
        """Return the list of lemmatized, lower-cased tokens of `doc`.

        NOTE(review): splitting on \\W+ yields empty leading/trailing tokens
        when the document starts/ends with punctuation; those empty strings
        reach the lemmatizer unchanged (and are later pruned by the
        vectorizer's min_df).  Kept as-is to preserve behaviour.
        """
        tokens = re.split(r'\W+', doc.lower())
        return [self.wnl.lemmatize(t) for t in tokens]
def build_tfidf(train_data, test_data):
    """Vectorize raw documents into l2-normalised tf-idf matrices.

    The vocabulary and idf statistics are learned on `train_data` only and
    then applied to both splits, so no information leaks from test to train.

    :return: (train_tfidf, test_tfidf) sparse matrices.
    """
    vectorizer = CountVectorizer(tokenizer=StemTokenizer(),
                                 stop_words=stopwords.words('english'),
                                 min_df=3,
                                 dtype=np.double)
    vectorizer.fit(train_data)
    tfidf = TfidfTransformer(norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False)
    train_tfidf = tfidf.fit_transform(vectorizer.transform(train_data))
    test_tfidf = tfidf.transform(vectorizer.transform(test_data))
    return train_tfidf, test_tfidf
def benchmark(clf, train_X, train_y, test_X, test_y):
    """
    Fit `clf` on the training split and evaluate it on the test split.

    :return: dict with weighted 'f1', 'recall', 'precision' and plain 'accuracy'.
    """
    clf.fit(train_X, train_y)
    predictions = clf.predict(test_X)
    return {
        'f1': metrics.f1_score(test_y, predictions, average='weighted'),
        'recall': metrics.recall_score(test_y, predictions, average='weighted'),
        'precision': metrics.precision_score(test_y, predictions, average='weighted'),
        'accuracy': metrics.accuracy_score(test_y, predictions),
    }
def select_features_chi2(train_X, train_y, test_X, k):
    """Keep the `k` features with the highest chi-squared score w.r.t. the labels.

    `k` may be the string 'all', in which case both matrices pass through untouched.
    """
    if k == 'all':
        return train_X, test_X
    chooser = SelectKBest(chi2, k=k)
    chooser.fit(train_X, train_y)
    return chooser.transform(train_X), chooser.transform(test_X)
def select_features_svd(train_X, train_y, test_X, k):
    """Project both splits onto the top-`k` components of a truncated SVD fit on train.

    `train_y` is accepted for signature uniformity with the other selectors but unused.
    """
    reducer = TruncatedSVD(n_components=k, random_state=42)
    reducer.fit(train_X)
    return reducer.transform(train_X), reducer.transform(test_X)
def select_features_nmf(train_X, train_y, test_X, k):
    """Reduce both splits to `k` non-negative factors (NMF, nndsvd init, fixed seed).

    `train_y` is accepted for signature uniformity with the other selectors but unused.
    """
    factorizer = ProjectedGradientNMF(n_components=k, init='nndsvd', random_state=42)
    factorizer.fit(train_X)
    return factorizer.transform(train_X), factorizer.transform(test_X)
def select_features_SparseRandomProjections(train_X, train_y, test_X, k):
    """Project both splits to `k` dimensions with a sparse random projection (fixed seed).

    `train_y` is accepted for signature uniformity with the other selectors but unused.
    """
    projector = SparseRandomProjection(n_components=k, random_state=42)
    projector.fit(train_X)
    return projector.transform(train_X), projector.transform(test_X)
def select_features_GaussianRandomProjections(train_X, train_y, test_X, k):
    """Project both splits to `k` dimensions with a Gaussian random projection (fixed seed).

    `train_y` is accepted for signature uniformity with the other selectors but unused.
    """
    projector = GaussianRandomProjection(n_components=k, random_state=42)
    projector.fit(train_X)
    return projector.transform(train_X), projector.transform(test_X)
def select_features_LDA(train_X, train_y, test_X, k):
    """Project both splits onto (at most) `k` Fisher discriminant axes (supervised).

    LDA requires dense input, hence the toarray() densification of the
    sparse matrices.
    """
    projector = LDA(n_components=k)
    projector.fit(train_X.toarray(), train_y)
    return projector.transform(train_X.toarray()), projector.transform(test_X.toarray())
def select_features_WBS(train_X, train_y, test_X, k):
    """Weight-based sampling: represent both splits by `k` sampled columns (WBS).

    The factorization sees train and test jointly; the resulting matrix C is
    split back into its train and test parts by row count.
    `train_y` is accepted for signature uniformity with the other selectors but unused.
    """
    n_train = train_X.shape[0]
    n_test = test_X.shape[0]
    sampler = WBS(train_X, test_X, r=k)
    sampler.factorize()
    rows = sampler._C
    return rows[range(n_train), :], rows[range(n_train, n_train + n_test), :]
def select_features_SS(train_X, train_y, test_X, k, svdRank, svdV):
    """Subspace-sampling column selection driven by a precomputed SVD.

    `svdRank` and `svdV` are the rank and right-singular-vector matrix
    computed once by the caller, so the (expensive) SVD is not repeated for
    every `k`.  Train and test are stacked, factorized together, and the
    resulting matrix C split back by row count.
    `train_y` is accepted for signature uniformity with the other selectors but unused.
    """
    n_train = train_X.shape[0]
    n_test = test_X.shape[0]
    sampler = SS(vstack([train_X, test_X]), r=k, svdRank=svdRank, svdV=svdV)
    sampler.factorize()
    rows = sampler._C
    return rows[range(n_train), :], rows[range(n_train, n_train + n_test), :]
def InfoEntropy(results):
    """Shannon entropy (in bits) of the class counts in `results`.

    Zero-count classes contribute nothing; an all-zero count vector has
    entropy 0 by convention.
    """
    import math
    total = float(sum(results))
    if total == 0:
        return 0
    probs = results / total
    # log(0) is undefined; substitute 1 for zero probabilities so those terms
    # contribute -p*log(1) = 0, matching the 0*log(0) = 0 convention.
    safe = np.where(np.greater(probs, 0.0), probs, 1)
    return sum(-probs * np.log(safe) / math.log(2))
def InfoGain(varMat):
    """Information gain of a binarized term, from its contingency table.

    `varMat` is the (term-indicator x class) pivot table built in `IG`:
    rows 0/1 = term absent/present, one column per class, entries are
    document counts.

    The code computes H(term) - sum_c p(c) * H(term | class c); by the
    symmetry of mutual information this equals the textbook gain
    H(class) - sum_v p(v) * H(class | term=v).
    """
    variableRes = np.sum(varMat,0)  # column sums: documents per class
    overallRes = np.sum(varMat,1)   # row sums: documents per term value
    term2 = 0
    for i in xrange(len(variableRes)):
        # varMat[i] selects the i-th *column* (class) of the DataFrame, i.e.
        # the absent/present counts within that class.  NOTE(review): this
        # assumes the class labels (column names) are exactly 0..n-1, as
        # produced by LabelEncoder — confirm if labels ever change.
        term2 = term2 + variableRes[i] * InfoEntropy(varMat[i])
    tSum = sum(overallRes)
    if tSum != 0.0:
        term2 = 1./tSum * term2
        gain = InfoEntropy(overallRes) - term2
    else:
        gain = 0
    return gain
def IG (train_X, train_y):
    """Per-feature information gain scores for a sparse document-term matrix.

    Each column is binarized (present/absent), cross-tabulated against the
    labels, and scored with `InfoGain`.  Returns a 1-D numpy array with one
    score per feature.  O(n_features) pivot tables — slow for big vocabularies.
    """
    InfGain = []
    for term in range(train_X.shape[1]):
        col = train_X[:,term].toarray()
        col[col > 0] = 1  # binarize: does the term occur in the document?
        # Two-column frame: A = term indicator (0/1), B = class label.
        DF = pd.DataFrame(np.vstack([[item for sublist in col for item in sublist], train_y]))
        DF = DF.transpose()
        DF.columns = ['A','B']
        # NOTE(review): `rows=` is the pre-0.14 pandas spelling of `index=`;
        # this call fails on modern pandas — confirm the pinned version.
        DF = DF.pivot_table(rows='A', columns='B', aggfunc=len, fill_value = 0)
        InfGain.append(InfoGain(DF))
    return np.asarray(InfGain)
def select_features_IG(train_X, test_X, InfGain, k):
    """Keep the `k` columns with the largest information-gain score.

    `InfGain` is the per-feature score vector computed by `IG`.
    """
    top_k = np.argsort(InfGain, kind="mergesort")[-k:]
    keep = np.zeros(InfGain.shape, dtype=bool)
    keep[top_k] = 1
    return train_X[:, keep], test_X[:, keep]
def MutInf(varMat):
    """Maximum (over classes) pointwise mutual information of a term ('MImax').

    `varMat` is the (term-indicator x class) contingency table built in `MI`
    (rows 0/1 = term absent/present, one column per class).
    """
    variableRes = np.sum(varMat,0)  # documents per class
    overallRes = np.sum(varMat,1)   # documents per term value (absent/present)
    MItc = -np.Infinity
    # NOTE(review): pt divides the term-present count by the term-*absent*
    # count (overallRes[0]) rather than by the total number of documents;
    # this looks like it should be overallRes[1] / sum(overallRes) — verify.
    pt = 1.0*overallRes[1]/overallRes[0]
    for docClass in range(len(variableRes)):
        pc = 1.0*variableRes[docClass]/sum(variableRes)          # class prior
        ptc = 1.0*varMat.iloc[1,docClass]/sum(variableRes)       # joint p(term, class)
        classMI = np.log2(ptc/(pt*pc))
        if classMI> MItc:
            MItc = classMI
    return MItc
def MI (train_X, train_y):
    """Per-feature 'MImax' mutual-information scores (see `MutInf`).

    Features occurring in *every* document get -inf so they are never
    selected (their contingency table would be degenerate, with no
    'term absent' row).
    """
    MutualInformation = []
    for term in range(train_X.shape[1]):
        col = train_X[:,term].toarray()
        col[col > 0] = 1  # binarize: does the term occur in the document?
        if (col.sum()==train_X.shape[0]):
            MI = -np.Infinity  # NOTE: local name shadows this function
            MutualInformation.append(MI)
        else:
            DF = pd.DataFrame(np.vstack([[item for sublist in col for item in sublist], train_y]))
            DF = DF.transpose()
            DF.columns = ['A','B']
            # NOTE(review): `rows=` is pre-0.14 pandas (`index=` nowadays).
            DF = DF.pivot_table(rows='A', columns='B', aggfunc=len, fill_value = 0)
            MutualInformation.append(MutInf(DF))
    return np.asarray(MutualInformation)
def select_features_MI(train_X, test_X, MutInf, k):
    """Keep the `k` columns with the largest MImax mutual-information score.

    `MutInf` is the per-feature score vector computed by `MI` (the parameter
    name shadows the scoring function of the same name).
    """
    best = np.argsort(MutInf, kind="mergesort")[-k:]
    selected = np.zeros(MutInf.shape, dtype=bool)
    selected[best] = 1
    return train_X[:, selected], test_X[:, selected]
def GI(varMat):
    """Gini-index style purity score of a term from its contingency table.

    Normalizes the prior-corrected class-conditional term probabilities
    p(class|term)/p(class) to sum to one, and returns the sum of their
    squares: small for a term spread evenly over classes, up to 1.0 for a
    perfectly class-specific term.
    """
    variableRes = np.sum(varMat,0)                     # documents per class
    pc = 1.0*variableRes/sum(variableRes)              # class priors
    pct = 1.0*varMat.iloc[1,:]/sum(varMat.iloc[1,:])   # p(class | term present)
    denominator = sum(pct/pc)
    Pnew = (pct/pc)/denominator  # prior-corrected, renormalised class profile
    PctWeighted = sum(Pnew**2)
    return PctWeighted
def GiniIndex (train_X, train_y):
    """Per-feature Gini-index scores (see `GI`).

    Terms occurring in *every* document are scored 0 (no discriminative
    power, and their contingency table would be degenerate).
    """
    GiniIndex = []  # NOTE: local name shadows this function
    for term in range(train_X.shape[1]):
        col = train_X[:,term].toarray()
        col[col > 0] = 1  # binarize: does the term occur in the document?
        if (col.sum()==train_X.shape[0]):
            GinInd = 0
            GiniIndex.append(GinInd)
        else:
            DF = pd.DataFrame(np.vstack([[item for sublist in col for item in sublist], train_y]))
            DF = DF.transpose()
            DF.columns = ['A','B']
            # NOTE(review): `rows=` is pre-0.14 pandas (`index=` nowadays).
            DF = DF.pivot_table(rows='A', columns='B', aggfunc=len, fill_value = 0)
            GiniIndex.append(GI(DF))
    return np.asarray(GiniIndex)
def select_features_GI(train_X, test_X, GiniIndex, k):
    """Keep the `k` columns with the largest Gini-index score.

    `GiniIndex` is the per-feature score vector computed by the function of
    the same name (which the parameter shadows).
    """
    winners = np.argsort(GiniIndex, kind="mergesort")[-k:]
    chosen = np.zeros(GiniIndex.shape, dtype=bool)
    chosen[winners] = 1
    return train_X[:, chosen], test_X[:, chosen]
if __name__ == '__main__':
    # Benchmark script: loads one of three text-classification corpora from
    # Excel, builds tf-idf features, precomputes the filter scores (IG, MImax,
    # Gini) and an SVD, then sweeps every dimensionality-reduction method over
    # a grid of target dimensions, training a linear SVM on each and saving
    # all scores to Results.csv.
    dataset = "newsgroup"  # switch: "reuters52" | "reuters8" | "newsgroup"
    if dataset == "reuters52" :
        trainData = pd.ExcelFile('Reuters-21578 R52 train.xlsx')
        testData = pd.ExcelFile('Reuters-21578 R52 test.xlsx')
        trainData = trainData.parse('Sheet1', header=None)
        testData = testData.parse('Sheet1', header=None)
    elif dataset == "reuters8":
        trainData = pd.ExcelFile('Reuters-21578 R8 train.xlsx')
        testData = pd.ExcelFile('Reuters-21578 R8 test.xlsx')
        trainData = trainData.parse('Sheet1', header=None)
        testData = testData.parse('Sheet1', header=None)
    elif dataset == "newsgroup":
        trainData = pd.ExcelFile('newsgroup train.xlsx')
        testData = pd.ExcelFile('newsgroup test.xlsx')
        trainData = trainData.parse('Sheet1', header=None)
        testData = testData.parse('Sheet1', header=None)
    # Column 0 holds the label, column 1 the raw document text.
    train_text = trainData.iloc[:,1]
    test_text = testData.iloc[:,1]
    train_label = trainData.iloc[:,0]
    test_label = testData.iloc[:,0]
    print('encode labels')
    encoder = LabelEncoder()
    train_y = encoder.fit_transform(train_label)
    test_y = encoder.transform(test_label)
    print("build tfidf")
    train_X, test_X = build_tfidf(train_text, test_text)
    # Precompute the per-feature filter scores once; each is O(n_features)
    # pivot tables and dominates the preprocessing time.
    print("calculating Information Gain")
    InfGain = IG(train_X, train_y)
    print("calculating Mutual Information")
    MutInfor = MI(train_X, train_y)
    print("calculating Gini Index")
    GinInd = GiniIndex(train_X, train_y)
    # Full SVD computed once so select_features_SS can reuse V for every k.
    print("calculating SVD for Subspace-Sampling")
    U, S, V = np.linalg.svd(train_X.todense(),full_matrices =False)
    train_X = csr_matrix(train_X)
    test_X = csr_matrix(test_X)
    print("training")
    k_features = [2, 5, 10, 50, 100, 200, 500, 1000]
    results = []
    methods = ['IG','chi-2', 'MImax', 'GI', 'SVD', 'Sparse Random Projection', 'Gaussian Random Projection', 'NMF', 'LDA', 'WBS','SS']
    def updatescores(name, k, result):
        """
        Tag `result` with the method name and feature count, then record it
        in the enclosing `results` list.
        """
        result.update({'method' : name , 'n_features' : k})
        results.append(result)
    for method in methods:
        for k in k_features:
            print("select {} features using {}".format(k, method))
            if method == 'chi-2':
                train_X_sub, test_X_sub = select_features_chi2(train_X, train_y,test_X, k)
            elif method == 'IG':
                train_X_sub, test_X_sub = select_features_IG(train_X, test_X, InfGain, k)
            elif method == 'MImax':
                train_X_sub, test_X_sub = select_features_MI(train_X, test_X, MutInfor, k)
            elif method == 'GI':
                train_X_sub, test_X_sub = select_features_GI(train_X, test_X, GinInd, k)
            elif method == 'SVD':
                train_X_sub, test_X_sub = select_features_svd(train_X, train_y,test_X, k)
            elif method == 'Sparse Random Projection':
                train_X_sub, test_X_sub = select_features_SparseRandomProjections(train_X, train_y,test_X, k)
            elif method == 'Gaussian Random Projection':
                train_X_sub, test_X_sub = select_features_GaussianRandomProjections(train_X, train_y,test_X, k)
            elif method == 'NMF':
                train_X_sub, test_X_sub = select_features_nmf(train_X, train_y,test_X, k)
            elif method == 'LDA':
                # LDA yields at most n_classes - 1 discriminant axes; skip
                # the combinations that ask for more.
                if k < len(np.unique(train_y)):
                    train_X_sub, test_X_sub = select_features_LDA(train_X, train_y,test_X, k)
                else:
                    continue
            elif method == 'WBS':
                train_X_sub, test_X_sub = select_features_WBS(train_X, train_y,test_X, k)
            elif method == 'SS':
                train_X_sub, test_X_sub = select_features_SS(train_X, train_y,test_X, k, svdRank=100, svdV=V)
            elif method == 'all':
                # Not in `methods` above; kept as a manual escape hatch.
                train_X_sub, test_X_sub = train_X, test_X
            print('Training SVM...')
            clf = svm.LinearSVC()
            result = benchmark(clf, train_X_sub, train_y, test_X_sub, test_y)
            updatescores(method, k, result)
    resultsDF = pd.DataFrame(results)
    # NOTE: python 2 print statement (the rest of the file uses print()).
    print resultsDF
    resultsDF.to_csv('Results.csv')
"a.stergioudis@gmail.com"
] | a.stergioudis@gmail.com |
af7343241d25adfa0239fc48d6b1c29e0fd2cfcf | 360ae1188ad79e71ccc72da0b9ae709bda678f91 | /ryu/services/protocols/__init__.py | 340a42305b81a40727ffe472e0a96ccaa638aed4 | [
"Apache-2.0"
] | permissive | faucetsdn/ryu | 47b3523e7ccb381f3bdf2877a3f9f01cb1876054 | d6cda4f427ff8de82b94c58aa826824a106014c2 | refs/heads/master | 2023-09-05T06:37:21.991029 | 2022-06-09T23:09:40 | 2022-06-09T23:09:40 | 2,945,007 | 385 | 215 | Apache-2.0 | 2022-11-13T10:50:25 | 2011-12-09T03:43:50 | Python | UTF-8 | Python | false | false | 682 | py | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| [
"fujita.tomonori@lab.ntt.co.jp"
] | fujita.tomonori@lab.ntt.co.jp |
6114b62911bc8b004342e18dd2ca4b7eaed01aaa | 8bba06939a8cce6b5ecce25d15f9f9062b8206d8 | /pyrelacs/NixIterators.py | 31b3b33958cc313e684a0b3aa724840915e1bf49 | [] | no_license | jpresern/pyrelacs | e72c82d999e5d97eaecde2eed1bee94041fe46f3 | 62eefec7541406eeb30d932b8a844ea5abef2fd6 | refs/heads/master | 2021-01-18T02:30:25.237867 | 2016-03-11T14:47:20 | 2016-03-11T14:47:20 | 27,421,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | from IPython import embed
import nix
import numpy as np
def trial_iterator(multi_tag):
    """Yield one dict per trial (position/extent pair) of a nix MultiTag.

    Each dict maps every referenced data array's name to its data cut to the
    trial window, plus a time axis under key 't'.  Set-dimension references
    (event/spike-time arrays) are filtered by value to [p, p+e]; all other
    references are read through multi_tag.retrieve_data.
    """
    # Event-like (set-dimension) references, fully loaded once up-front.
    traces = {r.name:np.asarray(r.data) for r in multi_tag.references if r.dimensions[0].dimension_type.name == 'Set'}
    # Sampling interval per regularly-sampled reference.
    sample_interv = {r.name:r.dimensions[0].sampling_interval for r in multi_tag.references if r.dimensions[0].dimension_type.name == 'Sample'}
    positions = multi_tag.positions[:]
    extents = multi_tag.extents[:]
    for i, (p, e) in enumerate(zip(positions, extents)):
        ret = {}
        for ref_no, r in enumerate(multi_tag.references):
            dim = r.dimensions[0]
            if dim.dimension_type.name == 'Set':
                # Keep only the events that fall inside this trial window.
                ret[r.name] = traces[r.name][(traces[r.name] >= p) & (traces[r.name] <= p+e)]
            else:
                ret[r.name] = multi_tag.retrieve_data(i, ref_no)
        # NOTE(review): the time axis is hard-coded to the 'V-1' trace's
        # sampling interval — this assumes every file has a sampled
        # reference named 'V-1'; confirm for other channel layouts.
        ret['t'] = np.arange(p,p+e+sample_interv['V-1'],sample_interv['V-1'])
        yield ret
#
# for p, e in zip(np.asarray(multi_tag.positions.data), np.asarray(multi_tag.extents.data)):
# ret = dict()
# for r in multi_tag.references:
# dim = r.dimensions[0]
#
# if dim.dimension_type.name == 'Set':
# ret[r.name] = traces[r.name][(traces[r.name] >= p) & (traces[r.name] <= p+e)]
# elif dim.dimension_type.name == 'Sample':
# pos = int(p/sample_interv[r.name])
# ext = int(e/sample_interv[r.name])
# ret[r.name] = traces[r.name][pos:pos+ext]
# ret['t'] = np.arange(p,p+e,sample_interv['V-1'])
if __name__=="__main__":
import sys
file = sys.argv[1]
nix_file = nix.File.open(file, nix.FileMode.ReadWrite)
for block in [b for b in nix_file.blocks if 'FI-Curve' not in b.name]:
for tag in block.multi_tags:
for trial_data in trial_iterator(tag):
embed()
exit()
| [
"fabian.sinz@uni-tuebingen.de"
] | fabian.sinz@uni-tuebingen.de |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.