blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c6b54f026d0192e33cc07008b2f0144c230049c8
|
0f79fd61dc47fcafe22f83151c4cf5f2f013a992
|
/BOJ/1753.py
|
9ad0359bef96a50500ff44c3a43a2cb8f213596e
|
[] |
no_license
|
sangm1n/problem-solving
|
670e119f28b0f0e293dbc98fc8a1aea74ea465ab
|
bc03f8ea9a6a4af5d58f8c45c41e9f6923f55c62
|
refs/heads/master
| 2023-04-22T17:56:21.967766
| 2021-05-05T12:34:01
| 2021-05-05T12:34:01
| 282,863,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
"""
author : Lee Sang Min
github : https://github.com/sangm1n
e-mail : dltkd96als@naver.com
title : 최단경로
description : Shortest Path
"""
import heapq
def dijkstra(start, distance, adj=None):
    """Single-source shortest paths (Dijkstra with a lazy-deletion heap).

    Fills ``distance`` in place with the cheapest cost from ``start`` to
    every vertex; unreachable vertices keep their initial sentinel value.

    Args:
        start: source vertex index.
        distance: mutable list pre-filled with an INF sentinel.
        adj: adjacency list where adj[v] is a list of (neighbor, weight)
            pairs. Defaults to the module-level ``graph`` for backward
            compatibility with the original two-argument call.
    """
    if adj is None:
        adj = graph  # fall back to the module-level adjacency list
    distance[start] = 0
    q = [(0, start)]
    while q:
        weight, v = heapq.heappop(q)
        # Lazy deletion: a popped entry worse than the best known cost
        # is stale (the vertex was already settled via a cheaper path).
        if weight > distance[v]:
            continue
        for nxt, cost in adj[v]:
            new_cost = weight + cost
            if new_cost < distance[nxt]:
                distance[nxt] = new_cost
                heapq.heappush(q, (new_cost, nxt))
# Sentinel for "unreachable"; problem constraints keep real costs far below it.
INF = int(1e9)

# V vertices, E directed edges, then the source vertex.
V, E = map(int, input().split())
start = int(input())
graph = [[] for _ in range(V + 1)]  # 1-indexed adjacency list
for _ in range(E):
    u, v, w = map(int, input().split())
    graph[u].append((v, w))

distance = [INF] * (V + 1)
dijkstra(start, distance)
# Print one result per vertex 1..V. A plain loop replaces the original
# side-effect list comprehension, which built a throwaway list of Nones.
for dist in distance[1:]:
    print('INF' if dist == INF else dist)
|
[
"dltkd96als@naver.com"
] |
dltkd96als@naver.com
|
8f49d60cc15b59556cdcb8cf63d51c39fdccb49e
|
bb680585552532aeaad4c297f68c6d87c29b70b3
|
/checks/cs50/2017/x/greedy/checks.py
|
0ad789000789772622adc3837c1603266050c84d
|
[] |
no_license
|
mstrperson/check50
|
3eca1d2f67ca422d50313771953e903fd1d3636f
|
e615b8685c5aeb4c4015c86cd62cae9dc1c9bab5
|
refs/heads/develop
| 2021-01-01T06:14:33.906452
| 2017-07-18T20:07:02
| 2017-07-18T20:07:02
| 97,390,234
| 0
| 0
| null | 2017-07-18T18:54:13
| 2017-07-16T14:51:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,133
|
py
|
import os
import re
import sys
sys.path.append(os.getcwd())
from check50 import TestCase, Error, check
class Greedy(TestCase):
    # check50 suite for the CS50x "greedy" problem. Each check's docstring is
    # the description check50 reports to the student, so those strings (and
    # the stdout regex patterns) are part of the runtime contract — do not
    # edit them cosmetically.
    @check()
    def exists(self):
        """greedy.c exists."""
        super(Greedy, self).exists("greedy.c")
    @check("exists")
    def compiles(self):
        """greedy.c compiles."""
        # All later checks depend on this compile step.
        self.spawn("clang -o greedy greedy.c -lcs50").exit(0)
    @check("compiles")
    def test041(self):
        """input of 0.41 yields output of 4"""
        self.spawn("./greedy").stdin("0.41").stdout("^4\n$", 4).exit(0)
    @check("compiles")
    def test001(self):
        """input of 0.01 yields output of 1"""
        self.spawn("./greedy").stdin("0.01").stdout("^1\n$", 1).exit(0)
    @check("compiles")
    def test015(self):
        """input of 0.15 yields output of 2"""
        self.spawn("./greedy").stdin("0.15").stdout("^2\n$", 2).exit(0)
    @check("compiles")
    def test160(self):
        """input of 1.6 yields output of 7"""
        self.spawn("./greedy").stdin("1.6").stdout("^7\n$", 7).exit(0)
    @check("compiles")
    def test230(self):
        """input of 23 yields output of 92"""
        self.spawn("./greedy").stdin("23").stdout("^92\n$", 92).exit(0)
    @check("compiles")
    def test420(self):
        """input of 4.2 yields output of 18"""
        # Checked manually (not via .stdout(regex)) so that the common
        # float-rounding mistake (answer 22) gets a targeted hint.
        out = self.spawn("./greedy").stdin("4.2").stdout()
        desired = "18"
        if not re.compile("^18\n$").match(out):
            if re.compile("^22\n$").match(out):
                raise Error((out, desired), "Did you forget to round your input to the nearest cent?")
            else:
                raise Error((out, desired))
    @check("compiles")
    def test_reject_negative(self):
        """rejects a negative input like -.1"""
        # NOTE(review): docstring says "-.1" but the input sent is "-1";
        # confirm which value the spec intends before changing either.
        self.spawn("./greedy").stdin("-1").reject()
    @check("compiles")
    def test_reject_foo(self):
        """rejects a non-numeric input of "foo" """
        self.spawn("./greedy").stdin("foo").reject()
    @check("compiles")
    def test_reject_empty(self):
        """rejects a non-numeric input of "" """
        self.spawn("./greedy").stdin("").reject()
|
[
"brianyu28@gmail.com"
] |
brianyu28@gmail.com
|
56555304f5a4596878649f0fb0942c6ca5ceb0d2
|
d14e79ff45ac22139df111c07fd130c2b37a1ad3
|
/slide.py
|
41a2425b464aae7f4b25d60152fdcd42e5a85a8d
|
[] |
no_license
|
msarch/slide
|
90127c19ed1c3fc940bec521e29fec04cf531ec4
|
ec8d34124e43e1a5cde4290de5560e90a3d3f669
|
refs/heads/master
| 2021-01-19T18:41:13.309224
| 2017-04-28T19:41:23
| 2017-04-28T19:41:23
| 88,375,102
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,862
|
py
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
# simple pyglet animation
# http://www.github.com/msarch/slide
import math
import pyglet
from pyglet.gl import *
DEG2RAD = 2* math.pi/360
OMEGA = 360.0 * 0.5 # angular velocity (rev/s) : 1/2 rev/s
ORIGIN = [1280/2,800/2,0] # x,y of screen center, rotation = 0
alpha = 0.0 # initial angle
vis = 1 # visibility switch
#---------------------------------- SKETCH ------------------------------------
class Sketch(pyglet.graphics.Group): # subclass with position/rotation ability
    '''
    'sketches' are regular pyglet graphics.Groups whose 'set_state' and
    'unset_state' methods are used to add move and rotate functionality.
    Adding a shape to a group (batch.add) returns the matching vertex list;
    color and vertex position are accessible through .colors and .vertices.

    pos is [x, y, rotation_degrees]; the GL matrix is pushed before drawing
    the group's shapes and popped afterwards, so transforms do not leak.
    '''
    def __init__(self,pos=ORIGIN):
        super(Sketch, self).__init__()
        self.pos=pos
    def set_state(self):
        # Called by pyglet before drawing this group's vertex lists.
        glPushMatrix()
        glTranslatef(self.pos[0], self.pos[1], 0)
        glRotatef(self.pos[2], 0, 0, 1) # rot. in degrees; x,y,z of rot. axis
    def unset_state(self):
        # Restore the matrix saved in set_state.
        glPopMatrix()
# vertex_list modifier function -----------------------------------------------
def translate(vtx, pos):
    """Return a flat tuple of 2-D vertices from *vtx* shifted by *pos*.

    vtx is a flat sequence (x0, y0, x1, y1, ...); pos supplies the (dx, dy)
    offset in its first two elements (extra elements are ignored).

    Rewritten without the ``reduce`` builtin (Python 2-only; the original
    also raised TypeError on an empty vertex list — now returns ()).
    """
    shifted = zip([x + pos[0] for x in vtx[0::2]],
                  [y + pos[1] for y in vtx[1::2]])
    return tuple(coord for pair in shifted for coord in pair)
#--------------------------------- PYGLET STUFF -------------------------------
# Window/GL setup. Order matters: the GL calls require the window (and its
# context) to exist first.
batch = pyglet.graphics.Batch() # holds all graphics
canvas = pyglet.window.Window(fullscreen=True)
canvas.set_mouse_visible(False)
glEnable(GL_LINE_SMOOTH)
glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)
glEnable(GL_BLEND) # transparency
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # transparency
black =( 0, 0, 0, 255)
glClearColor(*black) # background color
@canvas.event
def on_key_press(symbol, modifiers):
    """Keyboard handler: 'I' toggles the overlay guides, any other key quits."""
    global vis
    if symbol != pyglet.window.key.I:
        pyglet.app.exit()
    else:
        vis = not vis  # flip the visibility switch
        toggle(vis)
@canvas.event
def draw():
    # NOTE(review): pyglet dispatches a window event named 'on_draw'; a
    # handler named 'draw' is presumably never dispatched as an event and
    # only runs via the direct call in update() below — confirm intent.
    canvas.clear()
    batch.draw() # ALL graphics are added to this single batch!
def update(dt): # updates an uniform circular motion then calls custom actions
    global alpha
    # NOTE(review): '%' binds tighter than '+=', so only the per-frame
    # increment (dt * OMEGA) is reduced mod 360 — alpha itself grows without
    # bound. Harmless here since alpha only feeds periodic functions
    # (cos/sin/glRotatef), but the comment overstates what this line does.
    alpha+= dt * OMEGA % 360 # stay within [0,360°]
    updates(dt)
    draw()
def toggle(vis):
    """Show/hide the overlay guides (vr, hr) and the red dot.

    vis is truthy/falsy; alpha becomes 255 or 0 accordingly.
    """
    guide_rgba = (200, 200, 200, 255 * vis)
    for guide in (vr, hr):
        guide.colors = guide_rgba * 5
    dot.colors = (255, 0, 0, 255 * vis) * 5
#-------------------------------- SCENE STUFF ---------------------------------
# One Sketch (render group) per independently-moving layer.
still = Sketch() # is a still sketch, 'default'
wheel = Sketch() # is revolving, 'default'
hslide = Sketch() # sliding horizontally
vslide = Sketch() # slides vertically
# dot -------------------------------------------------------------------------
# dot, transparency toggled when key 'I' pressed
# 5-vertex cross marker attached to the revolving 'wheel' group.
dot=batch.add(5, pyglet.gl.GL_LINE_STRIP, wheel,'v2i/static', 'c4B/static')
dot.colors = (255,0,0,255*vis)*5 # vertex list color data, rgba format
dot.vertices = translate([-3, 0, 3, 0, 0, 0, 0, 3, 0, -3], (400-10,0))
# recs ------------------------------------------------------------------------
def rec(w=100, h=100, color=(255,255,255,255), pos=ORIGIN, sk=still):
    """Add a w-by-h rectangle (two triangles, 6 vertices) to the batch.

    color is an RGBA 4-tuple, pos the lower-left corner offset, sk the
    Sketch group the rectangle belongs to. Returns the vertex list that
    batch.add() produced so callers can recolor/move it later.
    """
    vlist = batch.add(6, pyglet.gl.GL_TRIANGLES, sk, 'v2f/static', 'c4B/static')
    vlist.colors = color * 6
    vlist.vertices = translate((0, 0, 0, h, w, h, w, h, w, 0, 0, 0), pos)
    return vlist
gu = int(800/85) # overall drawing V size is 85 gu and just fits into screen
# NOTE(review): 'len' shadows the builtin from here on in this module.
len, wth, thk = 33 * gu, 11 * gu, 6 * gu # proportions of the kapla block
white = (255, 255, 255, 255)
# four + 1 horizontal rects
r1 = rec(w=len, h=thk, color=white, pos=(wth/2 + thk, wth/2))
r2 = rec(w=len, h=thk, color=white, pos=(wth/2 + thk, -wth/2-thk))
r3 = rec(w=len, h=thk, color=white, pos=(-len-thk-wth/2, wth/2))
r4 = rec(w=len, h=thk, color=white, pos=(-len-thk-wth/2, -wth/2-thk))
s1 = rec(w=len, h=wth, color=white, pos=(-len/2, -wth/2, 0, 0), sk=hslide)
# four vertical rects
r5 = rec(w=thk, h=len, color=white, pos=(wth/2, wth/2+thk))
r6 = rec(w=thk, h=len, color=white, pos=(-wth/2-thk, wth/2+thk))
r7 = rec(w=thk, h=len, color=white, pos=(wth/2, -len-thk-wth/2))
r8 = rec(w=thk, h=len, color=white, pos=(-wth/2 - thk, -len-thk-wth/2))
s2 = rec(w=wth, h=len, color=white, pos=(-wth/2, -len/2, 0.1, 0), sk=vslide)
# large rec, transparency toggled when key 'I' pressed
vr=batch.add(5, pyglet.gl.GL_LINE_STRIP, vslide, 'v2f/static', 'c4B/static')
vr.colors = (200,200,200,255*vis)*5 # vis = true/false visibility switch
vr.vertices = (-640,-len/2,640,-len/2,640,len/2,-640,len/2,-640, -len/2)
# large rec, transparency toggled when key 'I' pressed
hr=batch.add(5, pyglet.gl.GL_LINE_STRIP, hslide, 'v2f/static', 'c4B/static')
hr.colors = (200,200,200,255*vis)*5 # vis = true/false visibility switch
hr.vertices = (-len/2,-400,len/2,-400,len/2,400,-len/2,400,-len/2, -400)
# updates ---------------------------------------------------------------------
from itertools import cycle
previous_hdir, previous_vdir = 1, 1
# Sounds loaded eagerly at import time (I/O): requires bow.wav / bow1.wav
# next to the script.
BOW = pyglet.media.load('bow.wav', streaming=False)
BOW1 = pyglet.media.load('bow1.wav', streaming=False)
# kapla_colors
redk =(255, 69, 0, 255) # red kapla
bluk =( 0, 0, 140, 255) # blue kapla
grnk =( 0, 99, 0, 255) # green kapla
yelk =(255, 214, 0, 255) # yellow kapla
# Endless iterators: which rect is recolored next, and with which color.
target_h = cycle((r2,r1,r3,r4,s1)) # color change toggled by hslide movmnt
target_v = cycle((r5,r6,r8,r7,s2)) # color change toggled by vslide movmnt
h_color = cycle((redk, grnk, bluk, yelk)) # color choice for target_h
v_color = cycle((yelk, bluk, grnk, redk)) # color choice for target_v
def updates(dt):
    """Per-frame motion: rotate 'wheel', oscillate 'hslide'/'vslide';
    on each direction reversal, play a sound and recolor the next target.

    NOTE(review): relies on the Python 2-only builtin 'cmp' and iterator
    '.next()' — this module is Python 2 code.
    """
    global previous_hdir, previous_vdir
    # wheel is rotating
    wheel.pos = [wheel.pos[0],wheel.pos[1], alpha]
    # hslide is oscillating
    # if direction changes, target_h colors cycle, sound played
    cosa = math.cos(alpha*DEG2RAD)
    previous_h_pos = hslide.pos[0]
    hslide.pos = [640+cosa*(640-len/2), hslide.pos[1], 0]
    # cmp() yields -1/0/+1; a sign flip makes the sum with the previous
    # direction zero, which signals a turnaround at either end.
    new_hdir = cmp( previous_h_pos, hslide.pos[0])
    if new_hdir + previous_hdir == 0:
        BOW.play()
        target_h.next().colors = h_color.next()*6
    previous_hdir=new_hdir
    # vslide is oscillating
    # if direction changes, target_v colors cycle, sound played
    sina = math.sin(alpha*DEG2RAD)
    previous_vslide_pos1 = vslide.pos[1]
    vslide.pos = [vslide.pos[0], 400+sina*(400-len/2), 0]
    new_vdir = cmp( previous_vslide_pos1, vslide.pos[1])
    if new_vdir + previous_vdir == 0:
        BOW1.play()
        target_v.next().colors = v_color.next()*6
    previous_vdir=new_vdir
#---------------------------------- MAIN --------------------------------------
if __name__ == "__main__":
    # Drive the animation at ~60 updates per second, then enter the loop.
    pyglet.clock.schedule_interval(update, 1.0/60)
    pyglet.app.run()
|
[
"msarch@free.fr"
] |
msarch@free.fr
|
72b25c0a76850bdcae1bfe7848dc4a64f1b3ddc7
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_96/762.py
|
2083c366cc527bd043cb724883b8d032aaa22ed4
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,937
|
py
|
import math
def dfs(N, S, P, t):
    """Depth-first search over dancer-score splits (GCJ "Dancing With the
    Googlers" style). Returns the best count of triplets with a value >= P,
    using exactly the S available "surprising" splits, or -1 if impossible.

    States are (surprises_left, stage_index, score_so_far); t[i] is the
    total for stage i, split into p1+p2+p3 with each part in 0..10.
    """
    stack = [(S, -1, 0)]
    best = -1
    while stack:
        surprises_left, stage, score = stack.pop()
        if stage == N - 1:
            # All totals assigned; count only exact use of the S surprises.
            if surprises_left == 0:
                best = max(best, score)
            continue
        total = t[stage + 1]
        base = total / 3
        for a in (-1, 0, 1):
            p1 = base + a
            if p1 < 0 or p1 > 10:
                continue
            for b in (-2, -1, 0, 1, 2):
                p2 = base + b
                p3 = total - p1 - p2
                if p2 < 0 or p2 > 10 or p3 < 0 or p3 > 10:
                    continue
                delta = 1 if (p1 >= P or p2 >= P or p3 >= P) else 0
                d31 = math.fabs(p3 - p1)
                d32 = math.fabs(p3 - p2)
                d12 = math.fabs(p1 - p2)
                if d31 <= 1 and d32 <= 1 and d12 <= 1:
                    # Ordinary split: parts differ by at most 1.
                    stack.append((surprises_left, stage + 1, score + delta))
                elif d31 <= 2 and d32 <= 2 and d12 <= 2 and surprises_left > 0:
                    # Surprising split: spends one of the S surprises.
                    stack.append((surprises_left - 1, stage + 1, score + delta))
    return best
# Driver: read the Code Jam input file, solve each case, write the output
# file. Python 2 code: 'map' returns an indexable list here, and neither
# file handle is ever closed (NOTE(review): fine for a one-shot contest
# script, but 'with open(...)' would be safer).
f = open('B-small-attempt0.in', 'r')
T=int(f.readline())
totalOut=''
for i in range(1,T+1):
    S= f.readline()
    # Strip a single trailing newline, if present.
    if (('\n') in S):
        S=S[:-1]
    lst=S.split(' ')
    # Line format: N S P t_1 ... t_N
    N=int(lst[0])
    S=int(lst[1])
    P=int(lst[2])
    lst=lst[3:]
    t=map(int,lst)
    totalOut+= 'Case #'+str(i)+': '+str(dfs(N,S,P,t))+'\n'
# Drop the final newline before writing.
totalOut=totalOut[:-1]
#print totalOut
outD= open ('B-small-attempt0.out','w')
outD.write(totalOut)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
1a7ef9178fd407f25a697c82abad53c9092aff20
|
e64c3c051f6b70aa0bab5be3508448578b1e0b15
|
/core/orienteer/models/attitude/__init__.py
|
f338c8ab793aec114ca2cc88b491c2a28c974f30
|
[] |
no_license
|
davenquinn/Orienteer
|
c85c6cd16145ef545279c38bdb466856c02abd9d
|
4f77575c12cf7a04ce70e3045630079ab8ebc5e0
|
refs/heads/master
| 2022-07-10T20:39:36.414259
| 2022-06-15T18:56:38
| 2022-06-15T18:56:38
| 172,312,115
| 0
| 0
| null | 2022-06-15T18:56:39
| 2019-02-24T08:09:26
|
CoffeeScript
|
UTF-8
|
Python
| false
| false
| 8,078
|
py
|
from __future__ import division
import numpy as N
from shapely.geometry import mapping
from sqlalchemy import func, select, CheckConstraint
from sqlalchemy.ext.associationproxy import association_proxy
import logging as log
from geoalchemy2.types import Geometry
from geoalchemy2.shape import from_shape, to_shape
from attitude.orientation import Orientation
from attitude.coordinates import centered
from attitude.error.axes import sampling_axes, noise_axes, angular_errors
from sqlalchemy.dialects.postgresql import array, ARRAY
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy import (
Column,
String,
Text,
Integer,
DateTime,
ForeignKey,
Boolean,
Float,
)
from .tag import Tag, attitude_tag
from ..feature import DatasetFeature, SRID
from ...database import db
from ..base import BaseModel
class Attitude(BaseModel):
    """
    A planar orientation measurement (strike/dip/rake) fitted by PCA to the
    3-D point set of an associated DatasetFeature, stored together with the
    fitted axes and angular-error estimates.
    """
    __tablename__ = "attitude"
    # Single-table inheritance: AttitudeGroup shares this table,
    # discriminated by the 'type' column.
    __mapper_args__ = dict(polymorphic_on="type", polymorphic_identity="single")
    id = Column(Integer, primary_key=True)
    type = Column(String)
    # Source geometry this measurement was extracted from; rows follow the
    # feature on delete/update.
    feature_id = Column(
        Integer,
        ForeignKey("dataset_feature.id", ondelete="CASCADE", onupdate="CASCADE"),
    )
    feature = relationship(DatasetFeature)
    # Fitted orientation angles (degrees, per strike_dip_rake() below).
    strike = Column(Float)
    dip = Column(Float)
    rake = Column(Float)
    # PCA fit quality; see the note in calculate().
    correlation_coefficient = Column(Float)
    # PCA outputs: principal axes (2-D array), singular values, and
    # hyperbolic error-axis lengths (see calculate()).
    principal_axes = Column(ARRAY(Float, dimensions=2, zero_indexes=True))
    singular_values = Column(ARRAY(Float, zero_indexes=True))
    hyperbolic_axes = Column(ARRAY(Float, zero_indexes=True))
    n_samples = Column(Integer)
    max_angular_error = Column(Float)
    min_angular_error = Column(Float)
    # Convenience proxy to the owning feature's geometry.
    geometry = association_proxy("feature", "geometry")
    # Centroid of the fitted point set (set in calculate()).
    center = Column(Geometry("POINTZ", srid=SRID))
    valid = Column(Boolean)
    # Self-referential link to the AttitudeGroup this row belongs to.
    member_of = Column(Integer, ForeignKey("attitude.id"))
    group = relationship("AttitudeGroup", back_populates="measurements", remote_side=id)
    _tags = relationship("Tag", secondary=attitude_tag, backref="features")
    tags = association_proxy("_tags", "name")
    __table_args__ = (
        # Check that we don't define group membership and feature
        # if isn't a group.
        CheckConstraint("feature_id IS NOT NULL = (type = 'single')"),
        # Groups should not be members of other groups
        CheckConstraint(
            "type IN ('group','collection') = (member_of IS NULL AND feature_id IS NULL)"
        ),
    )
    @property
    def aligned_array(self):
        """
        Array aligned with the principal components
        of the orientation measurement.
        """
        return N.array(self.feature.axis_aligned)
    def error_ellipse(self):
        # Imported lazily to avoid a circular import with the plot module.
        from .plot import error_ellipse
        return error_ellipse(self)
    def plot_aligned(self):
        from attitude.display import plot_aligned
        return plot_aligned(self.pca())
    @property
    def array(self):
        # Raw coordinates of the measured points.
        return self.feature.array
    @property
    def centered_array(self):
        # Points translated so their centroid sits at the origin.
        return centered(self.array)
    def regress(self):
        # NOTE(review): returns the bound method itself, not pca(); callers
        # presumably invoke the result — confirm before "fixing".
        return self.pca
    def pca(self):
        """
        Initialize a principal components
        analysis against the attitude.

        The Orientation is rebuilt lazily from the stored principal axes
        scaled by the singular values, then memoized on the instance.
        """
        try:
            return self.__pca
        except AttributeError:
            a = self.centered_array
            ax = N.array(self.principal_axes) * N.array(self.singular_values)
            self.__pca = Orientation(a, axes=ax)
            return self.__pca
    def __repr__(self):
        # Format helper: "unmeasured" when the value is None or unformattable.
        def val(obj, s):
            try:
                return s.format(obj)
            except ValueError:
                return "unmeasured"
            except TypeError:
                return "unmeasured"
        s = "{cls} {id}: strike {s}, dip {d}".format(
            cls=self.__class__.__name__,
            id=self.id,
            s=val(self.strike, "{0:.1f}"),
            d=val(self.dip, "{0:.1f}"),
        )
        return s
    def serialize(self):
        """Return a GeoJSON-style Feature dict for this measurement."""
        return dict(
            type="Feature",
            id=self.id,
            tags=list(self.tags),
            geometry=mapping(to_shape(self.feature.geometry)),
            properties=dict(
                r=self.correlation_coefficient,
                center=mapping(to_shape(self.center)),
                strike=self.strike,
                dip=self.dip,
                rake=self.rake,
                n_samples=self.n_samples,
                hyperbolic_axes=self.hyperbolic_axes,
                axes=self.principal_axes,
            ),
        )
    def calculate(self):
        """Fit the PCA and populate orientation, axis and error columns.

        Silently returns (leaving fit columns unset) when the coordinate
        set is too small or degenerate for a fit.
        """
        self.center = func.ST_SetSRID(func.ST_MakePoint(*self.array.mean(axis=0)), SRID)
        try:
            pca = Orientation(self.centered_array)
        except IndexError:
            # If there aren't enough coordinates
            return
        except ValueError:
            return
        self.principal_axes = pca.axes.tolist()
        self.singular_values = pca.singular_values.tolist()
        # Really this is hyperbolic axis lengths
        # should change API to reflect this distinction
        self.hyperbolic_axes = sampling_axes(pca).tolist()
        self.n_samples = pca.n
        self.strike, self.dip, self.rake = pca.strike_dip_rake()
        if self.dip == 90:
            # Exactly-vertical fits are flagged invalid.
            self.valid = False
        a = angular_errors(self.hyperbolic_axes)
        self.min_angular_error = 2 * N.degrees(a[0])
        self.max_angular_error = 2 * N.degrees(a[1])
        # Analogous to correlation coefficient for PCA
        # but not exactly the same
        self.correlation_coefficient = pca.explained_variance
    def extract(self, *args, **kwargs):
        # Delegate raw-data extraction to the underlying feature.
        self.feature.extract(*args, **kwargs)
    def __str__(self):
        return "Attitude {}".format(self.id)
class AttitudeGroup(Attitude):
    """
    An attitude fitted to the pooled coordinates of several member
    Attitude rows (single-table inheritance, type='group').
    """
    __mapper_args__ = dict(polymorphic_on="type", polymorphic_identity="group")
    # When true, members are assumed coplanar: pool the raw arrays and
    # center the pooled set once, instead of centering each member.
    same_plane = Column(Boolean, nullable=False, default=False, server_default="0")
    measurements = relationship(Attitude)
    def __init__(self, attitudes, **kwargs):
        """Build a group from a list of Attitude rows and fit it."""
        self.type = "group"
        # Groups have no source feature and cannot belong to another group
        # (enforced by the CHECK constraints declared on Attitude).
        self.feature_id = None
        self.member_of = None
        self.measurements = attitudes
        Attitude.__init__(self, **kwargs)
        self.calculate()
    def __str__(self):
        return "Group {}".format(self.id)
    # Add a property for geometry that creates a union
    # of all component data
    def __build_geometry(self):
        """
        Un-executed query to find geometry from component
        parts.
        """
        union = func.ST_Union(DatasetFeature.geometry)
        # Fixed: was `func.ST_SetSrid(__, srid.world)` — `srid` is not a name
        # in this module. Use the SRID constant and the ST_SetSRID spelling
        # already used in Attitude.calculate().
        return (
            select([func.ST_SetSRID(union, SRID)])
            .select_from(DatasetFeature.__table__.join(Attitude))
            .where(Attitude.member_of == self.id)
            .group_by(Attitude.member_of)
        )
    @hybrid_property
    def geometry(self):
        # Instance access: run the union query and return the scalar result.
        return db.session.execute(self.__build_geometry()).scalar()
    @geometry.expression
    def geometry(cls):
        # Fixed: the bare name `__build_geometry` was mangled into a module
        # lookup and raised NameError; resolve it via the class attribute
        # (name mangling on `cls.__build_geometry` finds the method).
        return cls.__build_geometry(cls)
    @property
    def centered_array(self):
        """Pooled member coordinates, centered according to same_plane."""
        attr = "array" if self.same_plane else "centered_array"
        arrays = [getattr(m, attr) for m in self.measurements]
        if len(arrays) == 0:
            # No members yet: empty coordinate set.
            return N.array([])
        arr = N.concatenate(arrays)
        if self.same_plane:
            return centered(arr)
        return arr
    @property
    def array(self):
        # Raw pooled coordinates of every member measurement.
        return N.concatenate([m.array for m in self.measurements])
    def serialize(self):
        """Return a JSON-serializable summary of the grouped fit."""
        return dict(
            type="GroupedAttitude",
            id=self.id,
            strike=self.strike,
            dip=self.dip,
            tags=list(self.tags),
            same_plane=self.same_plane,
            r=self.correlation_coefficient,
            n_samples=self.n_samples,
            hyperbolic_axes=self.hyperbolic_axes,
            axes=self.principal_axes,
            measurements=[m.id for m in self.measurements],
        )
|
[
"dev@davenquinn.com"
] |
dev@davenquinn.com
|
cb05c2490be60fcbc72afce15389303ae36f68d2
|
5f0f0865b7e4e2aa1867a88c138df56936c0b23b
|
/blocks/tests/test_pylearn2.py
|
6317698fe0dd1326830e4961c5d482d72d3399e1
|
[
"MIT"
] |
permissive
|
jych/blocks
|
2c709dcf042f4259981adcb54d9e3a48dac0c87f
|
995cb7b67545b272877ecf9e90285cc71c9e6091
|
refs/heads/master
| 2021-01-09T06:51:34.967301
| 2014-11-27T04:12:40
| 2014-11-27T04:12:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,329
|
py
|
import logging
import numpy
import pylearn2
from pylearn2.space import VectorSpace
from pylearn2.testing.datasets import random_dense_design_matrix
from pylearn2.train import Train
from pylearn2.training_algorithms.sgd import SGD
from blocks.bricks import Sigmoid, MLP
from blocks.cost import SquaredError
from blocks.initialization import IsotropicGaussian, Constant
from blocks.pylearn2 import BlocksModel, BlocksCost
def test_pylearn2_trainin():
    """Smoke test: a blocks MLP wrapped for Pylearn2 trains under SGD.

    NOTE(review): the name is presumably a typo for "...training", but test
    runners discover the function by name, so it is kept unchanged.
    """
    # Construct the model
    mlp = MLP(activations=[Sigmoid(), Sigmoid()], dims=[784, 100, 784],
              weights_init=IsotropicGaussian(), biases_init=Constant(0.01))
    mlp.initialize()
    cost = SquaredError()
    # Adapters exposing the blocks model/cost through the Pylearn2 API.
    block_cost = BlocksCost(cost)
    block_model = BlocksModel(mlp, (VectorSpace(dim=784), 'features'))
    # Load the data
    # Fixed seed keeps the synthetic datasets reproducible.
    rng = numpy.random.RandomState(14)
    train_dataset = random_dense_design_matrix(rng, 1024, 784, 10)
    valid_dataset = random_dense_design_matrix(rng, 1024, 784, 10)
    # Silence Pylearn2's logger
    logger = logging.getLogger(pylearn2.__name__)
    logger.setLevel(logging.ERROR)
    # Training algorithm
    sgd = SGD(learning_rate=0.01, cost=block_cost, batch_size=128,
              monitoring_dataset=valid_dataset)
    train = Train(train_dataset, block_model, algorithm=sgd)
    # Wall-clock budget (seconds) bounds the test's runtime.
    train.main_loop(time_budget=3)
|
[
"bart.vanmerrienboer@gmail.com"
] |
bart.vanmerrienboer@gmail.com
|
7c4bb7211655df9f643a3c3968ccecb8f9c5c2bd
|
700b0528e949d7eacb6846ee7579e912b854fd51
|
/TrustPot/settings.py
|
f56c0223ba3afd1780dabd7e621d0a5a53a18360
|
[] |
no_license
|
nucleoosystem/TrustPot
|
340f33a46757fa6dbd98eae6be248f14ea8ca099
|
5e72bffdcd0f4232e98638387c794e7aaf684c9c
|
refs/heads/master
| 2020-06-18T17:47:40.154449
| 2015-02-18T03:27:43
| 2015-02-18T03:27:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,268
|
py
|
"""
Django settings for TrustPot project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9ni!$$2aw4)il(+6xb2xzmboivmza(1(hugs!$^h(r!7bg(5$c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'reversion',
'translation'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS += (
"django.core.context_processors.request",
)
ROOT_URLCONF = 'TrustPot.urls'
WSGI_APPLICATION = 'TrustPot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = 'static/'
STATIC_URL = '/static/'
|
[
"danimaribeiro@gmail.com"
] |
danimaribeiro@gmail.com
|
b561709b47b0de856671dd1a8fa6d77dd686e849
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5690574640250880_0/Python/EnTerr/MinesweeperMaster.py
|
889a6773a64d627ce18e10efa7cc9d0accbd413b
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,313
|
py
|
#
# Google Code Jam 2014
# Roaund 0: C. Minesweeper Master
# submission by EnTerr
#
'''
Input
The first line of the input gives the number of test cases, T. T lines follow.
Each line contains three space-separated integers: R, C, and M (Rows, Columns, Mines).
Output
For each test case, output a line containing "Case #x:", where x is the test case number.
On the following R lines, output the board configuration with C characters per line,
using '.' to represent an empty cell, '*' to represent a cell that contains a mine,
and 'c' to represent the clicked cell. If there is no possible configuration,
then instead of the grid, output a line with "Impossible" instead.
If there are multiple possible configurations, output any one of them.
Limits
0 <= M < R * C.
Small dataset
1 <= T <= 230.
1 <= R, C <= 5.
Large dataset
1 <= T <= 140.
1 <= R, C <= 50.
Sample
---Input
5
5 5 23
3 1 1
2 2 1
4 7 3
10 10 82
---Output
Case #1:
Impossible
Case #2:
c
.
*
Case #3:
Impossible
Case #4:
......*
.c....*
.......
..*....
Case #5:
**********
**********
**********
****....**
***.....**
***.c...**
***....***
**********
**********
**********
'''
import sys
# NOTE(review): time.clock was removed in Python 3.8; this is Python 2 code.
from time import clock
# Input file path comes from the command line.
f = open(sys.argv[1])
# Deliberately shadows the builtin so later code reads lines from the file.
def input(): return f.readline().strip();
from itertools import product, combinations
def genBoards(R, C, M):
    """Yield every R-by-C board with M mines ('*') among '.' cells.

    Each row carries a trailing '' sentinel and one extra all-sentinel row
    is appended, so neighbour lookups at index C / R (and at -1, which
    Python wraps around to the sentinel row) read as boundary cells.
    """
    cells = product(range(R), range(C))
    for mine_set in combinations(cells, M):
        grid = [['.'] * C + [''] for _ in range(R)]
        for row, col in mine_set:
            grid[row][col] = '*'
        grid.append([''] * (C + 1))
        yield grid
def oneClickSolution(R, C, M):
    """Brute-force every board layout; return a printable grid whose best
    single click reveals all non-mine cells, or 'Impossible'.
    """
    for board in genBoards(R, C, M):
        # Number every empty cell with its adjacent-mine count, remembering
        # the (last seen) cell with the smallest count as the click target.
        best = 10
        for row in range(R):
            for col in range(C):
                if board[row][col] != '.':
                    continue
                n = sum(board[row + dr][col + dc] == '*'
                        for dr in (-1, 0, 1) for dc in (-1, 0, 1))
                board[row][col] = repr(n)  # same as the old backtick-repr
                if n <= best:
                    best = n
                    bestR, bestC = row, col
        if best >= 10:
            continue  # board is all mines; no cell to click
        # Flood-fill from the target: a '0' cell opens all 8 neighbours.
        stack = [(bestR, bestC)]
        opened = 0
        while stack:
            r, c = stack.pop()
            if board[r][c] == '0':
                for dr in (-1, 0, 1):
                    for dc in (-1, 0, 1):
                        if dr or dc:  # we don't add the one we popped back
                            stack.append((r + dr, c + dc))
            # '' sentinels count as substrings of '.*', so they are skipped
            # here too — only freshly numbered cells get opened.
            if board[r][c] not in '.*':
                board[r][c] = '.'
                opened += 1
        if M + opened == R * C:
            board[bestR][bestC] = 'c'
            return '\n'.join(''.join(row[:-1]) for row in board[:-1])
    return 'Impossible'
# Driver loop (Python 2: print statements, xrange, file-shadowed input()).
# Progress and timing go to stderr so stdout stays a clean answer file.
clk = clock()
for caseNo in xrange(1, int(input())+1):
    R, C, M = map(int, input().split())
    print >>sys.stderr, caseNo, R, C, M #, oneClickSolution(R, C, M)<>'Impossible'
    print 'Case #%d:' % caseNo
    print oneClickSolution(R, C, M)
print >>sys.stderr, 'time= %.1f seconds' % (clock()-clk )
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
556c12041b5bfbc25611e4d336cb5c75bf26346e
|
088f76ed195918dcf35fe77d5832a75987cd183c
|
/modulos/db/migrations/0001_initial.py
|
cd415d70496dec49cf4ae7c400d93f41e5e01696
|
[] |
no_license
|
diegofer/alliance
|
0b25c8771425c32bb2fe0a9930c69ce23ebdacf3
|
2810f3faf06cc21253e5db485e5980ffa6eeb585
|
refs/heads/master
| 2021-01-25T03:20:14.340425
| 2013-10-10T06:57:20
| 2013-10-10T06:57:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,639
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: create the db_region table and emit South's post-create signal."""
        # Adding model 'Region'
        db.create_table(u'db_region', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('nombre', self.gf('django.db.models.fields.CharField')(max_length=40)),
            # Map-specific field types from the bundled modulos app.
            ('path', self.gf('modulos.django_google_maps.fields.PathField')()),
            ('center', self.gf('modulos.django_google_maps.fields.GeoLocationField')(max_length=100)),
            ('zoom', self.gf('django.db.models.fields.CharField')(max_length=3)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['usuarios.Usuario'])),
        ))
        db.send_create_signal(u'db', ['Region'])
def backwards(self, orm):
# Deleting model 'Region'
db.delete_table(u'db_region')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'db.region': {
'Meta': {'object_name': 'Region'},
'center': ('modulos.django_google_maps.fields.GeoLocationField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'path': ('modulos.django_google_maps.fields.PathField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['usuarios.Usuario']"}),
'zoom': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
u'usuarios.usuario': {
'Meta': {'object_name': 'Usuario'},
'ambito': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_padre': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['db']
|
[
"diegofernando83@gmail.com"
] |
diegofernando83@gmail.com
|
1bbdf5c26fea911af2c6b0458b250f758bbb9475
|
cf0d8d989da051a81afc60d9f4986c50c1462fb7
|
/python高级/09迭代器和生成器/t03_gen_func.py
|
84dca9e3fa12480dccad8bebaf5e585e7a7999cd
|
[] |
no_license
|
pankypan/PythonNotes
|
6a8da81a0e79f8bdc757f8493985321ef7873b44
|
48660b00b3b65cca409e61d34c32a024702d5a6e
|
refs/heads/master
| 2023-04-29T01:51:12.930856
| 2021-05-13T00:43:33
| 2021-05-13T00:43:33
| 274,271,807
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
# 生成器函数,函数里只要有yield关键字
def gen_func():
yield 1
yield 2
yield 3
def fib(index):
if index <= 2:
return 1
else:
return fib(index - 1) + fib(index - 2)
def fib2(index):
re_list = []
n, a, b = 0, 0, 1
while n < index:
re_list.append(b)
a, b = b, a + b
n += 1
return re_list
def gen_fib(index):
n, a, b = 0, 0, 1
while n < index:
yield b
a, b = b, a + b
n += 1
for data in gen_fib(10):
print(data)
# print (gen_fib(10))
# 斐波拉契 0 1 1 2 3 5 8
# 惰性求值, 延迟求值提供了可能
def func():
return 1
if __name__ == "__main__":
# 生成器对象, python编译字节码的时候就产生了,
gen = gen_func()
for value in gen:
print(value)
# re = func()
# pass
|
[
"1356523334@qq.com"
] |
1356523334@qq.com
|
2ab74c1ba61579a3956135b2dfce975ebe9e3e83
|
c659ce50198ddab51dc6e105523d74c09f25face
|
/graph-analyzer/app/io/__init__.py
|
4a6ac2824c0098e08b35a9a02cf9050f06555a4c
|
[] |
no_license
|
YanzheL/deeparcher
|
a9c4e150fecbe7413e75bf1c710c169e0b052a2e
|
85ae0de666ce05c41205748aeef40099e0a5116c
|
refs/heads/dev
| 2023-06-23T22:34:05.864953
| 2021-02-09T05:48:34
| 2021-02-09T05:48:34
| 248,565,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
from .cugraph import to_cugraph
from .dot import from_dot
from .dump import merge_attributes, dump_attributes
from .pb import to_pb_object, from_pb_object, to_pb, from_pb
|
[
"lee.yanzhe@yanzhe.org"
] |
lee.yanzhe@yanzhe.org
|
ff9efbbad2390741fe268885986710d2b4db69f2
|
90dfecb740ebb354c56a1542945384b9b03eacf0
|
/supplier/api/serializers.py
|
8142392929fa410c7ffa642c2f2e37ed9d2ce931
|
[] |
no_license
|
sujatakhadka111/cycleEcommerce
|
2cb688b77da916280792ed005580c8c1163a65ff
|
0da3771a9c247b2d24bcd30ec12bd47a7f8f21fd
|
refs/heads/master
| 2023-05-05T21:35:59.744465
| 2021-05-26T02:46:40
| 2021-05-26T02:46:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
from rest_framework import serializers
from supplier.models import Supplier, Category, Cycle, Gallery
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
class SupplierSerializer(serializers.ModelSerializer):
class Meta:
model = Supplier
fields = '__all__'
class GallerySerializer(serializers.ModelSerializer):
class Meta:
model = Gallery
fields = '__all__'
class CycleSerializer(serializers.ModelSerializer):
class Meta:
model = Cycle
fields = '__all__'
class CycleDetailSerializer(serializers.ModelSerializer):
supplier = SupplierSerializer()
category = CategorySerializer()
gallery = GallerySerializer(read_only=True, many=True, source='gallery_set')
class Meta:
model = Cycle
fields = ('supplier', 'category', 'gallery', 'name', 'slug', 'image', 'description', 'price',)
|
[
"programmertushant@gmail.com"
] |
programmertushant@gmail.com
|
6f07744254b0ab7acf4036bfef15f375bf52dbf4
|
2f09e893c3a21f4a17c95b99446d1efbf0b109f7
|
/huaytools/utils/__init__.py
|
9579bb4861c4c663a68bc19f1bb4b42632973cbb
|
[
"MIT"
] |
permissive
|
knight134/huaytools
|
b19f0078e724963415c63d60218ae3cc624f598a
|
cbecd6771c05f8241e756a7619047589397b16d3
|
refs/heads/master
| 2020-04-24T18:30:27.732740
| 2018-05-27T13:51:24
| 2018-05-27T13:51:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,949
|
py
|
""""""
import os
import sys
import pickle
import logging
from six.moves import urllib
from .bunch import Bunch, bunchify, unbunchify
from .time import *
def maybe_mkdirs(path, is_file=False, exist_ok=True):
"""递归创建文件夹
Args:
path (str): 待创建的路径,递归创建
is_file (bool): 是否为文件路径
exist_ok (bool): 默认为 True
Examples:
>>> maybe_mkdirs('D:/Tmp/a/b/')
'D:/Tmp/a/b/'
>>> maybe_mkdirs('D:/Tmp/a/b/c.txt')
'D:/Tmp/a/b/c.txt'
>>> maybe_mkdirs('D:/Tmp/a/b/c', is_file=True) # 假设 c 是一个无后缀文件
'D:/Tmp/a/b/c'
Returns:
str
"""
if is_file:
dirs, filename = os.path.split(path)
os.makedirs(dirs, exist_ok=exist_ok)
else:
os.makedirs(path, exist_ok=exist_ok)
return path
def save_to_pickle(obj, filepath):
"""
保存到 pickle 文件
Args:
obj: 需要保存的对象
filepath(str): 文件名
Returns:
None
"""
filepath = maybe_mkdirs(filepath, is_file=True)
with open(filepath, 'wb') as f:
pickle.dump(obj, f)
def load_from_pickle(filepath):
"""
从 pickle 加载对象
Args:
filepath(str): 文件名
Returns:
"""
with open(filepath) as f:
return pickle.load(f)
def set_logging_basic_config(**kwargs):
"""
快速设置 logging.basicConfig
Args can be specified:
filename: Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode: Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format: Use the specified format string for the handler.
datefmt: Use the specified date/time format.
style: If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level: Set the root logger level to the specified level.
stream: Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers: If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Returns:
None
"""
if 'format' not in kwargs:
kwargs['format'] = '[%(name)s] : %(asctime)s : %(levelname)s : %(message)s'
if 'level' not in kwargs:
kwargs['level'] = logging.INFO
logging.basicConfig(**kwargs)
def get_filepath_recursive(dirpath, abspath=True, recursive=True):
"""获取目录下所有文件名 (默认递归)
该函数主要用于需要一次性处理大量**相似文件**的情况
该函数主要利用 `os.walk(path)` 实现,
该函数会递归遍历 path 下的所有文件夹,并返回一个生成器
Args:
dirpath (str): 文件夹路径
abspath (bool): 是否返回绝对路径
recursive (bool): 是否递归
Examples:
>>> fs_gen = get_filepath_recursive('D:/Tmp')
Returns:
list
"""
fs = []
if recursive:
for root, _, files in os.walk(dirpath):
if abspath:
fs.extend((os.path.join(root, file) for file in files))
else:
fs.extend(files)
else:
if abspath:
fs.extend((os.path.join(dirpath, file) for file in os.listdir(dirpath)))
else:
fs.extend(os.listdir(dirpath))
return fs
def maybe_download(url, to_path='D:/Tmp', filename=None, expected_byte=None):
"""下载文件到指定目录
Args:
url (str): 文件下载路径
to_path (str): 下载到本地路径
filename (str): 重命名文件
expected_byte (int): 文件预期大小
Returns:
str: filepath
Examples:
>>> url = 'http://mattmahoney.net/dc/bbb.zip'
>>> filepath = maybe_download(url, filename='b.zip')
>>> fp = maybe_download(url, to_path='D:/Tmp/b', expected_byte=45370)
"""
if filename is not None:
filepath = os.path.join(maybe_mkdirs(to_path), filename)
else:
_, filename = os.path.split(url)
filepath = os.path.join(maybe_mkdirs(to_path), filename)
if not os.path.exists(filepath):
urllib.request.urlretrieve(url, filepath)
logging.info('File is downloading.')
if expected_byte is not None:
file_size = os.stat(filepath).st_size
if file_size != expected_byte:
logging.info('File has been damage, please download it manually.')
else:
logging.info('File is ready.')
return filepath
def cycle_iter(iterator):
"""
无限循环迭代器
Args:
iterator (Iterable): 可迭代对象
Examples:
>>> it = cycle_iter([1, 2, 3])
>>> for _ in range(4):
... print(next(it))
1
2
3
1
"""
# while True:
# yield from iter(iterator)
from itertools import cycle
return cycle(iterator)
def system_is_windows():
"""
If the system is windows, return True
Examples:
>>> if system_is_windows():
... print("Windows")
Windows
"""
import platform
return platform.system() == "Windows"
is_windows_system = system_is_windows()
def get_logger(name=None, fname=None, mode='a', level=logging.INFO, stream=None,
fmt="[%(name)s] : %(asctime)s : %(levelname)s : %(message)s"):
"""创建一个 logger
默认 log to console,如果同时指定了 fname,还会将日志输出到文件
Examples:
>>> logger = get_logger("Test", stream=sys.stdout, fmt="[%(name)s] : %(levelname)s : %(message)s")
>>> logger.info("test")
[Test] : INFO : test
"""
logger = logging.Logger(name)
logger.setLevel(level)
fmt = logging.Formatter(fmt)
ch = logging.StreamHandler(stream)
ch.setFormatter(fmt)
logger.addHandler(ch)
if fname is not None:
fh = logging.FileHandler(fname, mode)
fh.setFormatter(fmt)
logger.addHandler(fh)
return logger
def to_unicode(txt, encoding='utf8', errors='strict'):
"""Convert text to unicode.
Args:
txt:
encoding:
errors:
Returns:
str
"""
if sys.version_info[0] >= 3:
unicode = str
if isinstance(txt, unicode):
return txt
return unicode(txt, encoding, errors=errors)
|
[
"imhuay@163.com"
] |
imhuay@163.com
|
4d753dfde319985d5ae4884af6dac0cbd1ef5e73
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_4_neat/16_0_4_aminoacid_fractalArt.py
|
4489c1d81b9dac58e47b59005b9f5a80872dbaee
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 474
|
py
|
def fractalArt(K, C, S, inp):
if S == K:
print 'Case #%d:' % (inp + 1),
for i in xrange(1, S + 1):
print i,
print
elif 2 * S <= K:
print 'Case #%d:' % (inp + 1), 'IMPOSSIBLE'
else:
print 'Case #%d:' % (inp + 1),
for i in xrange(2, 2 + S):
print i,
if __name__ == '__main__':
for i in xrange(input()):
K, C, S = map(int, raw_input().split())
fractalArt(K, C, S, i)
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
f0673b96822fd829a04400d223f9b5677c8fe4b1
|
ec3e57d2c4de3522585176300366d4a74a971b8b
|
/0x16-api_advanced/1-top_ten.py
|
70185f62bb9482ea23c602d5779d7844238172b3
|
[] |
no_license
|
moncada92/holberton-system_engineering-devops
|
562657ebaea2a26fa0c3f874b5e88e7267c73528
|
f40d3eb6fecbcf031e42b43afb716ac63d3b86a3
|
refs/heads/master
| 2020-12-23T03:37:03.172160
| 2020-10-08T01:27:14
| 2020-10-08T01:27:14
| 237,020,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
#!/usr/bin/python3
'''top ten in reddit'''
import requests
def top_ten(subreddit):
'''get the top 10'''
url = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)
agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \
(KHTML, like Gecko; Google Web Preview) \
Chrome/27.0.1453 Safari/537.36"
headers = {"User-Agent": agent}
response = requests.get(url, headers=headers).json()
if 'error' in response:
print('None')
return
_top = response['data']['children']
for i, top in enumerate(_top[:10], 1):
print(top['data']['title'])
|
[
"monk-da@hotmail.com"
] |
monk-da@hotmail.com
|
f971ede530c630222865d8708042fb42c083b737
|
32226e72c8cbaa734b2bdee081c2a2d4d0322702
|
/experiments/murtaza/vae/sawyer_torque_vae_td3.py
|
ae5d625df3f2c46a97b1dc2a24db2631b6b239fa
|
[
"MIT"
] |
permissive
|
Asap7772/rail-rl-franka-eval
|
2b1cbad7adae958b3b53930a837df8a31ab885dc
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
refs/heads/master
| 2022-11-15T07:08:33.416025
| 2020-07-12T22:05:32
| 2020-07-12T22:05:32
| 279,155,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,127
|
py
|
from sawyer_control.sawyer_reaching import SawyerXYZReachingImgMultitaskEnv
from railrl.launchers.launcher_util import run_experiment
from railrl.launchers.arglauncher import run_variants
import railrl.misc.hyperparameter as hyp
from railrl.torch.vae.relabeled_vae_experiment import experiment
if __name__ == "__main__":
vae_paths = {
"16": "/home/mdalal/Documents/railrl-private/data/local/05-14-sawyer-torque-vae-train-16/05-14-sawyer_torque_vae_train_16_2018_05_14_21_48_53_0000--s-32499/itr_1000.pkl",
"32": "/home/mdalal/Documents/railrl-private/data/local/05-14-sawyer-torque-vae-train-32/05-14-sawyer_torque_vae_train_32_2018_05_14_21_49_34_0000--s-13212/itr_1000.pkl",
"64": "/home/mdalal/Documents/railrl-private/data/local/05-14-sawyer-torque-vae-train-64/05-14-sawyer_torque_vae_train_64_2018_05_14_22_08_58_0000--s-19762/itr_1000.pkl",
}
use_gpu=True
variant = dict(
algo_kwargs=dict(
num_epochs=50,
num_steps_per_epoch=1000,
num_steps_per_eval=500,
tau=1e-2,
batch_size=128,
max_path_length=100,
discount=0.95,
),
env_kwargs=dict(
action_mode='torque',
reward='norm',
update_hz=100,
),
replay_kwargs=dict(
fraction_goals_are_rollout_goals=0.2,
fraction_goals_are_env_goals=0.5,
),
algorithm='TD3',
normalize=False,
rdim=16,
render=False,
env=SawyerXYZReachingImgMultitaskEnv,
use_env_goals=True,
vae_paths=vae_paths,
wrap_mujoco_env=False,
do_state_based_exp=False,
exploration_noise=0.1,
snapshot_mode='last',
mode='here_no_doodad',
use_gpu=use_gpu,
)
n_seeds = 1
search_space = {
'exploration_type': [
'ou',
],
'algo_kwargs.num_updates_per_env_step': [3],
'algo_kwargs.discount': [0.98],
'replay_kwargs.fraction_goals_are_env_goals': [0, 0.5], # 0.0 is normal, 0.5 means half goals are resampled from env
'replay_kwargs.fraction_goals_are_rollout_goals': [0.2],#[0.2, 1.0], # 1.0 is normal, 0.2 is (future, k=4) HER
'exploration_noise': [0.25],
'algo_kwargs.reward_scale': [1e-4], # use ~1e-4 for VAE experiments
'training_mode': ['train', ],
'testing_mode': ['test', ],
'rdim': [16, 32, 64], # Sweep only for VAE experiments
'seedid': range(n_seeds),
'hidden_sizes':[[100, 100]],
}
# run_variants(experiment, sweeper.iterate_hyperparameters(), run_id=10)
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
for variant in sweeper.iterate_hyperparameters():
n_seeds = 1
exp_prefix = 'test'
mode = 'here_no_doodad'
for i in range(n_seeds):
run_experiment(
experiment,
mode=mode,
exp_prefix=exp_prefix,
variant=variant,
use_gpu=use_gpu,
)
|
[
"asap7772@berkeley.edu"
] |
asap7772@berkeley.edu
|
ec52c4722e197827169f4edb78d23a75beb1cda9
|
c71e5115b895065d2abe4120799ffc28fa729086
|
/procon-archive/atcoder.jp/abc170/abc170_a/Main.py
|
16f4853ff2370c03e22ec9b47f568e64193acb68
|
[] |
no_license
|
ken0105/competitive-programming
|
eb82f92a7b7ad0db601ea341c1441de6c6165064
|
f918f85a0ea6dfbe9cac3ef835f80503bb16a75d
|
refs/heads/master
| 2023-06-05T09:55:25.264731
| 2021-06-29T14:38:20
| 2021-06-29T14:38:20
| 328,328,825
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
import math
if __name__ == "__main__":
n = list(map(int,input().split()))
for i in range(len(n)):
if n[i] == 0:
print(i + 1)
exit()
|
[
"iwata.kenaaa@gmail.com"
] |
iwata.kenaaa@gmail.com
|
3ebe3d0d69e0fb55f8d199180ad06e9cf03bca59
|
e9d2ab28bd23021aef1e478439e290d13dd5ff58
|
/python/EXAMPLES/projects/GUI_SIMPLE/p6_gui_calculate_WORKED/index.py
|
557d6ef504415e12830cc9d8f1ebaad3750d3131
|
[] |
no_license
|
zlodiak/lessons
|
cb2177203760200672cf4eec546330d9b1a87f7f
|
f9a08a51c142d37cd8c4b2d50ba5925898b1acf6
|
refs/heads/master
| 2020-12-24T08:30:30.018325
| 2016-09-17T09:27:19
| 2016-09-17T09:27:19
| 29,296,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,449
|
py
|
from tkinter import *
root = Tk()
root.geometry("500x500")
label1 = Label(root, text = "Число 1", bg = "red", fg = "white")
label1.pack(side = LEFT, anchor=NW)
input1 = Entry(root, width = 20, bd = 3)
input1.pack(side = LEFT, anchor=NW)
label2 = Label(root, text = "Число 2", bg = "red", fg = "white")
label2.pack(side = LEFT, anchor=NW)
input2 = Entry(root, width = 20, bd = 3)
input2.pack(side = LEFT, anchor=NW)
var=IntVar()
var.set(1)
rad0 = Radiobutton(root,text="сложить",
variable=var,value=0)
rad1 = Radiobutton(root,text="вычесть",
variable=var,value=1)
rad2 = Radiobutton(root,text="умножить",
variable=var,value=2)
rad3 = Radiobutton(root,text="поделить",
variable=var,value=3)
rad0.pack()
rad1.pack()
rad2.pack()
rad3.pack()
def calculate():
i1 = int(input1.get())
i2 = int(input2.get())
operation = var.get()
if operation == 0:
resultat = i1 + i2
elif operation == 1:
resultat = i1 -i2
elif operation == 2:
resultat = i1 * i2
else:
resultat = i1 / i2
result.configure(text = resultat, fg = 'blue')
button = Button(root, text = 'выполнить действие', command = calculate)
button.pack()
result = Label(root, text = 'result', fg = 'red')
result.pack()
root.mainloop()
|
[
"you@example.com"
] |
you@example.com
|
6f89807c5c4b792b3ba95fad0c3b1187097b3c86
|
470b46ff2e28f5f7fc4ecd3629980fbfd13a6313
|
/programmers/x만큼 간격이 있는 n개의 숫자.py
|
2c2a67471edb7e22400647499464326623e3e484
|
[] |
no_license
|
jihoonyou/problem-solving
|
18c3ff05ae6c37e0c41cc755ffc7377a93bd02a6
|
b7e5500ac16ff1b4736954298d13e8a5e1ab8193
|
refs/heads/master
| 2021-06-12T11:37:49.894072
| 2021-04-22T17:08:27
| 2021-04-22T17:08:27
| 181,782,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
'''
x만큼 간격이 있는 n개의 숫자
https://programmers.co.kr/learn/courses/30/lessons/12954
'''
def solution(x, n):
answer = []
start = 0
while n != 0:
start += x
answer.append(start)
n -= 1
return answer
|
[
"pianoetvoix@gmail.com"
] |
pianoetvoix@gmail.com
|
88988fae9222f7680a67577b6a9d0720c5253a5b
|
034974504fabd1ee4101bf11ec310173200891b9
|
/src/python/strelka/scanners/scan_vb.py
|
20eac318118590eba034ed168cd4aaa0f0fea7e9
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jshlbrd/strelka
|
9826591eb53cc5e46887d925996c38fdbec81dbe
|
98c89afcc42d8f025e60f201ee9826b6086b5828
|
refs/heads/master
| 2020-04-11T04:36:02.620498
| 2019-06-11T19:43:44
| 2019-06-11T19:43:44
| 161,518,186
| 2
| 0
|
NOASSERTION
| 2018-12-12T16:51:38
| 2018-12-12T16:51:37
| null |
UTF-8
|
Python
| false
| false
| 2,492
|
py
|
import pygments
from pygments import formatters
from pygments import lexers
from strelka import strelka
class ScanVb(strelka.Scanner):
"""Collects metadata from Visual Basic script files.
Attributes:
lexer: Pygments lexer ('vbnet') used to parse the file.
"""
def init(self):
self.lexer = lexers.get_lexer_by_name('vbnet')
def scan(self, data, file, options, expire_at):
highlight = pygments.highlight(
data,
self.lexer,
formatters.RawTokenFormatter(),
)
highlight_list = highlight.split(b'\n')
ordered_highlights = []
for hl in highlight_list:
split_highlight = hl.split(b'\t')
if len(split_highlight) == 2:
token = split_highlight[0].decode()
value = split_highlight[1].decode().strip('\'"').strip()
highlight_entry = {'token': token, 'value': value}
if highlight_entry['value']:
ordered_highlights.append(highlight_entry)
self.event.setdefault('tokens', [])
self.event.setdefault('comments', [])
self.event.setdefault('functions', [])
self.event.setdefault('names', [])
self.event.setdefault('operators', [])
self.event.setdefault('strings', [])
position = 0
while position < len(ordered_highlights):
ohlp = ordered_highlights[position]
if ohlp['token'] not in self.event['tokens']:
self.event['tokens'].append(ohlp['token'])
if ohlp['token'] == 'Token.Comment':
if ohlp['value'] not in self.event['comments']:
self.event['comments'].append(ohlp['value'])
elif ohlp['token'] == 'Token.Name.Function':
if ohlp['value'] not in self.event['functions']:
self.event['functions'].append(ohlp['value'])
elif ohlp['token'] == 'Token.Name':
if ohlp['value'] not in self.event['names']:
self.event['names'].append(ohlp['value'])
elif ohlp['token'] == 'Token.Operator':
if ohlp['value'] not in self.event['operators']:
self.event['operators'].append(ohlp['value'])
elif ohlp['token'] == 'Token.Literal.String':
if ohlp['value'] not in self.event['strings']:
self.event['strings'].append(ohlp['value'])
position += 1
|
[
"liburdi.joshua@gmail.com"
] |
liburdi.joshua@gmail.com
|
0a78a39d5c03577d008f38ca0df3535425a19bfd
|
8d9cc46c596cdcd7bc30fc89f8b2fe0c7ed40c05
|
/restdoctor/rest_framework/custom_types.py
|
40d53aef69133f4e9c5116991d8cf400e2ff65eb
|
[] |
no_license
|
yakovistomin/restdoctor
|
ac9974f6acd36745f60e67425eeb44ee1527fb06
|
1f29dce6ff179b40dbc91a2a57de0ecdea7b6af7
|
refs/heads/master
| 2023-01-30T12:36:16.506062
| 2020-12-07T20:07:47
| 2020-12-07T20:07:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
from __future__ import annotations
import typing as t
from django.db import models
from rest_framework.parsers import BaseParser
from rest_framework.renderers import BaseRenderer
from rest_framework.response import Response
from rest_framework.routers import DynamicRoute, Route
from rest_framework.viewsets import ViewSet
OpenAPISchema = t.Dict[str, 'OpenAPISchema'] # type: ignore
LocalRefs = t.Dict[t.Tuple[str, ...], t.Any]
CodesTuple = t.Tuple[str, str]
ActionCodesMap = t.Dict[str, CodesTuple]
ActionMap = t.Dict[str, str]
Handler = t.Callable[..., Response]
ResourceExtraAction = t.Tuple[str, str, Handler]
RouteOrDynamicRoute = t.Union[Route, DynamicRoute]
RouteOrDynamicRouteList = t.List[RouteOrDynamicRoute]
Parsers = t.Sequence[BaseParser]
OptionalParser = t.Optional[BaseParser]
Renderers = t.Sequence[BaseRenderer]
OptionalRenderer = t.Optional[BaseRenderer]
ResourceMapElement = t.TypeVar('ResourceMapElement')
ResourceMap = t.Dict[str, ResourceMapElement]
ResourceViewsMap = ResourceMap[t.Type[ViewSet]]
ResourceActionsMap = ResourceMap[t.Set[str]]
ResourceHandlersMap = ResourceMap[Handler]
ResourceModelsMap = ResourceMap[t.Optional[models.Model]]
|
[
"s.butkin@bestdoctor.ru"
] |
s.butkin@bestdoctor.ru
|
b9dd064c283b696b7938ee5f7e9e8ebd7db7bd8e
|
e2e08d7c97398a42e6554f913ee27340226994d9
|
/pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_Route/test_c140981.py
|
f1078bce49f9f722a1cb00cd9b8cc419b55d8930
|
[] |
no_license
|
lizhuoya1111/Automated_testing_practice
|
88e7be512e831d279324ad710946232377fb4c01
|
b3a532d33ddeb8d01fff315bcd59b451befdef23
|
refs/heads/master
| 2022-12-04T08:19:29.806445
| 2020-08-14T03:51:20
| 2020-08-14T03:51:20
| 287,426,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
import pytest
import time
import sys
from page_obj.scg.scg_def import *
from page_obj.scg.scg_def_obj import *
from page_obj.scg.scg_def_log import *
from page_obj.common.rail import *
from page_obj.scg.scg_dev import *
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
test_id = "140981"
# 点击policy route里的“Add single Gateway”
def test_c140981(browser):
try:
login_web(browser, url=dev1)
into_fun(browser, 策略路由)
# 增加单网关路由
browser.find_element_by_xpath('//*[@id="button_area"]/div/input[2]').click()
time.sleep(1)
gettext = browser.find_element_by_xpath('//*[@id="for_config_tb_title"]/ul/li').text
# print(gettext)
try:
assert "增加新策略路由" in gettext
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "增加新策略路由" in gettext
except Exception as err:
# 如果上面的步骤有报错,重新设备,恢复配置
print(err)
rail_fail(test_run_id, test_id)
reload(hostip=dev1)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c"+str(test_id)+".py"])
|
[
"15501866985@163.com"
] |
15501866985@163.com
|
79f5b7f0154dc1d9f06027f04a34a9568c525ba0
|
0e083f405af00029c9ec31849f0f7f81c56844b5
|
/mmdeploy/backend/openvino/wrapper.py
|
ab91f8331b3763712bf98412003dbc3566133b1b
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmdeploy
|
39b9e7b611caab2c76a6142fcb99f0bf1d92ad24
|
5479c8774f5b88d7ed9d399d4e305cb42cc2e73a
|
refs/heads/main
| 2023-09-01T21:29:25.315371
| 2023-08-31T09:59:29
| 2023-08-31T09:59:29
| 441,467,833
| 2,164
| 605
|
Apache-2.0
| 2023-09-14T10:39:04
| 2021-12-24T13:04:44
|
Python
|
UTF-8
|
Python
| false
| false
| 5,353
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Dict, Optional, Sequence
import numpy as np
import torch
from mmdeploy.utils import Backend
from mmdeploy.utils.timer import TimeCounter
from ..base import BACKEND_WRAPPER, BaseWrapper
@BACKEND_WRAPPER.register_module(Backend.OPENVINO.value)
class OpenVINOWrapper(BaseWrapper):
"""OpenVINO wrapper for inference in CPU.
Args:
ir_model_file (str): Input OpenVINO IR model file.
output_names (Sequence[str] | None): Names of model outputs in order.
Defaults to `None` and the wrapper will load the output names from
model.
Examples:
>>> from mmdeploy.backend.openvino import OpenVINOWrapper
>>> import torch
>>>
>>> ir_model_file = 'model.xml'
>>> model = OpenVINOWrapper(ir_model_file)
>>> inputs = dict(input=torch.randn(1, 3, 224, 224, device='cpu'))
>>> outputs = model(inputs)
>>> print(outputs)
"""
def __init__(self,
ir_model_file: str,
output_names: Optional[Sequence[str]] = None,
**kwargs):
from openvino.inference_engine import IECore
self.ie = IECore()
bin_path = osp.splitext(ir_model_file)[0] + '.bin'
self.net = self.ie.read_network(ir_model_file, bin_path)
for input in self.net.input_info.values():
batch_size = input.input_data.shape[0]
dims = len(input.input_data.shape)
# if input is a image, it has (B,C,H,W) channels,
# need batch_size==1
assert not dims == 4 or batch_size == 1, \
'Only batch 1 is supported.'
self.device = 'cpu'
self.sess = self.ie.load_network(
network=self.net, device_name=self.device.upper(), num_requests=1)
# TODO: Check if output_names can be read
if output_names is None:
output_names = [name for name in self.net.outputs]
super().__init__(output_names)
def __update_device(
self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Updates the device type to 'self.device' (cpu) for the input
tensors.
Args:
inputs (Dict[str, torch.Tensor]): The input name and tensor pairs.
Returns:
Dict[str, torch.Tensor]: The output name and tensor pairs
with updated device type.
"""
updated_inputs = {
name: data.to(torch.device(self.device)).contiguous()
for name, data in inputs.items()
}
return updated_inputs
def __reshape(self, inputs: Dict[str, torch.Tensor]):
"""Reshape the model for the shape of the input data.
Args:
inputs (Dict[str, torch.Tensor]): The input name and tensor pairs.
"""
input_shapes = {name: data.shape for name, data in inputs.items()}
reshape_needed = False
for input_name, input_shape in input_shapes.items():
blob_shape = self.net.input_info[input_name].input_data.shape
if not np.array_equal(input_shape, blob_shape):
reshape_needed = True
break
if reshape_needed:
self.net.reshape(input_shapes)
self.sess = self.ie.load_network(
network=self.net,
device_name=self.device.upper(),
num_requests=1)
def __process_outputs(
self, outputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Converts tensors from 'torch' to 'numpy' and fixes the names of the
outputs.
Args:
outputs Dict[str, torch.Tensor]: The output name and tensor pairs.
Returns:
Dict[str, torch.Tensor]: The output name and tensor pairs
after processing.
"""
outputs = {
name: torch.from_numpy(tensor)
for name, tensor in outputs.items()
}
cleaned_outputs = {}
for name, value in outputs.items():
if '.' in name:
new_output_name = name.split('.')[0]
cleaned_outputs[new_output_name] = value
else:
cleaned_outputs[name] = value
return cleaned_outputs
def forward(self, inputs: Dict[str,
torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Run forward inference.
Args:
inputs (Dict[str, torch.Tensor]): The input name and tensor pairs.
Returns:
Dict[str, torch.Tensor]: The output name and tensor pairs.
"""
inputs = self.__update_device(inputs)
self.__reshape(inputs)
outputs = self.__openvino_execute(inputs)
outputs = self.__process_outputs(outputs)
return outputs
@TimeCounter.count_time(Backend.OPENVINO.value)
def __openvino_execute(
self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Run inference with OpenVINO IE.
Args:
inputs (Dict[str, torch.Tensor]): The input name and tensor pairs.
Returns:
Dict[str, numpy.ndarray]: The output name and tensor pairs.
"""
outputs = self.sess.infer(inputs)
return outputs
|
[
"noreply@github.com"
] |
open-mmlab.noreply@github.com
|
de6f542882672b658532eb178c29616dbd103d99
|
658ab464e9c796f819ad85f569ad06ab6e66992e
|
/src/commonlib/pi_work.py
|
50c01111fc13ad02ca1933edff005ecb983ade37
|
[] |
no_license
|
huowolf/python-demo
|
03e5731ba632caada819dd70d0f9dc07c98308a1
|
e3b80dcc0e0bc2437a0b2882e17563c8171460a2
|
refs/heads/master
| 2020-03-23T22:00:57.515258
| 2018-09-07T15:33:22
| 2018-09-07T15:33:22
| 142,147,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
# Approximate pi with the Leibniz series:
#   pi = 4/1 - 4/3 + 4/5 - 4/7 + ...
# itertools supplies the odd denominators lazily.
import itertools


def pi(N):
    """Return the sum of the first N terms of the Leibniz series for pi.

    Args:
        N (int): number of series terms to sum; N <= 0 yields 0.
    Returns:
        float: the partial-sum approximation of pi.
    """
    # Odd denominators 1, 3, 5, ... limited to the first N values.
    odds = itertools.islice(itertools.count(1, 2), N)
    # Alternate the sign of each 4/denominator term and sum them all.
    return sum((-1) ** i * 4 / d for i, d in enumerate(odds))


# Self-checks:
print(pi(10))
print(pi(100))
print(pi(1000))
print(pi(10000))

assert 3.04 < pi(10) < 3.05
assert 3.13 < pi(100) < 3.14
assert 3.140 < pi(1000) < 3.141
assert 3.1414 < pi(10000) < 3.1415
print('ok')
|
[
"274956285@qq.com"
] |
274956285@qq.com
|
98e594fa4430a75bb827ee7bfbc0b330e5f0e8a0
|
75f0580af1734b9edb9e06bfadfe48f45b057872
|
/studyscores.py
|
d702f4db0b792522c145ceac388e750fd9d3fc5f
|
[] |
no_license
|
penteract/adventofcode
|
5bb317f8093f60c1d776d0983016a5288d059603
|
7b7344708ef1d58caa339a32a13f3390556b664c
|
refs/heads/master
| 2023-01-29T16:08:13.541190
| 2023-01-16T20:21:02
| 2023-01-16T20:21:02
| 160,901,373
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
#! /usr/bin/env python3
# Summarise an Advent of Code private-leaderboard JSON dump (419070.json):
# for each day print, per member, the part-1 time, part-2 time and the
# part1->part2 delta, each column sorted independently.
import json

dat = open("419070.json").read()
j = json.loads(dat)
# Index member records by display name.
byname = {j["members"][i]["name"]:j["members"][i] for i in j["members"]}
bypuzz = {}
G="get_star_ts"
HOUR=60*60
DAY = HOUR*24
# bypuzz[day][name] = [part-1 completion ts, part-2 completion ts]
for n in byname:
    for l in byname[n]["completion_day_level"]:
        if l not in bypuzz: bypuzz[l]={}
        k = byname[n]["completion_day_level"][l]
        # Both parts solved -> record both timestamps.
        if all(c in k for c in "12"): bypuzz[l][n] = [int(k["1"][G]),int(k["2"][G])]
        # Only part 1 solved -> pad part 2 with a far-future sentinel.
        elif all(c in k for c in "1"): bypuzz[l][n] = [int(k["1"][G]),int(k["1"][G])+1000000]
# Column width: longest expected name plus the widest time value plus one.
LEN = len("joefarebrother") + len(str(DAY)) + 1
print(" ".join((b+"").rjust(LEN) for b in ["part1","part2","delta"]))
for day,dat in sorted(bypuzz.items(),key=lambda x:int(x[0])):
    l1=[]
    l2=[]
    ld=[]
    # Timestamps are shifted by 5 hours (puzzles unlock at midnight EST,
    # presumably — confirm) and wrapped to seconds-within-the-day.
    for name,(t1,t2) in dat.items():
        l1.append(((t1-5*HOUR)%DAY,name))
        l2.append(((t2-5*HOUR)%DAY,name))
        ld.append((t2-t1,name))
    l1.sort()
    l2.sort()
    ld.sort()
    print(day)
    # Row i pairs the i-th fastest entry of each independently sorted column.
    for tri in zip(l1,l2,ld):
        print(" ".join((str(b)+":"+str(a)).rjust(LEN) for a,b in tri))
    print()
|
[
"tcathcartburn@gmail.com"
] |
tcathcartburn@gmail.com
|
f67ee34a5888807e43485f6883b0f5d664156cb6
|
e67d4123c10d464c91e70210d58bd4900164645b
|
/83/D. Basketball Team/basketball_team.py
|
e6ea0d4199272efb2c911f4808f5545e4d86e15f
|
[] |
no_license
|
pkaleta/Codeforces
|
422188d4483fbf8dd99d6b0654c8e464fb143560
|
fb011f616f8db366c6aba80ff2be01692611ef81
|
refs/heads/master
| 2021-01-19T06:42:30.162981
| 2011-11-26T01:29:30
| 2011-11-26T01:29:30
| 2,853,430
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
# Codeforces "Basketball Team" (Python 2 source — print statements).
# Reads n (team size), m (department count), h (the player's department)
# and the department sizes s[0..m-1] from stdin.
import sys

n, m, h = map(int, sys.stdin.readline().split())
s = map(int, sys.stdin.readline().split())
ss = sum(s)

def calc(n1, n2, k):
    # Computes C(n1, k) / C(n2, k) as a running product of paired
    # numerator/denominator factors, keeping the intermediate value near 1
    # to avoid float overflow/underflow. Exhausted ranges contribute a
    # factor of 1.
    ret = 1.0
    i = n1-k+1
    j = n2-k+1
    while i <= n1 or j <= n2:
        if i > n1: ii = 1
        else: ii = i
        if j > n2: jj = 1
        else: jj = j
        ret *= float(ii)/float(jj)
        i += 1
        j += 1
    return ret

# Not enough candidates to form a team at all.
if (ss < n):
    print "-1.0"
else:
    # 1 - C(ss - s[h-1], n-1) / C(ss - 1, n-1): complement of the chance
    # that none of department h's other members fill the remaining slots.
    print 1.0-calc(ss-s[h-1], ss-1, n-1)
|
[
"piotrek.kaleta@gmail.com"
] |
piotrek.kaleta@gmail.com
|
fbe7a63214573776495856cc9e932b74a59a55bb
|
86cc998fd200a89e7caf5a4acfe81b81a2d5827c
|
/lib/cron/genDNS
|
c1f0374fccff9c10a5320cdb0894b994af182ee0
|
[
"Apache-2.0"
] |
permissive
|
arguello/contractor
|
6fe28b3356548c097f28ffe54555963962351405
|
dd78f5b770ee7b5c41cddfc0a61869908b96e385
|
refs/heads/master
| 2022-05-26T22:04:53.239954
| 2020-05-03T01:55:25
| 2020-05-03T01:55:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,265
|
#!/usr/bin/env python3
import os
os.environ.setdefault( 'DJANGO_SETTINGS_MODULE', 'contractor.settings' )
import django
django.setup()
import sys
import json
import hashlib
import subprocess
from datetime import datetime
from contractor.Directory.models import Zone
from contractor.Directory.lib import genZone, genPtrZones, genMasterFile
CACHE_FILE = '/var/lib/contractor/dns.cache'
ZONE_DIR = '/etc/bind/contractor/zones/'
MASTER_FILE = '/etc/bind/contractor/dns.master'
def serial():
  """Return the current DNS zone serial: minutes since the epoch, as a string.

  The serial is an unsigned 32 bit, monotonically increasing value with
  one-minute resolution, which stays in range well past y2038.
  """
  now = datetime.now().timestamp()
  return str( int( now / 60 ) )
def updateFile( filename, txt, cache ):
  """Write *txt* to ZONE_DIR/filename when its content hash changed.

  The '**ZONE_SERIAL**' placeholder is substituted with the current serial
  at write time.  *cache* maps filename -> sha256 of the last written text
  and is updated in place.
  """
  # 'digest' rather than 'hash' to avoid shadowing the builtin.
  digest = hashlib.sha256( txt.encode() ).hexdigest()
  if cache.get( filename, '' ) != digest:
    print( 'Writing "{0}"...'.format( filename ) )
    # Context manager so the file handle is flushed and closed deterministically.
    with open( os.path.join( ZONE_DIR, filename ), 'w' ) as fp:
      fp.write( txt.replace( '**ZONE_SERIAL**', serial() ) )
    cache[ filename ] = digest
print( 'Reading cache...' )
try:
  # Context manager instead of a bare open().read() so the handle is closed.
  with open( CACHE_FILE, 'r' ) as fp:
    cache = json.loads( )
except FileNotFoundError:
  cache = {}
except json.JSONDecodeError as e:
  raise ValueError( 'Error parsing cache file: {0}'.format( e ) )

# Build/update a zone file for every forward zone, collecting PTR records
# and the list of generated zone files as we go.
ptr_list = []
zone_file_list = []
for zone in Zone.objects.all():
  print( 'Doing "{0}"...'.format( zone.fqdn ) )
  filename, txt = genZone( zone, ptr_list, zone_file_list )
  updateFile( filename, txt, cache )

print( 'Doing PTR zones...' )
for filename, txt in genPtrZones( ptr_list, zone_file_list ):
  updateFile( filename, txt, cache )

print( 'Writing master config...' )
with open( MASTER_FILE, 'w' ) as fp:
  fp.write( genMasterFile( ZONE_DIR, zone_file_list ) )

print( 'Writing cache...' )
with open( CACHE_FILE, 'w' ) as fp:
  fp.write( json.dumps( cache ) )

print( 'Checking...' )
try:
  subprocess.check_call( [ '/usr/sbin/named-checkconf', '-z' ] )
except subprocess.CalledProcessError:
  print( 'Validity check failed...' )
  sys.exit( 1 )

# A reload failure is non-fatal: the zone files are already on disk.
try:
  subprocess.check_call( [ '/usr/sbin/rndc', 'reload' ] )
except subprocess.CalledProcessError:
  print( 'WARNING: "rndc reload" failed' )

print( 'Done!' )
sys.exit( 0 )
|
[
"pnhowe@gmail.com"
] |
pnhowe@gmail.com
|
|
91f837f9b380a07ff980b9f1a00bbf9755ecaafa
|
0d0cf0165ca108e8d94056c2bae5ad07fe9f9377
|
/28_Winning_a_Kaggle_Competition_in_Python/4_Modeling/gridSearch.py
|
64f6581e3eec2ec544d66c65ea3a97365e39e676
|
[] |
no_license
|
MACHEIKH/Datacamp_Machine_Learning_For_Everyone
|
550ec4038ebdb69993e16fe22d5136f00101b692
|
9fe8947f490da221430e6dccce6e2165a42470f3
|
refs/heads/main
| 2023-01-22T06:26:15.996504
| 2020-11-24T11:21:53
| 2020-11-24T11:21:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
# Grid search over max_depth for the Gradient Boosting baseline.
# get_cv_score(train, params) returns the overall validation RMSE over
# 3-fold cross-validation for the given model parameters; we record the
# score for every candidate depth.

# Possible max depth values
max_depth_grid = [3, 6, 9, 12, 15]

# Validation score for each candidate, keyed by the depth value itself.
results = {
    depth_candidate: get_cv_score(train, {'max_depth': depth_candidate})
    for depth_candidate in max_depth_grid
}

print(results)
|
[
"noreply@github.com"
] |
MACHEIKH.noreply@github.com
|
eb8919a580c7e7e998422669d4fa651907a4c043
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/explore/2021/april/Palindrome_Linked_List.py
|
3a0dbc3dad627a0957f59068800b0605ba19ddd9
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
'''
Time: O(N)
Space: O(N)
You are here!
Your runtime beats 21.99 % of python submissions.
You are here!
Your memory usage beats 13.77 % of python submissions.
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool

        Copy the list values into an array, then compare the array with
        its reverse.  O(N) time, O(N) space.
        """
        seen = []
        node = head
        while node is not None:
            seen.append(node.val)
            node = node.next
        return seen == list(reversed(seen))
|
[
"838255715@qq.com"
] |
838255715@qq.com
|
4ab98705595c75687b2a1d43a82da9ce0f973aed
|
7a15271c7cddd199f43555469a67d26ce0f60836
|
/uncertainty_baselines/models/segmenter_gp.py
|
76a63b7daf4b436ae49c69c2f1b67b9b791125c4
|
[
"Apache-2.0"
] |
permissive
|
google/uncertainty-baselines
|
b2c339d918bf3949ee066f9eafa6b51232a2ac3d
|
f5f6f50f82bd441339c9d9efbef3f09e72c5fef6
|
refs/heads/main
| 2023-09-02T13:59:26.355288
| 2023-08-14T16:35:22
| 2023-08-14T16:36:11
| 280,026,201
| 1,235
| 198
|
Apache-2.0
| 2023-09-11T22:21:48
| 2020-07-16T01:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 6,167
|
py
|
# coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Segmenter GP Vision Transformer (ViT) model.
Based on scenic library implementation.
"""
from typing import Any, Callable, Tuple, Iterable
import edward2.jax as ed
import flax.linen as nn
import jax
import jax.numpy as jnp
import ml_collections
from uncertainty_baselines.models import segmenter
from uncertainty_baselines.models import segmenter_be
Array = Any
PRNGKey = Any
Shape = Tuple[int]
DType = type(jnp.float32)
InitializeFn = Callable[[jnp.ndarray, Iterable[int], DType], jnp.ndarray]
class SegVitGP(nn.Module):
  """Segmentation model with ViT backbone and decoder.

  Attributes:
    num_classes: Number of output segmentation classes.
    patches: Config holding the patch `size` (fh, fw) used to tokenize input.
    backbone_configs: Config for the encoder ('vit' or 'vit_be').
    decoder_configs: Config for the decoder ('linear', 'gp' or 'linear_be').
    head_kernel_init: Initializer for the output head kernel.
  """
  num_classes: int
  patches: ml_collections.ConfigDict
  backbone_configs: ml_collections.ConfigDict
  decoder_configs: ml_collections.ConfigDict
  head_kernel_init: InitializeFn = nn.initializers.variance_scaling(  # pytype: disable=annotation-type-mismatch  # jax-types
      0.02, 'fan_in', 'truncated_normal')

  @nn.compact
  def __call__(self, x: Array, *, train: bool, debug: bool = False):
    """Applies the module."""
    input_shape = x.shape
    b, h, w, _ = input_shape

    fh, fw = self.patches.size
    # Token grid size after patchification.
    gh, gw = h // fh, w // fw

    if self.backbone_configs.type == 'vit' and self.decoder_configs.type == 'linear':
      assert self.backbone_configs.ens_size == 1

    if self.backbone_configs.type == 'vit' and self.decoder_configs.type == 'linear_be':
      raise NotImplementedError(
          'Configuration with encoder {} and decoder {} is not implemented'
          .format(
              self.backbone_configs.type,
              self.decoder_configs.type,
          ))

    if self.backbone_configs.type == 'vit':
      x, out = segmenter.ViTBackbone(
          mlp_dim=self.backbone_configs.mlp_dim,
          num_layers=self.backbone_configs.num_layers,
          num_heads=self.backbone_configs.num_heads,
          patches=self.patches,
          hidden_size=self.backbone_configs.hidden_size,
          dropout_rate=self.backbone_configs.dropout_rate,
          attention_dropout_rate=self.backbone_configs.attention_dropout_rate,
          classifier=self.backbone_configs.classifier,
          name='backbone')(
              x, train=train)
    elif self.backbone_configs.type == 'vit_be':
      x, out = segmenter_be.ViTBackboneBE(
          mlp_dim=self.backbone_configs.mlp_dim,
          num_layers=self.backbone_configs.num_layers,
          num_heads=self.backbone_configs.num_heads,
          patches=self.patches,
          hidden_size=self.backbone_configs.hidden_size,
          dropout_rate=self.backbone_configs.dropout_rate,
          attention_dropout_rate=self.backbone_configs.attention_dropout_rate,
          classifier=self.backbone_configs.classifier,
          ens_size=self.backbone_configs.ens_size,
          random_sign_init=self.backbone_configs.random_sign_init,
          be_layers=self.backbone_configs.be_layers,
          name='backbone')(
              x, train=train)
    else:
      raise ValueError(f'Unknown backbone: {self.backbone_configs.type}.')

    # remove CLS tokens for decoding
    if self.backbone_configs.classifier == 'token':
      x = x[..., 1:, :]

    ens_size = self.backbone_configs.get('ens_size', 1)

    if self.decoder_configs.type == 'linear':
      # Linear head only, like Segmenter baseline:
      # https://arxiv.org/abs/2105.05633
      output_projection = nn.Dense(
          self.num_classes,
          kernel_init=self.head_kernel_init,
          name='output_projection')
      x = jnp.reshape(x, [b * ens_size, gh, gw, -1])
      x = output_projection(x)
    elif self.decoder_configs.type == 'gp':
      # Gaussian process layer output: (logits, covmat, and *random features)
      # *random features are optional
      output_projection = ed.nn.RandomFeatureGaussianProcess(
          features=self.num_classes,
          name='output_projection',
          **self.decoder_configs.gp_layer)
      x = jnp.reshape(x, [b*ens_size*gh*gw, -1])
      x_gp = output_projection(x)
      out['logits_gp'] = x_gp[0]
      out['covmat_gp'] = x_gp[1]
      if len(x_gp) > 2:
        out['random_features_gp'] = x_gp[2]
      if not train:
        # During inference, compute posterior mean by adjusting the original
        # logits with predictive uncertainty.
        x = ed.nn.utils.mean_field_logits(
            logits=x_gp[0],
            covmat=x_gp[1],
            mean_field_factor=self.decoder_configs.mean_field_factor)
      else:
        x = x_gp[0]
      x = jnp.reshape(x, [b*ens_size, gh, gw, -1])
    elif self.decoder_configs.type == 'linear_be':
      output_projection = ed.nn.DenseBatchEnsemble(
          self.num_classes,
          self.backbone_configs.ens_size,
          activation=None,
          alpha_init=ed.nn.utils.make_sign_initializer(
              self.backbone_configs.get('random_sign_init')),
          gamma_init=ed.nn.utils.make_sign_initializer(
              self.backbone_configs.get('random_sign_init')),
          kernel_init=self.head_kernel_init,
          name='output_projection_be')
      x = output_projection(x)
    else:
      raise ValueError(
          f'Decoder type {self.decoder_configs.type} is not defined.')

    # Resize bilinearly:
    x = jax.image.resize(x, [b * ens_size, h, w, x.shape[-1]], 'bilinear')
    out['logits'] = x

    new_input_shape = tuple([
        input_shape[0] * ens_size,
    ] + list(input_shape[1:-1]))
    # Bug fix: the assert message was a bare tuple ('... %d vs. %d.', a, b)
    # that was never interpolated (and %d cannot format shape tuples);
    # build the message string explicitly instead.
    assert new_input_shape == x.shape[:-1], (
        'BE Input and output shapes do not match: {} vs. {}.'.format(
            new_input_shape, x.shape[:-1]))

    return x, out
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
79cc42438964db5b2d6053c997019c47cc2affe2
|
969d094bfb09662b369278dc2cde1dc160a286b6
|
/For_Loops/03_odd_even_position.py
|
a146cc42f1f89bb7903ec3322e674bb3592cfd3e
|
[] |
no_license
|
IvayloValkov/Python-the-beginning
|
e96756105b56d7c0ae2687a82ccace1ca97bc895
|
4d074c32f8251af5a96aece1ae447d09db038026
|
refs/heads/main
| 2023-02-16T13:05:59.726572
| 2021-01-17T08:32:45
| 2021-01-17T08:32:45
| 330,342,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
import sys

# Read n numbers from stdin; aggregate numbers at odd and even 1-based
# positions separately (sum, min, max). Sentinels +/- sys.maxsize mark
# "no value seen yet" for the min/max accumulators.
n = int(input())

odd_sum = 0
odd_max = -sys.maxsize
odd_min = sys.maxsize
even_sum = 0
even_max = -sys.maxsize
even_min = sys.maxsize

for position in range(1, n + 1):
    number = float(input())
    if position % 2 == 0:
        even_sum += number
        # min()/max() replace the manual compare-and-assign.
        even_max = max(even_max, number)
        even_min = min(even_min, number)
    else:
        odd_sum += number
        odd_max = max(odd_max, number)
        odd_min = min(odd_min, number)

print(f'OddSum={odd_sum:.2f},')
# Sentinel unchanged -> no number was seen at that parity.
print(f'OddMin={odd_min:.2f},' if odd_min != sys.maxsize else 'OddMin=No,')
print(f'OddMax={odd_max:.2f},' if odd_max != -sys.maxsize else 'OddMax=No,')
print(f'EvenSum={even_sum:.2f},')
print(f'EvenMin={even_min:.2f},' if even_min != sys.maxsize else 'EvenMin=No,')
print(f'EvenMax={even_max:.2f}' if even_max != -sys.maxsize else 'EvenMax=No')
|
[
"noreply@github.com"
] |
IvayloValkov.noreply@github.com
|
5e0e4793593f70670867c372bf60125379bc503e
|
2d0bada349646b801a69c542407279cc7bc25013
|
/src/vai_quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/clustering/keras/clustering_centroids_test.py
|
693276b6d53bd5037cb74fcd2afdafc97370dd90
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
Xilinx/Vitis-AI
|
31e664f7adff0958bb7d149883ab9c231efb3541
|
f74ddc6ed086ba949b791626638717e21505dba2
|
refs/heads/master
| 2023-08-31T02:44:51.029166
| 2023-07-27T06:50:28
| 2023-07-27T06:50:28
| 215,649,623
| 1,283
| 683
|
Apache-2.0
| 2023-08-17T09:24:55
| 2019-10-16T21:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,383
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras clustering centroids initialisation API."""
import tensorflow as tf
import tensorflow.keras.backend as K
from absl.testing import parameterized
from tensorflow_model_optimization.python.core.clustering.keras import cluster_config
from tensorflow_model_optimization.python.core.clustering.keras import clustering_centroids
keras = tf.keras
errors_impl = tf.errors
layers = keras.layers
test = tf.test
CentroidInitialization = cluster_config.CentroidInitialization
class ClusteringCentroidsTest(test.TestCase, parameterized.TestCase):
  """Unit tests for the clustering_centroids module."""

  def setUp(self):
    # Factory under test: maps CentroidInitialization values to
    # initializer classes.
    self.factory = clustering_centroids.CentroidsInitializerFactory

  @parameterized.parameters(
      (CentroidInitialization.LINEAR),
      (CentroidInitialization.RANDOM),
      (CentroidInitialization.DENSITY_BASED),
  )
  def testExistingInitsAreSupported(self, init_type):
    """
    Verifies that the given centroid initialization methods are supported.
    """
    self.assertTrue(self.factory.init_is_supported(init_type))

  def testNonExistingInitIsNotSupported(self):
    # An arbitrary unknown identifier must be rejected by the factory.
    self.assertFalse(self.factory.init_is_supported("DEADBEEF"))

  @parameterized.parameters(
      (
          CentroidInitialization.LINEAR,
          clustering_centroids.LinearCentroidsInitialisation
      ),
      (
          CentroidInitialization.RANDOM,
          clustering_centroids.RandomCentroidsInitialisation
      ),
      (
          CentroidInitialization.DENSITY_BASED,
          clustering_centroids.DensityBasedCentroidsInitialisation
      ),
  )
  def testReturnsMethodForExistingInit(self, init_type, method):
    """
    Verifies that the centroid initializer factory method returns the expected
    classes for the given initialization methods.
    """
    self.assertEqual(self.factory.get_centroid_initializer(init_type), method)

  def testThrowsValueErrorForNonExistingInit(self):
    """
    Verifies that the centroid initializer factory method raises an exception
    when invoked with an unsupported initialization method.
    """
    with self.assertRaises(ValueError):
      self.factory.get_centroid_initializer("DEADBEEF")

  # Each tuple is (x1, y1, x2, y2, expected_slope_a, expected_intercept_b)
  # for the line through (x1, y1) and (x2, y2).
  @parameterized.parameters(
      (0, 0, 1, 1, 1, 0),
      (0, 0, 5, 5, 1, 0),
      (1, 2, 3, 4, 1, 1),
      (7, 12, 17, 22, 1, 5),
      (-5, 4, 7, 10, 1.0 / 2.0, 13.0 / 2.0),
  )
  def testLinearSolverConstruction(self, x1, y1, x2, y2, a, b):
    """
    Verifies that a TFLinearEquationSolver is constructed correctly.
    """
    solver = clustering_centroids.TFLinearEquationSolver(float(x1),
                                                         float(y1),
                                                         float(x2),
                                                         float(y2))
    solver_a = solver.a
    # solver.a / solver.b are TF values; batch_get_value materializes them.
    self.assertAlmostEqual(K.batch_get_value([solver_a])[0], a)
    self.assertAlmostEqual(K.batch_get_value([solver.b])[0], b)

  # Each tuple is (x1, y1, x2, y2, expected_x, y): expected_x is the
  # solution of the line equation for the given y.
  @parameterized.parameters(
      (0, 0, 1, 1, 5, 5),
      (0, 0, 5, 5, 20, 20),
      (1, 2, 3, 4, 3, 4),
      (7, 12, 17, 22, 3, 8),
  )
  def testLinearSolverSolveForX(self, x1, y1, x2, y2, x, y):
    """
    Verifies that TFLinearEquationSolver solves the given equations correctly
    for X.
    """
    solver = clustering_centroids.TFLinearEquationSolver(float(x1),
                                                         float(y1),
                                                         float(x2),
                                                         float(y2))
    for_x = solver.solve_for_x(y)
    self.assertAlmostEqual(K.batch_get_value([for_x])[0], x)

  @parameterized.parameters(
      (0, 0, 1, 1, 5, 5),
      (0, 0, 5, 5, 20, 20),
      (1, 2, 3, 4, 3, 4),
      (7, 12, 17, 22, 3, 8),
  )
  def testLinearSolverSolveForY(self, x1, y1, x2, y2, x, y):
    """
    Verifies that TFLinearEquationSolver solves the given equations correctly
    for Y.
    """
    solver = clustering_centroids.TFLinearEquationSolver(float(x1),
                                                         float(y1),
                                                         float(x2),
                                                         float(y2))
    for_y = solver.solve_for_y(x)
    self.assertAlmostEqual(K.batch_get_value([for_y])[0], y)

  # Each tuple is (weights, point, expected_probability): the empirical
  # CDF of `weights` evaluated at `point`.
  @parameterized.parameters(
      ([1, 2, 6, 7], 4, 0.5),
      ([1, 2, 6, 7], 1, 1. / 4.),
      ([1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 1. / 3.),
      ([1, 2, 3, 4, 5, 6, 7, 8, 9], 99, 1.),
      ([1, 2, 3, 4, 5, 6, 7, 8, 9], -20, 0.)
  )
  def testCDFValues(self, weights, point, probability):
    """
    Verifies that TFCumulativeDistributionFunction yields the expected output
    for the inputs provided.
    """
    cdf_calc = clustering_centroids.TFCumulativeDistributionFunction(weights)
    self.assertAlmostEqual(
        probability,
        K.batch_get_value([cdf_calc.get_cdf_value(point)])[0]
    )

  # Each tuple is (weights, number_of_clusters, expected_centroids) for
  # density-based initialization.
  @parameterized.parameters(
      (
          [0, 1, 2, 3, 3.1, 3.2, 3.3, 3.4, 3.5],
          5,
          [0.11137931, 2.0534482, 3.145862, 3.3886206, 3.51]
      ),
      (
          [0, 1, 2, 3, 3.1, 3.2, 3.3, 3.4, 3.5],
          3,
          [0.11137931, 3.145862, 3.51]
      ),
      (
          [0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
          3,
          [0.3010345, 5.2775865, 9.01]
      )
  )
  def testClusterCentroids(self, weights, number_of_clusters, centroids):
    dbci = clustering_centroids.DensityBasedCentroidsInitialisation(
        weights,
        number_of_clusters
    )
    calc_centroids = K.batch_get_value([dbci.get_cluster_centroids()])[0]
    self.assertSequenceAlmostEqual(centroids, calc_centroids, places=4)
# Run the test suite when the file is executed directly.
if __name__ == '__main__':
  test.main()
|
[
"hanxue.lee@xilinx.com"
] |
hanxue.lee@xilinx.com
|
0e1c35982cd1e8a0dac5bd43a934045a405885ac
|
1058861a696e8b9882175b786fec131f396d69f2
|
/task_app/migrations/0001_initial.py
|
3816d3d7eb537e306c14c4b3d442babb4d18d4b5
|
[] |
no_license
|
wgoode3/djangular-example
|
f79622442532fa5dc5450f4c5ed8e39ce6f784c3
|
0a8924ea95a7a2faed6865b60f06ceb4a5aed5bb
|
refs/heads/master
| 2020-03-28T04:38:59.091342
| 2018-12-19T21:08:27
| 2018-12-19T21:08:27
| 147,727,910
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
# Generated by Django 2.1.1 on 2018-09-06 18:49
# NOTE: auto-generated initial migration -- creates the Task table.
# Avoid hand-editing; applied migrations must stay stable.

from django.db import migrations, models


class Migration(migrations.Migration):

    # First migration of this app, so it depends on nothing.
    initial = True

    dependencies = [
    ]

    operations = [
        # Task: three free-form string fields plus created/updated
        # timestamps maintained automatically by Django.
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('description', models.CharField(max_length=255)),
                ('status', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
[
"wgoode3@gmail.com"
] |
wgoode3@gmail.com
|
044533f19e009cf5a932c77574f9acb421be9c94
|
ea1af1a564f96fb36974aa094192877598b0c6bf
|
/Chapter10/Exercises/ex10_2.py
|
5a3786e78e11934cfb22408aad7d0cc3eefeda94
|
[] |
no_license
|
GSantos23/Crash_Course
|
63eecd13a60141e520b5ca4351341c21c4782801
|
4a5fc0cb9ce987948a728d43c4f266d34ba49a87
|
refs/heads/master
| 2020-03-20T23:20:43.201255
| 2018-08-21T01:13:06
| 2018-08-21T01:13:06
| 137,841,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
# Exercise 10.2
"""
Learning C: read each line from learning_python.txt and print it with the
word Python replaced by the name of another language (here: C), using the
str.replace() method.
"""

filename = 'learning_python.txt'

with open(filename) as file_object:
    # Iterate the file directly; each line keeps its trailing newline,
    # which strip() removes before printing.
    for line in file_object:
        print(line.replace('Python', 'C').strip())
|
[
"santosgerson64@gmail.com"
] |
santosgerson64@gmail.com
|
799f715fba061b3e4141658da26aa45c489d4fb7
|
e396fb9580ff90f7896dba3416be3a7bef81f367
|
/rdflib/namespace/RDFS.py
|
b32a830c87dbb4a7af617304e26edefa504411b5
|
[
"CC0-1.0"
] |
permissive
|
Philani7777777/definednamespace
|
c9e37ccc41762ff07e8b9e800a20b11a187ca355
|
f1178ba9c36a94bbd422844f4ddc71de67521d7b
|
refs/heads/master
| 2022-09-24T14:40:43.844447
| 2020-05-27T04:35:20
| 2020-05-27T04:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,742
|
py
|
from rdflib.term import URIRef
from rdflib.namespace import DefinedNamespace, Namespace
class RDFS(DefinedNamespace):
    """
    The RDF Schema vocabulary (RDFS)

    Generated from: http://www.w3.org/2000/01/rdf-schema#
    Date: 2020-05-26 14:20:05.794866

    rdfs:seeAlso <http://www.w3.org/2000/01/rdf-schema-more>
    """

    # NOTE: these are annotation-only declarations, not assignments;
    # DefinedNamespace resolves each attribute access to a URIRef inside
    # _NS at lookup time.

    # http://www.w3.org/1999/02/22-rdf-syntax-ns#Property
    comment: URIRef  # A description of the subject resource.
    domain: URIRef  # A domain of the subject property.
    isDefinedBy: URIRef  # The definition of the subject resource.
    label: URIRef  # A human-readable name for the subject.
    member: URIRef  # A member of the subject resource.
    range: URIRef  # A range of the subject property.
    seeAlso: URIRef  # Further information about the subject resource.
    subClassOf: URIRef  # The subject is a subclass of a class.
    subPropertyOf: URIRef  # The subject is a subproperty of a property.

    # http://www.w3.org/2000/01/rdf-schema#Class
    Class: URIRef  # The class of classes.
    Container: URIRef  # The class of RDF containers.
    ContainerMembershipProperty: URIRef  # The class of container membership properties, rdf:_1, rdf:_2, ..., all of which are sub-properties of 'member'.
    Datatype: URIRef  # The class of RDF datatypes.
    Literal: URIRef  # The class of literal values, eg. textual strings and integers.
    Resource: URIRef  # The class resource, everything.

    _NS = Namespace("http://www.w3.org/2000/01/rdf-schema#")
|
[
"solbrig@jhu.edu"
] |
solbrig@jhu.edu
|
bc1c50c9adb00b6d195e495eedb5e73b7c85c345
|
a8ba2295b41b26716dc6dbf62392c7ea9ef5ea08
|
/apps/calificacion/views.py
|
ae9ab7a486377042221e173d02a485c0f35e8e22
|
[] |
no_license
|
clcneogeek325/iicea
|
7131fd335db94a4af8dbddf5d0126672fc3b312e
|
328079ee6e642dc2ecda3b9fd4bf119d81260f3d
|
refs/heads/master
| 2021-01-25T05:15:44.448413
| 2014-12-18T03:34:45
| 2014-12-18T03:34:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,080
|
py
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from .models import calificacion
from .forms import calificacionForm
from django.http import HttpResponse,HttpResponseRedirect
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from apps.semestre.models import semestre
from apps.alumno.models import alumno
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import login_required
from iicea.settings import URL_LOGIN
from django.template.loader import render_to_string
import cStringIO as StringIO
import ho.pisa as pisa
import cgi
def generar_pdf(html):
    """Render *html* to PDF with pisa and return it as an HTTP response."""
    buffer = StringIO.StringIO()
    document = pisa.pisaDocument(StringIO.StringIO(html.encode("UTF-8")), buffer)
    # On render failure, echo the (escaped) source HTML back for debugging.
    if document.err:
        return HttpResponse('Error al generar el PDF: %s' % cgi.escape(html))
    return HttpResponse(buffer.getvalue(), content_type='application/pdf')
def pdf(request):
    """Render the calificacion PDF template and return it as a PDF response."""
    ctx = {'pagesize': 'A4'}
    rendered = render_to_string('calificacion/pdf.html', ctx,
                                context_instance=RequestContext(request))
    return generar_pdf(rendered)
#===============================================
#===============================================
@login_required(login_url=URL_LOGIN)
def view_lista_calificacions(request):
    """Paginated listing of calificacion records, newest first."""
    registros = calificacion.objects.order_by('id').reverse()
    paginator = Paginator(registros, 3)  # 3 records per page
    page = request.GET.get('page')
    try:
        lista = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric (or missing) page parameter: show the first page.
        lista = paginator.page(1)
    except EmptyPage:
        # Out-of-range page (e.g. 9999): show the last page instead.
        lista = paginator.page(paginator.num_pages)
    ctx = {'lista': lista}
    return render_to_response("calificacion/lista.html", ctx,
                              context_instance=RequestContext(request))
@login_required(login_url=URL_LOGIN)
def view_eliminar_calificacion(request, id):
    """Soft-delete: mark the calificacion inactive rather than removing it."""
    registro = calificacion.objects.get(pk=id)
    registro.activo = False
    registro.save()
    return HttpResponseRedirect('/calificacion/')
@login_required(login_url=URL_LOGIN)
def view_calificaciones_alumno(request, id):
    """List the active semesters for the alumno identified by *id*."""
    semestres = semestre.objects.filter(activo=True)
    ctx = {'lista': semestres, 'id_alumno': id}
    return render_to_response("calificacion/semestres.html", ctx,
                              context_instance=RequestContext(request))
def calificaciones_alumno_x_semestre(request, id_semestre, id_user):
    """Build the template context with one alumno's calificaciones for
    one semestre.  Shared by the HTML and PDF views below.
    """
    # Leftover debug print of the ids removed.
    s = semestre.objects.get(pk=id_semestre)
    a = alumno.objects.get(alumno_id=id_user)
    lista = calificacion.objects.filter(alumno=a, semestre=s)
    msg = "Lista de Calificaciones"
    ctx = {'lista': lista, 'msg': msg, 'id_semestre': id_semestre, 'id_user': id_user}
    return ctx
@login_required(login_url=URL_LOGIN)
def view_calificaciones_alumno_x_semestre(request, id_semestre, id_user):
    """HTML view of one alumno's calificaciones for one semestre."""
    contexto = calificaciones_alumno_x_semestre(request, id_semestre, id_user)
    return render_to_response("calificacion/calificaciones.html", contexto,
                              context_instance=RequestContext(request))
def pdf_calificaciones_alumno_x_semestre(request, id_semestre, id_user):
    """PDF version of one alumno's calificaciones for one semestre."""
    contexto = calificaciones_alumno_x_semestre(request, id_semestre, id_user)
    rendered = render_to_string("calificacion/pdf.html", contexto,
                                context_instance=RequestContext(request))
    return generar_pdf(rendered)
@login_required(login_url=URL_LOGIN)
def view_editar_calificacion(request, id):
    """Edit an existing calificacion; shows a message page if it is missing.

    Fixes: on an invalid POST the original rebuilt the form from
    request.POST WITHOUT instance=a, silently dropping the record being
    edited (and printed a debug message); we now re-render the same
    instance-bound form so its validation errors are displayed.
    """
    try:
        a = calificacion.objects.get(pk=id)
        if request.method == "POST":
            form = calificacionForm(request.POST, instance=a)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect("/calificacion/")
            # Invalid: fall through with the bound form so errors render.
            ctx = {'form': form}
            return render_to_response('calificacion/edit.html', ctx,
                                      context_instance=RequestContext(request))
        else:
            form = calificacionForm(instance=a)
            ctx = {'form': form}
            return render_to_response('calificacion/edit.html', ctx,
                                      context_instance=RequestContext(request))
    except ObjectDoesNotExist:
        ctx = {'msg': "No se encontro el perfil solicitado"}
        return render_to_response('msg.html', ctx,
                                  context_instance=RequestContext(request))
@login_required(login_url=URL_LOGIN)
def view_agregar_calificacion(request):
    """Create a calificacion, pre-filling alumno/semestre from the newest
    active record.

    Fixes: on an invalid POST the original re-created an identical bound
    form (`calificacionForm(request.POST)`) before rendering; the existing
    bound form is reused instead.
    """
    if calificacion.objects.filter(activo=True).exists():
        datos = calificacion.objects.filter(activo=True).order_by('id').reverse()
        ultimo_alumno = {'alumno': datos[0].alumno, 'semestre': datos[0].semestre}
    else:
        ultimo_alumno = {}
    if request.method == "POST":
        form = calificacionForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect("/calificacion/")
        # Invalid: re-render the same bound form so errors are shown.
        ctx = {'form': form}
        return render_to_response('calificacion/add.html', ctx,
                                  context_instance=RequestContext(request))
    else:
        form = calificacionForm(initial=ultimo_alumno)
        ctx = {'form': form}
        return render_to_response('calificacion/add.html', ctx,
                                  context_instance=RequestContext(request))
|
[
"clcneogeek@gmail.com"
] |
clcneogeek@gmail.com
|
3a376fb7cbc7165ed0919498a1c070330e60c6ff
|
d587b67e83a8e598e2d84bbf23edbbc395429a1a
|
/baiscRevision/feb21Class.py
|
611499997306a15bcc875480c9735107daf2f532
|
[
"MIT"
] |
permissive
|
koromax1/code_for_Kids
|
e7d87264918ca7dc5d6edf62b2c1fa672a380bcd
|
ee4407f503b76fe56419b89008a28d5bfabe3592
|
refs/heads/main
| 2023-04-08T00:24:40.113132
| 2021-03-31T18:01:36
| 2021-03-31T18:01:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,616
|
py
|
#revision
"""
1. Basic Data Type
2. addition,
3. if else, nested if else
4. for loop, while loop, (loop + if else)
5. function
"""
# Sample values of the four basic data types used throughout this lesson.
var1 = 100 #integer
var2 = 100.501 #float
var3 = True #boolean
var4 = 'Python' #string
#how to check the type of these variables?
# print(type(var1))
# print(type(var3))
#how to print something
# print('Hello World!') #single string print
# + == generic addition
# , == generic addition with type change
# print(var2 + var4) # var2 + var4
# print(var2 , var4) # var2 = str(var2) + 'Python'
#string Manipulation::: addition
# result = "float value: "+ str(var2) +" "+"string value: "+ var4 +" "+"boolean: "+ str(var3)
# print(result)
#if else for boolean types
'''
var5 = False #take an umbrella or not
rainy_weather = False
if rainy_weather == True:
if var5 == True:
print('You just save yourself from rain')
elif var5 == False:
print('you will get drenched in rain')
elif rainy_weather == False:
if var5 == True:
print('smart boy')
else:
print('lucky!!')
else:
print('You are not saved')
'''
# for i in range(5):
# print(i)
# if i == 2:
# print('Black Widow')
# if i % 2 == 0:
# print('BATMAN')
# else:
# print('Spiderman')
# i = 1
# while i < 5:
# print(i)
# i = i + 1
#function
def addition():
    """Teaching demo: add two fixed numbers and print the result."""
    first, second = 5, 5
    print('The addition function output is ')
    print(first + second)
# addition()
#parameter or argument pass
def substraction(value):
    """Teaching demo of argument passing: print 10 minus *value*.

    (Function name kept as originally written for compatibility.)
    """
    minuend = 10
    print('The substraction value is ')
    print(minuend - value)
substraction(3)
|
[
"omarhasan115@gmail.com"
] |
omarhasan115@gmail.com
|
fbc2a37d26fc1291c81b5a80f7b93341e7c4f4a8
|
4c9c98b7a5b21848e53dfa8fb6ead1d9ea412d48
|
/algorithms/bit_manipulation/python/lonely_integer.py
|
3e6d8164fcf2633c7c2160a26b764ad7037fe12f
|
[] |
no_license
|
thommms/hacker_rank
|
1e701c4a932e4f4c196d38fd32c7155a68da079c
|
fe8b05e0e73425df5d4011b290add418d461eef9
|
refs/heads/master
| 2020-03-19T12:50:00.808869
| 2018-05-28T17:42:09
| 2018-05-28T17:42:09
| 136,543,275
| 1
| 0
| null | 2018-06-07T23:50:26
| 2018-06-07T23:50:26
| null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
from functools import reduce

# "Lonely Integer": every value in the array appears twice except one.
# XOR-folding the whole array cancels the pairs and leaves that value.
n, A = int(input()), [int(x) for x in input().strip().split(' ')]
answer = reduce(lambda acc, cur: acc ^ cur, A)
print(answer)
|
[
"runcy.oommen@gmail.com"
] |
runcy.oommen@gmail.com
|
96230b8a541d32409872d48c2fc7ee9d476559d3
|
5a8304c26aaa0e0c87ae4daafa3f1c5f56714e5d
|
/ProTwo/ProTwo/appTwo/migrations/0001_initial.py
|
55b93a818b16730b2e068c6604b4d4dedda06aac
|
[] |
no_license
|
jack456054/Django-test
|
c625460f3e3b2061eff6d13dd095e32bcf3e3220
|
501837dd80608a8c982214e41f6b746655aabca5
|
refs/heads/master
| 2023-04-28T01:21:28.688973
| 2019-10-02T06:58:31
| 2019-10-02T06:58:31
| 210,776,683
| 0
| 0
| null | 2023-04-21T20:38:20
| 2019-09-25T06:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 630
|
py
|
# Generated by Django 2.2.5 on 2019-09-27 03:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``User`` table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=128)),
                ('last_name', models.CharField(max_length=128)),
                # Email is the only unique field on this model.
                ('email', models.EmailField(max_length=264, unique=True)),
            ],
        ),
    ]
|
[
"jack456054@hotmail.com"
] |
jack456054@hotmail.com
|
fc21204d2e8e095e9a3d71541379fab4054538ac
|
894b290b4f4f47b5eb523c23efd7bd6110d91b2f
|
/44_xhs_note/xhs_note/xhs_note/scripts/xhs_transform.py
|
9fbe1879beb2c7540b72e328915d81996f564fd9
|
[] |
no_license
|
wliustc/SpiderS
|
6650c00616d11239de8c045828bafdc5a299b1ce
|
441f309c50d28c1a3917bed19321cd5cbe7c2861
|
refs/heads/master
| 2020-03-27T06:15:39.495785
| 2018-06-14T07:55:44
| 2018-06-14T07:55:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,640
|
py
|
# -*- coding: utf-8 -*-
import sys
import json
import re
reload(sys)
sys.setdefaultencoding('utf-8')
# Pre-compiled regexes keyed by field name. Patterns match inside *escaped*
# JSON text, hence the doubled backslashes around the quotes.
_mapping = {
    'sellCount':re.compile(r'\\"sellCount\\":\\"(\d+)\\"'),
}
def get_regex_group1(key, _str, default=None):
    """Search *_str* with the pre-compiled pattern ``_mapping[key]``.

    Returns capture group 1 on a match, otherwise *default*.
    """
    match = _mapping[key].search(_str)
    return match.group(1) if match else default
def get_json_hierarchy(_json_obj, arch_ele_list):
    """Walk *_json_obj* down the chain of keys in *arch_ele_list*.

    Returns the nested value, or None as soon as any key is missing.
    """
    node = _json_obj
    for key in arch_ele_list:
        if key not in node:
            return None
        node = node[key]
    return node
def format_list(data):
    """Sanitize a list of values into TSV-safe strings (Python 2 code).

    unicode items are UTF-8 encoded and stripped of control characters;
    str items take the same cleanup but are decoded back to unicode;
    ints are stringified; anything else passes through unchanged.
    Falsy items become ''.
    """
    result = []
    if data:
        for item in data:
            tmp = ''
            if item:
                if type(item) == unicode:
                    tmp = item.encode('utf-8')
                    # NOTE: '\u0001' here is a 6-char literal, not an escape.
                    tmp = tmp.replace('\u0001','')
                    # Collapse field/record separators so TSV output stays intact.
                    tmp = tmp.replace('\n',' ')
                    tmp = tmp.replace('\t',' ')
                    tmp = tmp.replace('\r',' ')
                    tmp = re.sub(r'[\x01-\x1f]','', tmp)
                    tmp = tmp.strip()
                elif type(item) == int:
                    tmp = str(item)
                elif type(item) == str:
                    tmp = item.encode('utf-8').replace("\u0001",'')
                    tmp = tmp.replace('\n',' ')
                    tmp = re.sub(r'[\x01-\x1f]','', tmp)
                    tmp = tmp.replace('\t',' ')
                    tmp = tmp.replace('\r',' ')
                    tmp = tmp.decode('utf-8').strip()
                else:
                    tmp = item
            result.append(tmp)
    return result
# Main loop: read one JSON record per stdin line, flatten selected fields of
# the note payload, and emit one sanitized TSV row per record (Python 2).
for line in sys.stdin:
    try:
        line = json.loads(line)
        line = line['content']
        result = []
        if line:
            # 'note' and 'list' arrive as JSON-encoded strings; parse them.
            note = json.loads(line['note'])
            lists = json.loads(line['list'])
            #id
            result.append(lists['id'])
            #task_date
            result.append(line['task_date'])
            #oid
            result.append(line['oid'])
            #list
            result.append(line['list'])
            #note
            result.append(line['note'])
            #comments
            if 'comments' in note:
                result.append(note['comments'])
            else:
                result.append(None)
            #category
            if 'category' in note:
                result.append(note['category'])
            else:
                result.append(None)
            #p_time
            times = note.get('time',None)
            result.append("{0}:00".format(times))
            result.append(line['task_date'])
        print "\t".join(format_list(result))
    except:
        # NOTE(review): bare except swallows all errors, including KeyboardInterrupt;
        # a narrower except (ValueError, KeyError) would be safer.
        print "$$$$$$$$$$$$$ ex"
        pass
|
[
"luoshao23@gmail.com"
] |
luoshao23@gmail.com
|
f79fe5e3d38708362ecb883e7298586ff89912a3
|
0dae97b2205ef5d8ce884ec2af4bf99ad2baec43
|
/drf_admin/apps/monitor/views/error.py
|
85e4282f3af0cc742498d883c97e4d8ba6ab05f3
|
[
"MIT"
] |
permissive
|
15051882416/drf_admin
|
2520affacd0345d042b499c3e9a56a112cc235d5
|
0b31fa5248afb6fc20e6ef425b2dcc4d39977d81
|
refs/heads/master
| 2022-12-31T04:57:27.017134
| 2020-10-24T01:09:58
| 2020-10-24T01:09:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
# -*- coding: utf-8 -*-
"""
@author : Wang Meng
@github : https://github.com/tianpangji
@software : PyCharm
@file : error.py
@create : 2020/10/3 16:18
"""
from rest_framework import status
from rest_framework.filters import SearchFilter
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from monitor.models import ErrorLogs
from monitor.serializers.error import ErrorLogsSerializer
class ErrorLogAPIView(ListAPIView):
    """
    get:
    Monitoring -- error log list
    Error log list, status: 200 (success), return: error log list info
    delete:
    Monitoring -- clear error logs
    Clear error logs, status: 204 (success), return: None
    """
    queryset = ErrorLogs.objects.all()
    serializer_class = ErrorLogsSerializer
    filter_backends = (SearchFilter,)
    search_fields = ('username', 'view', 'desc', 'ip')
    def delete(self, request):
        # Deletes every row of the unfiltered class queryset — search filters
        # do not restrict this wipe.
        self.queryset.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
|
[
"921781999@qq.com"
] |
921781999@qq.com
|
8b6ef84075551101e3b0b9f5f29542a3f477fbe9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03455/s301099697.py
|
d18a961cc423fd1f50780324308fcbf6869269e6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
# Read two integers from one stdin line; print "Even" if their product is
# even (i.e. either factor is even), otherwise "Odd".
# FIX: the original rebound the builtin name `input` to the token list,
# shadowing the builtin; use a distinct variable instead.
tokens = input().strip().split()
a = int(tokens[0])
b = int(tokens[1])
if a % 2 == 0 or b % 2 == 0:
    print('Even')
else:
    print('Odd')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
59b74c155bf78c020afb0694200450f11e982f0e
|
0e4d09b2a1b93aaa6d623d16905854d993a934ae
|
/Python/Django/belt_reviewer/apps/bookReviews/apps.py
|
f6a890ac833bc47e8802d8b4cb392f83db148f59
|
[] |
no_license
|
freefaller69/DojoAssignments
|
ee7f6308b02041be3244f795422e0e044d4a41b2
|
f40426ac448026c1172048665f36024ad22f0d81
|
refs/heads/master
| 2021-01-17T10:23:39.419514
| 2017-07-25T00:50:41
| 2017-07-25T00:50:41
| 84,012,790
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class BookreviewsConfig(AppConfig):
    """Django app configuration for the bookReviews application."""
    name = 'bookReviews'
|
[
"freefaller@gmail.com"
] |
freefaller@gmail.com
|
4f38cefdcab4a44e41529b84691a9e960842084c
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_falconers.py
|
0fce1a8cca8e6471c93e68ec9dd97d82dc818c42
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# class header
class _FALCONERS():
def __init__(self,):
self.name = "FALCONERS"
self.definitions = falconer
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['falconer']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
bd9179a9b52e2b845931041c86375a59b7643ac9
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/accelbyte_py_sdk/api/ugc/operations/anonymization/delete_all_user_channel.py
|
e79b27369aad54884a209d7c6733f5955d9219bd
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 6,830
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# AccelByte Gaming Services Ugc Service (2.11.3)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ResponseError
class DeleteAllUserChannel(Operation):
    """Delete all user channel (DeleteAllUserChannel)

    Required permission NAMESPACE:{namespace}:USER:{userId}:CHANNEL [DELETE]
    Required Permission(s):
        - NAMESPACE:{namespace}:USER:{userId}:CHANNEL [DELETE]
    Properties:
        url: /ugc/v1/public/namespaces/{namespace}/users/{userId}/channels
        method: DELETE
        tags: ["Anonymization"]
        consumes: ["application/json", "application/octet-stream"]
        produces: ["application/json"]
        securities: [BEARER_AUTH]
        namespace: (namespace) REQUIRED str in path
        user_id: (userId) REQUIRED str in path
    Responses:
        204: No Content - (No Content)
        401: Unauthorized - ResponseError (Unauthorized)
        404: Not Found - ResponseError (Not Found)
        500: Internal Server Error - ResponseError (Internal Server Error)
    """
    # NOTE: generated code (see file header) — only comments added here.
    # region fields
    _url: str = "/ugc/v1/public/namespaces/{namespace}/users/{userId}/channels"
    _method: str = "DELETE"
    _consumes: List[str] = ["application/json", "application/octet-stream"]
    _produces: List[str] = ["application/json"]
    _securities: List[List[str]] = [["BEARER_AUTH"]]
    _location_query: str = None
    namespace: str  # REQUIRED in [path]
    user_id: str  # REQUIRED in [path]
    # endregion fields
    # region properties
    @property
    def url(self) -> str:
        return self._url
    @property
    def method(self) -> str:
        return self._method
    @property
    def consumes(self) -> List[str]:
        return self._consumes
    @property
    def produces(self) -> List[str]:
        return self._produces
    @property
    def securities(self) -> List[List[str]]:
        return self._securities
    @property
    def location_query(self) -> str:
        return self._location_query
    # endregion properties
    # region get methods
    # endregion get methods
    # region get_x_params methods
    def get_all_params(self) -> dict:
        return {
            "path": self.get_path_params(),
        }
    def get_path_params(self) -> dict:
        result = {}
        if hasattr(self, "namespace"):
            result["namespace"] = self.namespace
        if hasattr(self, "user_id"):
            result["userId"] = self.user_id
        return result
    # endregion get_x_params methods
    # region is/has methods
    # endregion is/has methods
    # region with_x methods
    def with_namespace(self, value: str) -> DeleteAllUserChannel:
        self.namespace = value
        return self
    def with_user_id(self, value: str) -> DeleteAllUserChannel:
        self.user_id = value
        return self
    # endregion with_x methods
    # region to methods
    def to_dict(self, include_empty: bool = False) -> dict:
        result: dict = {}
        if hasattr(self, "namespace") and self.namespace:
            result["namespace"] = str(self.namespace)
        elif include_empty:
            result["namespace"] = ""
        if hasattr(self, "user_id") and self.user_id:
            result["userId"] = str(self.user_id)
        elif include_empty:
            result["userId"] = ""
        return result
    # endregion to methods
    # region response methods
    # noinspection PyMethodMayBeStatic
    def parse_response(
        self, code: int, content_type: str, content: Any
    ) -> Tuple[None, Union[None, HttpResponse, ResponseError]]:
        """Parse the given response.

        204: No Content - (No Content)
        401: Unauthorized - ResponseError (Unauthorized)
        404: Not Found - ResponseError (Not Found)
        500: Internal Server Error - ResponseError (Internal Server Error)
        ---: HttpResponse (Undocumented Response)
        ---: HttpResponse (Unexpected Content-Type Error)
        ---: HttpResponse (Unhandled Error)
        """
        pre_processed_response, error = self.pre_process_response(
            code=code, content_type=content_type, content=content
        )
        if error is not None:
            return None, None if error.is_no_content() else error
        code, content_type, content = pre_processed_response
        if code == 204:
            return None, None
        if code == 401:
            return None, ResponseError.create_from_dict(content)
        if code == 404:
            return None, ResponseError.create_from_dict(content)
        if code == 500:
            return None, ResponseError.create_from_dict(content)
        return self.handle_undocumented_response(
            code=code, content_type=content_type, content=content
        )
    # endregion response methods
    # region static methods
    @classmethod
    def create(cls, namespace: str, user_id: str, **kwargs) -> DeleteAllUserChannel:
        instance = cls()
        instance.namespace = namespace
        instance.user_id = user_id
        return instance
    @classmethod
    def create_from_dict(
        cls, dict_: dict, include_empty: bool = False
    ) -> DeleteAllUserChannel:
        instance = cls()
        if "namespace" in dict_ and dict_["namespace"] is not None:
            instance.namespace = str(dict_["namespace"])
        elif include_empty:
            instance.namespace = ""
        if "userId" in dict_ and dict_["userId"] is not None:
            instance.user_id = str(dict_["userId"])
        elif include_empty:
            instance.user_id = ""
        return instance
    @staticmethod
    def get_field_info() -> Dict[str, str]:
        return {
            "namespace": "namespace",
            "userId": "user_id",
        }
    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        return {
            "namespace": True,
            "userId": True,
        }
    # endregion static methods
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
1f214def1e5f25602cc33de641d1c798d8190ae4
|
3ae36a5791c26bb7b41a6ed7d81d16cb45cfb8c9
|
/python_sicp/homework4.py
|
a0a8a7a7f8df743e9a01f1bf94f594462968a121
|
[] |
no_license
|
crossin/Crossin-practices
|
0ef23022e3f298862aa831a7cb9684dc4aa04653
|
1b0cbe8db9b947122c40dcfca4ae883cd99b6087
|
refs/heads/master
| 2021-01-01T16:42:52.298084
| 2017-07-11T01:17:38
| 2017-07-11T01:17:38
| 97,899,778
| 1
| 0
| null | 2017-07-21T02:58:33
| 2017-07-21T02:58:33
| null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
#question1
def make_counter():
    """Return a closure that counts how many times each key has been seen."""
    seen = {}
    def counter(x):
        seen[x] = seen.get(x, 0) + 1
        return seen[x]
    return counter
# Exercise the counter: 'a' and 'b' are each counted once here.
c = make_counter()
c('a')
c('b')
# print(c('c'))
# question2
def make_fib():
    """Return a closure yielding successive Fibonacci numbers: 1, 1, 2, 3, 5, ..."""
    current, upcoming = 0, 1
    def fib():
        nonlocal current, upcoming
        # Advance the pair in one step; return the value just reached.
        current, upcoming = upcoming, current + upcoming
        return current
    return fib
# Print the first four Fibonacci numbers produced by the closure.
f = make_fib()
print(f())  # 1
print(f())  # 1
print(f())  # 2
print(f())  # 3
|
[
"782744680@qq.com"
] |
782744680@qq.com
|
f06b8923cf042a7a8f0b46519c24463e8c09ceab
|
0eaf0d3f0e96a839f2ef37b92d4db5eddf4b5e02
|
/abc132/b.py
|
1ce7d09d63f948f5793250950b03d97d686964c4
|
[] |
no_license
|
silphire/atcoder
|
b7b02798a87048757745d99e8564397d1ca20169
|
f214ef92f13bc5d6b290746d5a94e2faad20d8b0
|
refs/heads/master
| 2023-09-03T17:56:30.885166
| 2023-09-02T14:16:24
| 2023-09-02T14:16:24
| 245,110,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# Count positions whose value is the median of itself and its two neighbours.
n = int(input())
p = list(map(int, input().split()))
x = 0
for left in range(n - 2):
    window = sorted(p[left:left + 3])
    if window[1] == p[left + 1]:
        x += 1
print(x)
|
[
"silphire@gmail.com"
] |
silphire@gmail.com
|
27f4ae0b2cabf4a2f7cb7b767fca5ee8f99b9cb5
|
699b5dbc51b5a8bc22d0e0e5b6ce7287c9948603
|
/tests/conftest.py
|
8b5746dfa8d6085a2f3e8e27c4af358027be8ae6
|
[] |
no_license
|
gvalkov/riemann-python-api
|
ccf3db14e620a274db0a748472c93b3ddcabb619
|
873222dfdd61670333dbcf6804755a250357ebc4
|
refs/heads/master
| 2021-01-16T21:16:43.177708
| 2016-07-21T23:07:13
| 2016-07-21T23:07:13
| 62,182,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
import pytest
@pytest.fixture
def dummy_socket():
    # Fresh fake socket per test (see DummySocket below).
    return DummySocket()
class DummySocket:
    """Minimal socket stand-in: recv() replays canned chunks, ending with b''."""
    def __init__(self):
        # Chunks recv() hands out in order; the trailing b'' signals EOF.
        self.data = [b'hello', b'world', b'']
    def recv(self, bufsize):
        # bufsize is accepted for API compatibility but not honoured.
        chunk = self.data[0]
        del self.data[0]
        return chunk
@pytest.fixture
def transport():
    # Placeholder fixture; no transport implementation is wired up yet.
    return None
|
[
"georgi.t.valkov@gmail.com"
] |
georgi.t.valkov@gmail.com
|
5cecbd6920e73728d767dc6630ee6999dac4c5fa
|
d93159d0784fc489a5066d3ee592e6c9563b228b
|
/FWCore/Services/test/fpe_test_2_cfg.py
|
c8967e9decfd318751718a2b93c2a094c3482857
|
[] |
permissive
|
simonecid/cmssw
|
86396e31d41a003a179690f8c322e82e250e33b2
|
2559fdc9545b2c7e337f5113b231025106dd22ab
|
refs/heads/CAallInOne_81X
| 2021-08-15T23:25:02.901905
| 2016-09-13T08:10:20
| 2016-09-13T08:53:42
| 176,462,898
| 0
| 1
|
Apache-2.0
| 2019-03-19T08:30:28
| 2019-03-19T08:30:24
| null |
UTF-8
|
Python
| false
| false
| 2,141
|
py
|
# Unit test configuration file for EnableFloatingPointExceptions service
import os  # Since we have a general-purpose programming language, we'll use it!
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
import FWCore.Framework.test.cmsExceptionsFatal_cff
process.options = FWCore.Framework.test.cmsExceptionsFatal_cff.options
process.load("FWCore.Services.InitRootHandlers_cfi")
# Per-module floating-point exception masks. Several flags are driven by
# environment variables so the same config serves multiple test variants.
# NOTE(review): eval() on env vars is acceptable only because this is a
# test-harness config with trusted inputs ("True"/"False" strings).
process.EnableFloatingPointExceptions = cms.Service("EnableFloatingPointExceptions",
    moduleNames = cms.untracked.vstring('default', 'nofpe', 'module2'),
    default = cms.untracked.PSet(
        enableOverFlowEx = cms.untracked.bool(eval(os.getenv("OVERFLOW"))),
        enableDivByZeroEx = cms.untracked.bool(False),
        enableInvalidEx = cms.untracked.bool(eval(os.getenv("INVALID"))),
        enableUnderFlowEx = cms.untracked.bool(eval(os.getenv("UNDERFLOW")))
    ),
    module2 = cms.untracked.PSet(
        enableOverFlowEx = cms.untracked.bool(False),
        enableDivByZeroEx = cms.untracked.bool(eval(os.getenv("DIVIDEBYZERO"))),
        enableInvalidEx = cms.untracked.bool(False),
        enableUnderFlowEx = cms.untracked.bool(False)
    ),
    nofpe = cms.untracked.PSet(
        enableOverFlowEx = cms.untracked.bool(True),
        enableDivByZeroEx = cms.untracked.bool(True),
        enableInvalidEx = cms.untracked.bool(True),
        enableUnderFlowEx = cms.untracked.bool(True)
    ),
    setPrecisionDouble = cms.untracked.bool(True),
    reportSettings = cms.untracked.bool(False)
)
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
# One analyzer per FPE category, plus a control module with all FPEs enabled.
process.module1 = cms.EDAnalyzer("FpeTester", testname = cms.string("overflow"))
process.module2 = cms.EDAnalyzer("FpeTester", testname = cms.string("division"))
process.module3 = cms.EDAnalyzer("FpeTester", testname = cms.string("invalid"))
process.module4 = cms.EDAnalyzer("FpeTester", testname = cms.string("underflow"))
process.nofpe = cms.EDAnalyzer("FpeTester", testname = cms.string("nofpe"))
process.p = cms.Path(process.nofpe*process.module1*process.module2*process.module3*process.module4)
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
31d1db09e594ff0a03df0641f7486c2caaebbadf
|
1ada3010856e39c93e2483c960aa8fc25e2b3332
|
/TopInterviewQuestions/BinarySearchIterative.py
|
2a47e4120cd7ba3dd11f23e4f40a9d9730fbea71
|
[] |
no_license
|
Taoge123/LeetCode
|
4f9e26be05f39b37bdbb9c1e75db70afdfa1b456
|
4877e35a712f59bc7b8fffa3d8af2ffa56adb08c
|
refs/heads/master
| 2022-02-24T20:09:21.149818
| 2020-07-31T03:18:05
| 2020-07-31T03:18:05
| 142,700,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
def binarySearch(arr, l, r, x):
    """Iterative binary search for *x* in sorted arr[l..r] (inclusive).

    Returns the index of *x*, or -1 when it is absent.
    """
    lo, hi = l, r
    while lo <= hi:
        mid = lo + (hi - lo) // 2  # overflow-safe midpoint form
        probe = arr[mid]
        if probe == x:
            return mid
        if probe < x:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
# Demo driver: search for 10 in a small sorted array and report the result.
arr = [2, 3, 4, 10, 40]
x = 10
# Function call
result = binarySearch(arr, 0, len(arr) - 1, x)
if result != -1:
    print("Element is present at index %d" % result)
else:
    print("Element is not present in array")
|
[
"taocheng984@gmail.com"
] |
taocheng984@gmail.com
|
03b2d22ccc0320ef2c505d0c9c9187a3a442d8fc
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_rosebud.py
|
32f351d1d00dcba1d7d029720f46c5640a86bcc3
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
# class header
class _ROSEBUD():
def __init__(self,):
self.name = "ROSEBUD"
self.definitions = [u'the beginning stage of a rose flower']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
3475f2836d81a4eaf92185524a9ef8a17f6e6b76
|
72e5338e393ce7ced7b9737542b84dc4257659b0
|
/migen/test/test_sort.py
|
163be8c2f38036b24d895c8566ecc0f5bec0619f
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mogorman/migen
|
2a2c86feb79f065a6365a6f615c93a9ef916b184
|
467272f1a77be616ccbed8a5b2e1a0756ce59b6b
|
refs/heads/master
| 2021-01-17T21:37:46.782144
| 2015-03-10T05:30:28
| 2015-03-10T05:30:28
| 30,615,751
| 1
| 0
| null | 2015-02-10T21:34:19
| 2015-02-10T21:34:19
| null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
import unittest
from random import randrange
from migen.fhdl.std import *
from migen.genlib.sort import *
from migen.test.support import SimCase, SimBench
class BitonicCase(SimCase, unittest.TestCase):
    """Simulation tests for the migen BitonicSort generator (8 inputs, 4 bits)."""
    class TestBench(SimBench):
        def __init__(self):
            # Device under test: ascending bitonic sorter, 8 signals of 4 bits.
            self.submodules.dut = BitonicSort(8, 4, ascending=True)
    def test_sizes(self):
        # Structural check: 8 inputs/outputs, each 4 bits wide.
        self.assertEqual(len(self.tb.dut.i), 8)
        self.assertEqual(len(self.tb.dut.o), 8)
        for i in range(8):
            self.assertEqual(flen(self.tb.dut.i[i]), 4)
            self.assertEqual(flen(self.tb.dut.o[i]), 4)
    def test_sort(self):
        def cb(tb, tbp):
            # Drive random values onto every input, then require the outputs
            # to equal the sorted inputs on each of the 20 simulated cycles.
            for i in tb.dut.i:
                tbp.simulator.wr(i, randrange(1<<flen(i)))
            self.assertEqual(sorted(list(tbp.dut.i)), list(tbp.dut.o))
        self.run_with(cb, 20)
|
[
"sebastien@milkymist.org"
] |
sebastien@milkymist.org
|
634c722b3755f68c71a2049285d7c29e6e4b3ca9
|
3633bab8066f576c8bf9e7908afe30bb070d0b70
|
/Hack-ninth-week/1-Money-In-The-Bank/Client.py
|
f57203098ff116bb9ce1ebabcf4a1af2776aa555
|
[] |
no_license
|
6desislava6/Hack-Bulgaria
|
099c195e45a443cf4a3342eff6612ac2aa66565b
|
de4bf7baae35e21d6a7b27d4bde68247bb85b67a
|
refs/heads/master
| 2021-01-20T11:57:29.027595
| 2015-06-02T17:36:59
| 2015-06-02T17:36:59
| 32,828,816
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
class Client():
    """Bank client record: id, username, balance, message and email.

    Attributes use double-underscore name mangling; access goes through the
    getter/setter methods below (callers depend on these method names).
    NOTE(review): @property accessors would be more Pythonic, but the getter
    interface is kept as-is for compatibility.
    """
    def __init__(self, id, username, balance, message, email):
        self.__username = username
        self.__balance = balance
        self.__id = id
        self.__message = message
        self.__email = email
    def get_username(self):
        return self.__username
    def get_balance(self):
        return self.__balance
    def get_id(self):
        return self.__id
    def get_message(self):
        return self.__message
    def set_message(self, new_message):
        self.__message = new_message
    def set_email(self, new_email):
        self.__email = new_email
    def get_email(self):
        return self.__email
|
[
"desislavatsvetkova@mail.bg"
] |
desislavatsvetkova@mail.bg
|
86b9eb36ba14748eb10a6c8ae0c92d61abc315bf
|
c6d852e5842cf6f74123445d20ff03876377ae26
|
/lemon/python22/lemon_06_190828_for_while_函数/优秀作业_0828/homework_6.py
|
f4d0cbc78178c71c569b3205de28dd577e11abb0
|
[] |
no_license
|
songyongzhuang/PythonCode_office
|
0b3d35ca5d58bc305ae90fea8b1e8c7214619979
|
cfadd3132c2c7c518c784589e0dab6510a662a6c
|
refs/heads/master
| 2023-02-13T14:06:10.610935
| 2021-01-14T09:11:32
| 2021-01-14T09:11:32
| 327,183,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,451
|
py
|
# _*_ coding: UTF-8 _*_
# @Time :2019-08-29 09:18
# @Author :清莲
# @FileName :homework_6.py
# @Software :PyCharm
# Exercise 1: read three integers and print the largest one.
# NOTE(review): `list` shadows the builtin, and the bare except hides
# non-input errors; both kept unchanged here.
list = []
i = 0
print("输入三个整数,最后返回你最大的数字~")
while i != 3:
    try:
        num = int(input())
        list.append(num)
        i += 1
    except:
        print("请输入整型数字")
print("最大的数字是", max(list))
# Exercise 2: print the 9x9 multiplication table.
print("\n九九乘法表:")
i = 1
while i != 10:
    j = 1
    while j <= i:
        print("{0} * {1} = {2}".format(j, i, i * j), end='\t')
        j += 1
    print()
    i += 1
# Exercise 3: delete all elements from a list (two alternatives shown below
# inside the unassigned string literals, which act as commented-out code).
black_list = ['卖茶叶', '卖面膜', '卖保险', '卖花生', '卖手机']
black_list.clear()
"""第二种方法
black_list = ['卖茶叶', '卖面膜', '卖保险', '卖花生', '卖手机']
del black_list
black_list = []
"""
"""第三种方法:我猜实际希望操作为通过循环一个一个删除
black_list = ['卖茶叶', '卖面膜', '卖保险', '卖花生', '卖手机']
for i in range(black_list.__len__()):
    black_list.pop()
"""
# Exercise 4: implement sorting with loops.
# Classic sorting algorithms include bubble, selection, insertion, merge, shell, quick and heap sort; the three most basic ones are written below.
def bubbleSort(arr):
    """In-place bubble sort (ascending); returns the same list for convenience."""
    n = len(arr)
    for done in range(1, n):
        # After `done` passes, the last `done` slots hold their final values.
        for k in range(n - done):
            if arr[k] > arr[k + 1]:
                arr[k], arr[k + 1] = arr[k + 1], arr[k]
    return arr
def selectionSort(arr):
    """In-place selection sort (ascending); returns the same list for convenience."""
    n = len(arr)
    for fill in range(n - 1):
        # Find the smallest remaining element and swap it into position `fill`.
        smallest = fill
        for probe in range(fill + 1, n):
            if arr[probe] < arr[smallest]:
                smallest = probe
        if smallest != fill:
            arr[fill], arr[smallest] = arr[smallest], arr[fill]
    return arr
def insertionSort(arr):
    """In-place insertion sort (ascending); returns the same list for convenience."""
    for pos in range(len(arr)):
        value = arr[pos]
        # Shift larger elements of the sorted prefix right, then drop `value` in.
        scan = pos - 1
        while scan >= 0 and arr[scan] > value:
            arr[scan + 1] = arr[scan]
            scan -= 1
        arr[scan + 1] = value
    return arr
# Demo: sort a sample list with the bubble sort above and print it.
a = [1, 7, 4, 89, 34, 2]
print("\n排序后的a:", bubbleSort(a))
# Exercise 5: define a function that checks whether a login succeeds.
def setUp(user, password):
    """Print a success message for the fixed lemon/best credentials, else an error."""
    if user == 'lemon' and password == 'best':
        print("登录系统成功")
    else:
        print("用户名或密码错误")
# Prompt for credentials on stdin and run the check.
user = input("\n用户名:")
password = input("密码:")
setUp(user, password)
|
[
"songyongzhuang9@163.com"
] |
songyongzhuang9@163.com
|
48b7314ccc78a5208c9f222e43dc1dfa9beb3baf
|
0b69a011c9ffee099841c140be95ed93c704fb07
|
/problemsets/Codeforces/Python/A1207.py
|
b1961a8fecbfbe5bbae1031de06c32ef6f6ef39a
|
[
"Apache-2.0"
] |
permissive
|
juarezpaulino/coderemite
|
4bd03f4f2780eb6013f07c396ba16aa7dbbceea8
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
refs/heads/main
| 2023-01-31T11:35:19.779668
| 2020-12-18T01:33:46
| 2020-12-18T01:33:46
| 320,931,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
for _ in '0'*int(input()):
b,p,f=map(int,input().split())
h,c=map(int,input().split())
if h<c: h,c,p,f=c,h,f,p
b//=2; t=min(p,b); b-=t
print(h*t+c*min(b,f))
|
[
"juarez.paulino@gmail.com"
] |
juarez.paulino@gmail.com
|
453ad9e3e455b7dd53970c8dae92d54c5ff91fc4
|
72488f37a830b7a2d29be0dc98815ef3fac1250b
|
/examples/tox21/tox21_DAG.py
|
acb32eabd5cf75c632c4c636c4d0625965cda61f
|
[
"MIT"
] |
permissive
|
mhejrati/deepchem
|
d62ffebf3dfe680534ebcca528302ca31dbdf95b
|
8a35de2ec17312a8630690387e730d18b5267a93
|
refs/heads/master
| 2021-01-18T20:22:43.834707
| 2017-04-01T22:42:42
| 2017-04-01T22:42:42
| 86,959,622
| 1
| 0
| null | 2017-04-02T03:15:45
| 2017-04-02T03:15:45
| null |
UTF-8
|
Python
| false
| false
| 1,977
|
py
|
"""
Script that trains DAG models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
# Load Tox21 dataset
# Load the Tox21 benchmark with graph-convolution featurization.
tox21_tasks, tox21_datasets, transformers = dc.molnet.load_tox21(
    featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = tox21_datasets
# Fit models
metric = dc.metrics.Metric(
    dc.metrics.roc_auc_score, np.mean, mode="classification")
# The DAG transformer needs the largest molecule size across all splits.
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
transformer = dc.trans.DAGTransformer(max_atoms=max_atoms)
# Reshard before transforming to keep per-shard memory bounded.
train_dataset.reshard(512)
train_dataset = transformer.transform(train_dataset)
valid_dataset.reshard(512)
valid_dataset = transformer.transform(valid_dataset)
test_dataset.reshard(512)
test_dataset = transformer.transform(test_dataset)
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 64
graph = dc.nn.SequentialDAGGraph(75, batch_size=batch_size, max_atoms=max_atoms)
graph.add(dc.nn.DAGLayer(30, 75, max_atoms=max_atoms))
graph.add(dc.nn.DAGGather(max_atoms=max_atoms))
model = dc.models.MultitaskGraphClassifier(
    graph,
    len(tox21_tasks),
    n_feat,
    batch_size=batch_size,
    learning_rate=1e-3,
    learning_rate_decay_time=1000,
    optimizer_type="adam",
    beta1=.9,
    beta2=.999)
# Fit trained model
model.fit(train_dataset, nb_epoch=20, log_every_N_batches=5)
print("Evaluating model")
# Mean ROC-AUC over the 12 Tox21 tasks, on train and validation splits.
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
|
[
"zqwu@stanford.edu"
] |
zqwu@stanford.edu
|
771a079115d604f1bcfedc48fe6db067bc10275b
|
048c6b84e679a3e81bf7b4980ad2b4a99781b9b7
|
/tests/unit/qm/corfunctions/spectraldensities_test.py
|
e42af2cdcb2f8907d0c9d94e242a331c08acd2bd
|
[] |
no_license
|
saayeh/quantarhei
|
9b7a7c60e1325ef783bdbc9ac4b6f33a13301802
|
b77a41272b7df0ccbcde2710bf04bf412c126a6f
|
refs/heads/master
| 2020-12-07T06:29:27.954470
| 2017-09-01T21:09:45
| 2017-09-01T21:09:45
| 66,932,421
| 0
| 0
| null | 2016-08-30T10:52:11
| 2016-08-30T10:52:11
| null |
UTF-8
|
Python
| false
| false
| 3,212
|
py
|
# -*- coding: utf-8 -*-
import unittest
import numpy
import matplotlib.pyplot as plt
"""
*******************************************************************************
Tests of the quantarhei.qm.corfunctions.spectraldensities module
*******************************************************************************
"""
from quantarhei import SpectralDensity, CorrelationFunction
from quantarhei import TimeAxis
from quantarhei import energy_units
class TestSpectralDensity(unittest.TestCase):
"""Tests spectral densities module
"""
def test_underdamped_brownian_oscillator(self):
"""Testing Underdamped Brownian oscillator spectral density
Builds the same composite bath two ways -- by summing correlation
functions and by summing spectral densities -- and checks both routes
agree, including the round trip back to a correlation function.
"""
# Base underdamped mode; energies are in 1/cm (see energy_units below).
par = dict(ftype="UnderdampedBrownian",
reorg = 1.0,
freq = 500.0,
gamma = 1.0/500.0)
# One overdamped background mode.
parO = dict(ftype="OverdampedBrownian",
reorg = 200.0,
cortime = 100.0,
T = 300.0)
par["T"] = 300.0
# Five more underdamped modes, shifted by +200 1/cm each (700..1500).
params = []
for i in range(5):
p = par.copy()
p["freq"] = par["freq"] + (i+1)*200.0
params.append(p)
time = TimeAxis(0.0, 100000, 1.0)
#
# Adding through correlation functions
#
with energy_units("1/cm"):
sd = SpectralDensity(time, par)
cf = sd.get_CorrelationFunction(temperature=300)
#cf.plot()
tot_cf = cf
tot_cf.axis = time
for p in params:
sd = SpectralDensity(time,p)
cf = sd.get_CorrelationFunction(temperature=300)
cf.axis = time
tot_cf += cf
#tot_cf.plot(show=False)
ct = CorrelationFunction(time, parO)
tot_cf += ct
# Route 1: sum correlation functions first, then convert.
tot_sd1 = tot_cf.get_SpectralDensity()
#tot_sd1.plot(show=False)
#tt.plot()
#
# Adding through SpectralDensity
#
with energy_units("1/cm"):
sd = SpectralDensity(time, par)
ax = sd.axis
tot_sd2 = sd
for p in params:
sd = SpectralDensity(time, p)
sd.axis = ax
tot_sd2 += sd
ov = SpectralDensity(time, parO)
ov.axis = ax
tot_sd2 += ov
#tot_sd2.plot(color="-r")
# The two construction orders must produce the same spectral density.
numpy.testing.assert_allclose(tot_sd1.data, tot_sd2.data, atol=1.0e-3)
cf1 = tot_sd1.get_CorrelationFunction(temperature=300)
cf2 = tot_sd2.get_CorrelationFunction(temperature=300)
#cf1.plot(show=False)
#cf2.plot(color="-r", axis=[0.0, 2000,
# numpy.min(cf1.data)-numpy.max(cf1.data)*0.1,
# numpy.max(cf1.data)*1.1])
# ... and the same correlation function after the round trip.
numpy.testing.assert_allclose(cf1.data, cf2.data, atol=1.0e-3)
|
[
"tmancal74@gmail.com"
] |
tmancal74@gmail.com
|
4866fa215547659f317b66100bf9c6726089084b
|
9bb78acf73e7ab74e3f85078499a4520594f060f
|
/concat_wiki.py
|
1d1a6567e67f227989255c85c72795ef42abfcda
|
[
"Apache-2.0"
] |
permissive
|
VNGResearch/crawl_news
|
e65f8ae2c4f0d7cbe51c4e072f3e9200c4490ddd
|
187dfc9fa228435669a81f20f8d4d8e7b9bdf2fd
|
refs/heads/master
| 2021-01-13T13:32:01.952767
| 2016-12-05T08:55:57
| 2016-12-05T08:55:57
| 72,624,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
'''Format wiki text after WikiExtractor has processed a dump archive.

Walks every extracted ``wiki_*`` shard under ``dir_in``, strips the
``<doc>`` wrappers and the article title line, and writes each article
as a single line of ``wiki_concat.txt``.
'''
import os, glob, pdb

dir_in = './data/wiki/text/'
dir_out = './data/wiki/'

# Wiki dumps are UTF-8; pin the encoding instead of relying on the locale.
with open(os.path.join(dir_out, 'wiki_concat.txt'), 'w', encoding='utf-8') as fw:
    for d in os.listdir(dir_in):
        print('===================', d)
        for filename in glob.iglob(os.path.join(dir_in, d) + '/wiki_*'):
            #print('process {}'.format(filename))
            content = ''
            title = True
            with open(filename, encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    if line.startswith('<doc'):
                        # A new article begins: reset the buffer and expect a title.
                        content = ''
                        title = True
                        continue
                    if title:
                        # First non-empty line after <doc> is the title -- skip it.
                        title = False
                        continue
                    if line.startswith('</doc'):
                        # Article finished: emit it as one line.
                        fw.write(content.strip() + '\n')
                        #pdb.set_trace()
                    else:
                        content += ' ' + line
|
[
"thanhlct@gmail.com"
] |
thanhlct@gmail.com
|
8753e00f80c068ee6d8255bab175a8bb7d47ecc7
|
8b11fb374dca3b0515dc804aae66921201653a19
|
/checkio/logistic-golf.py
|
e5441b5be0e15166f92f0b463fbd4da08c9d9ac0
|
[
"Giftware"
] |
permissive
|
nptit/python-snippets
|
670b8d672e2ad3a6de264f21187bb497a7b0779b
|
b1eab44b50765e1710529747fd07a5ce320dd860
|
refs/heads/master
| 2021-01-25T11:27:32.989352
| 2016-04-26T17:28:22
| 2016-04-26T17:28:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
# Originally a permutation brute force marked "# too slow!" -- replaced
# with Dijkstra, which gives identical answers because all edge weights
# are positive (0 in the matrix means "no edge").
import heapq

def golf(m):
    """Return the cheapest path cost from node 0 to node len(m)-1.

    m is an adjacency matrix; m[u][v] > 0 is an edge weight, 0 means no
    edge.  Returns 0 when the target is unreachable or the graph has a
    single node (matching the original brute-force behaviour).
    """
    n = len(m)
    if n < 2:
        return 0
    inf = float('inf')
    best = {0: 0}
    heap = [(0, 0)]
    while heap:
        d, u = heapq.heappop(heap)
        if u == n - 1:
            return d
        if d > best.get(u, inf):
            continue  # stale heap entry
        for v in range(n):
            w = m[u][v]
            if w > 0 and d + w < best.get(v, inf):
                best[v] = d + w
                heapq.heappush(heap, (d + w, v))
    return 0

print(golf(((0, 80, 58, 0), (80, 0, 71, 80), (58, 71, 0, 58), (0, 80, 58, 0))) == 116)
|
[
"qpxu007@gmail.com"
] |
qpxu007@gmail.com
|
d0de9b6633cc377588297dcd15b40aac7d775ed4
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/QgSMSMpfcEebAyCye_8.py
|
06bd85d0df27fd7f46dc66b430fe2682ed749eff
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
"""
One cause for speeding is the desire to shorten the time spent traveling. In
long distance trips speeding does save an appreciable amount of time. However,
the same cannot be said about short distance trips.
Create a function that calculates the amount of time saved were you traveling
with an average speed that is _above_ the speed-limit as compared to traveling
with an average speed _exactly at_ the speed-limit.
### Examples
# The parameter's format is as follows:
# (speed limit, avg speed, distance traveled at avg speed)
time_saved(80, 90, 40) ➞ 3.3
time_saved(80, 90, 4000) ➞ 333.3
time_saved(80, 100, 40 ) ➞ 6.0
time_saved(80, 100, 10) ➞ 1.5
### Notes
* Speed = distance/time
* The time returned should be in **minutes** , not hours.
"""
def time_saved(s_lim, s_avg, d):
    """Minutes saved covering distance d at speed s_avg instead of the limit s_lim."""
    minutes_at_limit = (d / s_lim) * 60
    minutes_at_avg = (d / s_avg) * 60
    return round(minutes_at_limit - minutes_at_avg, 1)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
9b7935ea289223dec3fdf5cf5c323d2d1c109180
|
358519772669c73092f625f630722c38e1d33783
|
/DatabaseTopology/Force/G96Angle.py
|
387d9265f6154a9856b137dc70e643a014848156
|
[] |
no_license
|
minghao2016/mmtools
|
e7e61aca084498408ceae965dd6c9450ad89eafa
|
3ade988afb51cd54ee5a4067d8deaad88afbb0fe
|
refs/heads/master
| 2021-09-21T01:02:22.522187
| 2014-09-19T03:40:03
| 2014-09-19T03:40:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
from Topology.Decorators import *
from Topology.Force.AbstractAngle import *
class G96Angle(AbstractAngle):
    """GROMOS-96 style angle force between three bonded atoms.

    theta is the equilibrium angle (degrees) and k the force constant
    (kJ/mol); both are enforced by the units decorator on the constructor.
    """

    @accepts_compatible_units(None, None, None, units.degrees, units.kilojoules_per_mole)
    def __init__(self, atom1, atom2, atom3, theta, k):
        """Store the three atoms plus the angle parameters."""
        AbstractAngle.__init__(self, atom1, atom2, atom3)
        self.theta = theta
        self.k = k

    def getForceParameters(self):
        """Return the tuple consumed by the topology writer."""
        return (self.atom1, self.atom2, self.atom3, self.theta, self.k)

    def __repr__(self):
        # Bug fix: __repr__ must *return* a string.  The original used a
        # Python 2 `print` statement and implicitly returned None, which
        # breaks repr()/str() on this class.
        return '%s %s %s %s %s' % (self.atom1, self.atom2, self.atom3, self.theta, self.k)

    def __str__(self):
        return self.__repr__()
|
[
"choderaj@mskcc.org"
] |
choderaj@mskcc.org
|
6d23fa78b362bc10224a8f1806723888bb43209e
|
d725745f5c6b4ad99399aa50f368db39f5046f81
|
/angr_platforms/ebpf/arch_ebpf.py
|
ce69367fd4bf1d4c59ced9909b3599b4fed7282f
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr-platforms
|
6816d777ea4696af05290613a490e91b8daa79ea
|
06db4e6a594af47aaeb0a5071f2cdb9a8c30f7f5
|
refs/heads/master
| 2023-03-05T10:15:20.783462
| 2023-02-20T18:38:12
| 2023-02-20T18:38:12
| 86,003,468
| 60
| 28
|
BSD-2-Clause
| 2023-08-31T19:50:46
| 2017-03-23T22:28:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
from archinfo import Arch, Register, RegisterOffset, register_arch
class ArchExtendedBPF(Arch):
"""Extended BPF arch.

Describes the eBPF register file for archinfo: eleven 64-bit registers
(R0-R10) plus pseudo-registers for the syscall number and instruction
pointers, laid out as contiguous 8-byte slots in the VEX guest state.
"""
name = "eBPF"
bits = 64
vex_arch = None
qemu_name = "eBPF"
ida_processor = "eBPF"
# eBPF instructions are 8 bytes (wide loads use two slots); no alignment
# constraint is imposed here.
max_inst_bytes = 8
instruction_alignment = 1
register_list = [
# return value from in-kernel function, and exit value for eBPF
Register(name="R0", vex_offset=0, size=8),
# arguments from eBPF program to in-kernel function
Register(name="R1", vex_offset=8, size=8),
Register(name="R2", vex_offset=16, size=8),
Register(name="R3", vex_offset=24, size=8),
Register(name="R4", vex_offset=32, size=8),
Register(name="R5", vex_offset=40, size=8),
# callee-saved registers that in-kernel function will preserve
Register(name="R6", vex_offset=48, size=8),
Register(name="R7", vex_offset=56, size=8),
Register(name="R8", vex_offset=64, size=8),
Register(name="R9", vex_offset=72, size=8),
# read-only frame pointer to access stack
Register(
name="R10",
vex_offset=80,
size=8,
default_value=(Arch.initial_sp, True, (Arch.initial_sp, "stack")),
),
# syscall number extracted from instr
Register(name="syscall", vex_offset=88, size=8),
Register(name="ip", vex_offset=96, size=8),
Register(name="ip_at_syscall", vex_offset=104, size=8),
]
# R10 doubles as the (read-only) frame/base pointer.
bp_offset = RegisterOffset(80)
# Register this arch with archinfo under both spellings; 64-bit, any endness.
register_arch(["eBPF", "em_bpf"], 64, "any", ArchExtendedBPF)
|
[
"noreply@github.com"
] |
angr.noreply@github.com
|
ff642b72630d63d5b705af2645e7dff9048fd4f1
|
2aba62d66c2c622bdc148cef451da76cae5fd76c
|
/exercise/learn_python_dm2039/ch16/ch16_29.py
|
76ee38bf4b12556373fe7be46cb6fc70a6f66f03
|
[] |
no_license
|
NTUT-109AB8011/crawler
|
6a76de2ab1848ebc8365e071e76c08ca7348be62
|
a703ec741b48d3af615a757fed7607b1f8eb66a6
|
refs/heads/master
| 2023-03-26T22:39:59.527175
| 2021-03-30T03:29:22
| 2021-03-30T03:29:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
# ch16_29.py
# Demonstrates an anchored digits-only regex: findall returns the whole
# string when it is all digits, and an empty list otherwise.
import re

# Test 1: a string that is digits from start to end.
msg = '09282028222'
pattern = r'^\d+$'              # raw string literal -- the idiomatic form for regexes
txt = re.findall(pattern, msg)  # return the search result
print(txt)

# Test 2: the same pattern fails when non-digit characters are present.
msg = '0928tuyr990'
pattern = r'^\d+$'
txt = re.findall(pattern, msg)  # return the search result
print(txt)
|
[
"terranandes@gmail.com"
] |
terranandes@gmail.com
|
0aae5c8f8123a150649e4799b4773a3c13888325
|
b37e2bc89e3e3191194a6060e4bf7cef71482695
|
/train_vae.py
|
508b92a2dc58a2ee93c809857e5c895b577f7518
|
[
"MIT"
] |
permissive
|
biandh/DALLE-pytorch
|
b10bbc590c54b04fa60d2653d6934db86ee2633a
|
c2ccaa48b43fbb5c29b833c8cae082a797ffc8b5
|
refs/heads/main
| 2023-03-10T00:11:50.750174
| 2021-02-20T04:01:55
| 2021-02-20T04:01:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,489
|
py
|
# Train a DiscreteVAE (dalle-pytorch) on a folder of images, logging to
# Weights & Biases and checkpointing to vae.pt / vae-final.pt.
import math
from math import sqrt
import argparse
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
# vision imports
from torchvision import transforms as T
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
# dalle classes
from dalle_pytorch import DiscreteVAE
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--image_folder', type = str, required = True,
help='path to your folder of images for learning the discrete VAE and its codebook')
parser.add_argument('--image_size', type = int, required = False, default = 128,
help='image size')
args = parser.parse_args()
# constants
IMAGE_SIZE = args.image_size
IMAGE_PATH = args.image_folder
EPOCHS = 20
BATCH_SIZE = 8
LEARNING_RATE = 1e-3
LR_DECAY_RATE = 0.98
NUM_TOKENS = 8192
NUM_LAYERS = 2
NUM_RESNET_BLOCKS = 2
SMOOTH_L1_LOSS = False
EMB_DIM = 512
HID_DIM = 256
KL_LOSS_WEIGHT = 0
# Gumbel-softmax temperature schedule bounds.
STARTING_TEMP = 1.
TEMP_MIN = 0.5
ANNEAL_RATE = 1e-6
NUM_IMAGES_SAVE = 4
# data
# Images are normalized to [-1, 1] ((x - 0.5) / 0.5 per channel).
ds = ImageFolder(
IMAGE_PATH,
T.Compose([
T.Resize(IMAGE_SIZE),
T.CenterCrop(IMAGE_SIZE),
T.ToTensor(),
T.Normalize((0.5,) * 3, (0.5,) * 3)
])
)
dl = DataLoader(ds, BATCH_SIZE, shuffle = True)
# Kept separate so the same hparams dict can be saved with the weights.
vae_params = dict(
image_size = IMAGE_SIZE,
num_layers = NUM_LAYERS,
num_tokens = NUM_TOKENS,
codebook_dim = EMB_DIM,
hidden_dim = HID_DIM,
num_resnet_blocks = NUM_RESNET_BLOCKS
)
vae = DiscreteVAE(
**vae_params,
smooth_l1_loss = SMOOTH_L1_LOSS,
kl_div_loss_weight = KL_LOSS_WEIGHT
).cuda()
assert len(ds) > 0, 'folder does not contain any images'
print(f'{len(ds)} images found for training')
# optimizer
opt = Adam(vae.parameters(), lr = LEARNING_RATE)
sched = ExponentialLR(optimizer = opt, gamma = LR_DECAY_RATE)
# weights & biases experiment tracking
import wandb
# NOTE(review): wandb.config.* is assigned before wandb.init(); recent
# wandb versions expect init first (or wandb.init(config=...)) -- confirm
# this works with the pinned wandb version.
wandb.config.num_tokens = NUM_TOKENS
wandb.config.smooth_l1_loss = SMOOTH_L1_LOSS
wandb.config.num_resnet_blocks = NUM_RESNET_BLOCKS
wandb.config.kl_loss_weight = KL_LOSS_WEIGHT
wandb.init(project='dalle_train_vae')
# starting temperature
global_step = 0
temp = STARTING_TEMP
for epoch in range(EPOCHS):
for i, (images, _) in enumerate(dl):
images = images.cuda()
loss, recons = vae(
images,
return_loss = True,
return_recons = True,
temp = temp
)
opt.zero_grad()
loss.backward()
opt.step()
logs = {}
# Periodically log sample grids and checkpoint the model.
if i % 100 == 0:
k = NUM_IMAGES_SAVE
with torch.no_grad():
# Hard reconstructions decode the argmax codebook indices.
codes = vae.get_codebook_indices(images[:k])
hard_recons = vae.decode(codes)
images, recons = map(lambda t: t[:k], (images, recons))
images, recons, hard_recons, codes = map(lambda t: t.detach().cpu(), (images, recons, hard_recons, codes))
images, recons, hard_recons = map(lambda t: make_grid(t, nrow = int(sqrt(k)), normalize = True, range = (-1, 1)), (images, recons, hard_recons))
logs = {
**logs,
'sample images': wandb.Image(images, caption = 'original images'),
'reconstructions': wandb.Image(recons, caption = 'reconstructions'),
'hard reconstructions': wandb.Image(hard_recons, caption = 'hard reconstructions'),
'codebook_indices': wandb.Histogram(codes),
'temperature': temp
}
save_obj = {
'hparams': vae_params,
'weights': vae.state_dict()
}
torch.save(save_obj, f'vae.pt')
wandb.save('./vae.pt')
# temperature anneal
temp = max(temp * math.exp(-ANNEAL_RATE * global_step), TEMP_MIN)
# lr decay
sched.step()
if i % 10 == 0:
lr = sched.get_last_lr()[0]
print(epoch, i, f'lr - {lr:6f} loss - {loss.item()}')
logs = {
**logs,
'epoch': epoch,
'iter': i,
'loss': loss.item(),
'lr': lr
}
wandb.log(logs)
global_step += 1
# save final vae and cleanup
save_obj = {
'hparams': vae_params,
'weights': vae.state_dict()
}
torch.save(save_obj, 'vae-final.pt')
wandb.save('./vae-final.pt')
wandb.finish()
|
[
"lucidrains@gmail.com"
] |
lucidrains@gmail.com
|
e211d58c9098c0d358cbab093986f7c079d0f6cf
|
c24fef69a42ac1da33c892eb85c955acc743354c
|
/multithreading_multiprocess/sell_ticket_with_deadlock.py
|
e37cca3733afac501890904587b122309c47a7c3
|
[] |
no_license
|
Arithmeticjia/leetcode-python
|
e3c1d5c5a2733c56637ee2fb51222c7465dc6425
|
a24869d88cb41e53fb0abe482ba87dd1e54b2167
|
refs/heads/master
| 2021-08-15T21:55:11.511687
| 2021-01-03T03:50:54
| 2021-01-03T03:50:54
| 237,393,853
| 1
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 641
|
py
|
# coding:gbk
# Deliberate deadlock demo (see filename): sale() acquires a plain,
# non-reentrant Lock twice in the same thread, so every worker blocks
# forever on the second acquire().
import threading
import time
total = 5 # total number of tickets
lock = threading.Lock() # non-reentrant mutex
# rlock = threading.RLock() # a reentrant mutex (would NOT deadlock here)
def sale():
global total
lock.acquire()
# Intentional bug: re-acquiring a non-reentrant Lock deadlocks.
lock.acquire()
time.sleep(1)
print('正在售出第%s张票\n' % total)
time.sleep(1)
total -= 1
lock.release()
lock.release()
if __name__ == '__main__':
threads = []
for i in range(5): # create 5 threads, one per ticket window
t = threading.Thread(target=sale, args=())
threads.append(t)
for t in threads: # start selling
t.start()
|
[
"1097197237@qq.com"
] |
1097197237@qq.com
|
2c831da32af3407d2f3ad1ee95dcb867b48d2bb7
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/3-OO-Python/2-attributes-methods_20200415002528.py
|
405b115b79f2213ab1fda091ba07277c5d9961aa
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239
| 2020-04-23T19:18:06
| 2020-04-23T19:18:06
| 253,171,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
# OOP
class PlayerCharacter:
    """Blueprint for a player character object."""

    # Class object attribute: shared (static) across every instance.
    membership = True

    def __init__(self, name, age):
        """Store the player's dynamic attributes."""
        self.name = name
        self.age = age

    def run(self):
        """Announce the run, then report the finished workout."""
        print('running')
        return 'Workout is done'
# Instantiate two independent players from the same blueprint.
player1 = PlayerCharacter('Josh', 23)
player2 = PlayerCharacter('Morgan', 22)
# player2.attack=('Player is attacking !')
#blueprint of the object
# help(player1)
# help(list)
#attributes - dynamic data - F.E, name, age
# Class attributes (membership) are shared; reading via an instance works too.
print(player1.membership)
|
[
"tikana4@yahoo.com"
] |
tikana4@yahoo.com
|
8cde96fc88144ab64f34b48cdae3f18a63571685
|
3b31c39ab8269aa2d7060051db6ecab97e49aa8d
|
/mysite2/medicine/views.py
|
bddab105eba892c31415956a7d26473169709ddd
|
[] |
no_license
|
seiya0723/medicine_checker_04
|
ce2890890955f4c7ab1eef4a71b657963945d6ea
|
cd6c11e25b61a056097fd02ad37a8f11e7db7e31
|
refs/heads/master
| 2023-06-11T16:53:08.007479
| 2021-07-03T04:44:24
| 2021-07-03T04:44:24
| 382,526,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,515
|
py
|
from django.shortcuts import render,redirect
from django.views import View
#クエリビルダ(複雑な検索処理を行う事ができる)
from django.db.models import Q
#JavaScript用にJSONレスポンスを返す
from django.http.response import JsonResponse
#レンダリングした後、文字列型にして返す
from django.template.loader import render_to_string
from .models import Medicine
#正規表現を使うので、インポート
import re
class IndexView(View):
# Renders the top page; the queries below are one-off data-audit probes.
def get(self, request, *args, **kwargs):
# Surface medicines that have no description at all.
medicines = Medicine.objects.filter(effect="",caution="",dosage="",side_effect="")
print(len(list(medicines.values())))
# Medicines that differ only by manufacturer have identical content:
# strip the 「」-quoted maker suffix, compare neighbours, count matches
# (candidates to display or delete).
medicines = Medicine.objects.all().exclude(effect="",caution="",dosage="",side_effect="").order_by("name")
duplicate = 0
old_name = ""
for medicine in medicines:
new_name = medicine.name
#print("before: " + new_name)
#print("after: " + re.sub("「.*」","",new_name))
new_name = re.sub("「.*」","",new_name)
if old_name == new_name:
#print("duplicate found")
#TODO: record the IDs of the duplicated medicines here.
duplicate += 1
old_name = new_name
print("重複している数" + str(duplicate))
# ~22,200 rows -> excluding duplicates and blank entries -> ~9,400 rows.
# That size would also make running on Heroku feasible.
return render(request,"medicine/index.html")
index = IndexView.as_view()
# Returns the search response as JSON (consumed by the Ajax front end).
class SearchView(View):
def get(self, request, *args, **kwargs):
json = {"error":True}
if "search" in request.GET:
# (1) Keyword empty or whitespace only: abort the search.
if request.GET["search"] == "" or request.GET["search"].isspace():
# Sent via Ajax, so answer with JSON instead of a redirect.
#return redirect("medicine:index")
return JsonResponse(json)
# No field checkbox ticked: skip the search (it would match every
# row and be slow).
if "name" not in request.GET and "effect" not in request.GET and "caution" not in request.GET and "dosage" not in request.GET and "side_effect" not in request.GET:
# Sent via Ajax, so answer with JSON instead of a redirect.
#return redirect("medicine:index")
return JsonResponse(json)
# (2) Turn the keyword string into a list (supports multiple terms;
# full-width spaces are normalised to ASCII spaces first).
search = request.GET["search"].replace(" "," ")
search_list = search.split(" ")
# (3) Build the query.
query = Q()
for word in search_list:
if word == "":
continue
#TIPS: use & for AND search, | for OR search.
if "name" in request.GET:
query |= Q(name__contains=word)
if "effect" in request.GET:
query |= Q(effect__contains=word)
if "caution" in request.GET:
query |= Q(caution__contains=word)
if "dosage" in request.GET:
query |= Q(dosage__contains=word)
if "side_effect" in request.GET:
query |= Q(side_effect__contains=word)
# (4) Run the assembled query.
medicines = Medicine.objects.filter(query)
else:
medicines = []
context = { "medicines":medicines }
# Render the result template to a plain string.
content = render_to_string("medicine/search.html",context,request)
# Clear the error flag and hand the rendered HTML to the JS side as JSON.
json["error"] = False
json["content"] = content
return JsonResponse(json)
search = SearchView.as_view()
# Returns a single medicine's data (used when stacking a row into the table).
class SingleView(View):
def get(self, request, pk, *args, **kwargs):
print("single")
json = { "error":True }
# Pull one medicine by primary key.
medicine = Medicine.objects.filter(id=pk).first()
# No such medicine: report the error flag only.
if not medicine:
return JsonResponse(json)
# Convert to a plain dict so it can be serialised as JSON.
dic = {}
dic["name"] = medicine.name
dic["effect"] = medicine.effect
dic["caution"] = medicine.caution
dic["dosage"] = medicine.dosage
dic["side_effect"] = medicine.side_effect
json["error"] = False
json["medicine"] = dic
return JsonResponse(json)
single = SingleView.as_view()
|
[
"seiya@asahina"
] |
seiya@asahina
|
e676704593b04666cd09b4f9bc98470a844ee2c9
|
401abd0e20319927ef2d6aba5940f1e5ee0b4ee5
|
/cwd.py
|
5449b38ff831a97bf3788730e8d4a44bba4d7eae
|
[] |
no_license
|
OctopusHugz/checker_hack_day
|
a821dc789212fbd5c21de34e50f6817c25bdca8a
|
fcab122bfc784720d41440357b3a6cd79f4e0986
|
refs/heads/master
| 2023-06-26T01:41:00.744815
| 2021-07-27T17:39:58
| 2021-07-27T17:39:58
| 300,801,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
#!/usr/bin/env python3
from os import getcwd, path
from projects import (low_list, high_list, sedo_list,
interview_list, web_front_end_list, web_back_end_list,
web_react_list)
def pid_from_cwd():
    """Returns a project's ID based on the current working directory"""
    current_dir = path.basename(getcwd())
    track_lists = {
        "low": low_list,
        "high": high_list,
        "sedo": sedo_list,
        "interview": interview_list,
        "web_front_end": web_front_end_list,
        "web_back_end": web_back_end_list,
        "web_react": web_react_list,
    }
    # Each track is a list of single-entry {project_id: project_dir} dicts;
    # return the id whose directory matches the cwd basename.
    for track in track_lists.values():
        for project in track:
            project_id = next(iter(project.keys()))
            project_dir = next(iter(project.values()))
            if current_dir == project_dir:
                return project_id
def parent_from_cwd():
    """Returns the parent directory based on the current working directory"""
    components = getcwd().split('/')
    return components[-2]
|
[
"colsonscott53@gmail.com"
] |
colsonscott53@gmail.com
|
6aa5a163b90b39c0ac27f13bb82e2ae042d17542
|
36222fc73431a89d41a342aa176158b8868bc41a
|
/accounts/migrations/0051_auto_20170412_1628.py
|
3be2038a4dd7ec662e47fd8ab1e7a964244568c5
|
[] |
no_license
|
dxviidmg/CITNOVA
|
9e3f555e192d4e875fc4b990b70c52e3f6fc8bc0
|
f18d6e74082d0ddf58eaba439d5e20f2d48af7b9
|
refs/heads/master
| 2021-01-18T23:34:41.179481
| 2017-05-20T13:59:11
| 2017-05-20T13:59:11
| 87,117,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-12 21:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
# Auto-generated migration: rewrites the choice lists on two CharFields
# (no column type change beyond the declared choices/lengths).

dependencies = [
('accounts', '0050_auto_20170412_1622'),
]
operations = [
migrations.AlterField(
model_name='expediente',
name='tipo',
field=models.CharField(choices=[('P. Física', 'P. Física'), ('P. Moral', 'P. Moral')], max_length=20),
),
migrations.AlterField(
model_name='perfil',
name='grado_profesional',
field=models.CharField(blank=True, choices=[('Mtro(a).', 'Mtro(a).'), ('Lic.', 'Lic.'), ('Tec.', 'Tec.'), ('Dr.', 'Dr.'), ('Arq.', 'Arq.'), ('Ing.', 'Ing.'), ('T. S. U.', 'T. S. U.')], default='C.', max_length=30),
),
]
|
[
"dmg_92@hotmail.com"
] |
dmg_92@hotmail.com
|
c3685030a33aff5508370080b26a5986fe2b03b1
|
31d10cf8f83fd04281f0e108ba0c9ed193f9ed7b
|
/0x01-python-if_else_loops_functions/6-print_comb3.py~
|
8a959951e4859dd39d222a64ffa40480d74aa967
|
[] |
no_license
|
RodrigoSierraV/holbertonschool-higher_level_programming
|
822d41587c6336d363dd41609960a7ca23700fc2
|
7c671b5c0c46e2def8ccab760d7ceca1ca07702f
|
refs/heads/master
| 2020-05-18T03:37:20.032499
| 2019-10-23T02:25:34
| 2019-10-23T02:25:34
| 184,111,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
#!/usr/bin/python3
# Print all pairs of two different digits (i < b) in ascending order,
# separated by ", ", with the final pair "89" followed by a newline.
for i in range(0, 9 + 1):
    for b in range(0, 9 + 1):
        if b > i and (b + i) < 17:
            print("{:d}{:d}, ".format(i, b), end="")
        elif b > i and b + i == 17:
            # Bug fix: the original tested `+ i == 17` (i.e. i == 17),
            # which is never true, so the closing "89" was never printed.
            print("{:d}{:d}".format(i, b))
|
[
"735@holbertonschool.com"
] |
735@holbertonschool.com
|
|
1a48019324f811a2932ab415786dec956df484f8
|
f36b733f9c24d4cabd0d3354e0344094fbf3c026
|
/a10_saltstack/helpers/helper_modules/a10_cgnv6_lsn_port_overloading.py
|
109481f957d3b7d3b6f10485242a324a36f8fd11
|
[
"Apache-2.0"
] |
permissive
|
a10networks/a10-saltstack
|
08e13647e0187b09500ed3d9053ae06e7e808746
|
0d86043b1d09e75ea170e72fac5068254fc4037c
|
refs/heads/master
| 2021-03-19T16:11:14.211706
| 2019-07-24T17:18:04
| 2019-07-24T17:18:04
| 123,501,933
| 2
| 3
| null | 2019-07-24T17:18:05
| 2018-03-01T22:55:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
# Copyright 2019 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Hacky way of having access to object properties for evaluation
# Child properties reachable under this module (used for evaluation).
AVAILABLE_PROPERTIES = ["global","tcp","udp",]
# aXAPI v3 endpoint for each child property.
REF_PROPERTIES = {
"global": "/axapi/v3/cgnv6/lsn/port-overloading/global",
"tcp": "/axapi/v3/cgnv6/lsn/port-overloading/tcp",
"udp": "/axapi/v3/cgnv6/lsn/port-overloading/udp",
}
MODULE_NAME = "port-overloading"
# This module has no parent/child key parameters in its URL.
PARENT_KEYS = []
CHILD_KEYS = []
def new_url(**kwargs):
    """Return the URL for creating a resource"""
    # This endpoint has no path parameters, so formatting with an empty
    # dictionary yields the template unchanged.
    template = "/axapi/v3/cgnv6/lsn/port-overloading"
    return template.format(**{})
def existing_url(**kwargs):
    """Return the URL for an existing resource"""
    # No keys identify instances of this module, so the format dictionary
    # stays empty and the template is returned as-is.
    template = "/axapi/v3/cgnv6/lsn/port-overloading"
    return template.format(**{})
|
[
"thompson.grey.hunter@gmail.com"
] |
thompson.grey.hunter@gmail.com
|
7febc115f14bfed876325b00ff64fcedfa4ca80e
|
56e626db1b367f30e6978f5a5d573618823e9b6c
|
/train/train_multi_class_classify.py
|
90931424639fe42f6842c9bf8a755ce8c6cee098
|
[
"MIT"
] |
permissive
|
witnesslq/transwarp-nlp
|
d9bdf53b8ded3ac07196b4ba82346429caeb5be8
|
fc324253e9eff7d9d365ebb85ba81680bbe86f5f
|
refs/heads/master
| 2021-01-21T09:39:31.373777
| 2017-05-11T12:51:43
| 2017-05-11T12:51:43
| 91,663,587
| 6
| 2
| null | 2017-05-18T07:39:42
| 2017-05-18T07:39:42
| null |
UTF-8
|
Python
| false
| false
| 4,626
|
py
|
# -*- coding: utf-8 -*-
import cPickle
import numpy as np
import tensorflow as tf
import os, time
from transwarpnlp.multi_class_classify.cnn_config import CnnConfig
from transwarpnlp.multi_class_classify import model_cnn
config = CnnConfig()
pkg_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def train_cnn_classfier(train_path):
# Trains the CNN text classifier on the pickled, preprocessed corpus under
# train_path, writing TF summaries and checkpoints; returns the embedding
# variable, the live session, and the index->word map.
print("loading data...")
# NOTE(review): the open() handle passed to cPickle.load is never closed.
x = cPickle.load(open(os.path.join(train_path, "model/mr.txt"), "rb"))
# Preprocessed data: revs {"y":label,"text":"word1 word2 ..."}
# word_idx_map["word"]==>index
# vocab["word"]==>frequency
revs, _, _, word_idx_map, idx_word_map, vocab = x[0], x[1], x[2], x[3], x[4], x[5]
print("data loaded!")
revs = np.random.permutation(revs) # raw samples are grouped by class; shuffle them
n_batches = len(revs) / config.batch_size #
n_train_batches = int(np.round(n_batches * 0.9))
# Model definition ==========================================
with tf.Graph().as_default(), tf.Session().as_default() as sess:
# Placeholders for the real inputs and labels.
x_in = tf.placeholder(tf.int64, shape=[None, config.sentence_length], name="input_x")
y_in = tf.placeholder(tf.int64, [None], name="input_y")
keep_prob = tf.placeholder(tf.float32)
# Build the model.
loss, accuracy, embeddings = model_cnn.build_model(x_in, y_in, keep_prob)
# Training setup ========================================
num_steps = 10
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(1e-4, global_step, num_steps, 0.99, staircase=True) # decaying learning rate
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(loss, global_step=global_step)
# summaries,====================
timestamp = str(int(time.time()))
out_dir = os.path.join(train_path, "summary", timestamp)
print("Writing to {}\n".format(out_dir))
loss_summary = tf.summary.scalar("loss", loss)
acc_summary = tf.summary.scalar("accuracy", accuracy)
train_summary_op = tf.summary.merge([loss_summary, acc_summary])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
checkpoint_dir = os.path.join(train_path, "ckpt")
checkpoint_prefix = os.path.join(checkpoint_dir, "classify")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables())
sess.run(tf.global_variables_initializer())
# Resume from the latest checkpoint, shrinking the remaining step budget
# (135 appears to be batches-per-epoch here -- TODO confirm).
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
current_step = tf.train.global_step(sess, global_step)
print("current_step:", current_step)
if num_steps > int(current_step / 135):
num_steps = num_steps - int(current_step / 135)
print("continute step:", num_steps)
else:
num_steps = 0
batch_x_test, batch_y_test = model_cnn.get_test_batch(revs, word_idx_map)
for i in range(num_steps):
for minibatch_index in np.random.permutation(range(n_train_batches)): # shuffle so each epoch sees batches in a new order
batch_x, batch_y = model_cnn.generate_batch(revs, word_idx_map, minibatch_index)
# train_step.run(feed_dict={x_in: batch_x, y_in: batch_y, keep_prob: 0.5})
feed_dict = {x_in: batch_x, y_in: batch_y, keep_prob: 0.5}
_, step, summaries = sess.run([train_step, global_step, train_summary_op], feed_dict)
train_summary_writer.add_summary(summaries, step)
# Evaluate on the held-out batch with dropout disabled.
train_accuracy = accuracy.eval(feed_dict={x_in: batch_x_test, y_in: batch_y_test, keep_prob: 1.0})
current_step = tf.train.global_step(sess, global_step)
print("Update step %d, training accuracy %g" % (current_step, train_accuracy))
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))
return embeddings, sess, idx_word_map
# Script entry point: train on the bundled dataset, then export word vectors.
if __name__ == "__main__":
train_path = os.path.join(pkg_path, "data/multi_class_classify")
embeddings, sess, idx_word_map = train_cnn_classfier(train_path)
final_embeddings = model_cnn.word2vec(embeddings, train_path, sess)
# cnn_classfier.display_word2vec(final_embeddings, idx_word_map)
|
[
"endymecy@sina.cn"
] |
endymecy@sina.cn
|
4cdf34e45cb5d8eaa9a0dc255e0b2e23dca732a5
|
0bb49acb7bb13a09adafc2e43e339f4c956e17a6
|
/OpenNodes/OpenProject/addComment.py
|
20008f8c48e3a22a8264afab4a51366f970cea38
|
[] |
no_license
|
all-in-one-of/openassembler-7
|
94f6cdc866bceb844246de7920b7cbff9fcc69bf
|
69704d1c4aa4b1b99f484c8c7884cf73d412fafe
|
refs/heads/master
| 2021-01-04T18:08:10.264830
| 2010-07-02T10:50:16
| 2010-07-02T10:50:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
###OpenAssembler Node python file###
'''
define
{
name addComment
tags opdb
input dbPath Path "" ""
input string Comment "" ""
output int result "" ""
}
'''
import os, sys,time
from Setup import opdb_setup
from getCleanPath import getCleanPath
# OpenAssembler node: prepends a timestamped, user-stamped comment line to
# the comments.atr file of the project item addressed by Path
# (expected form "item@version"). Returns 1 on success, 0 on any failure
# (best-effort node contract -- the broad excepts are deliberate).
class addComment(opdb_setup,getCleanPath):
def addComment_main(self, **connections):
# Missing connections fall back to defaults rather than failing the node.
try:
Path=connections["Path"]
except:
Path=""
try:
Comment=connections["Comment"]
except:
Comment=""
try:
oas_output=connections["oas_output"]
except:
oas_output="result"
if oas_output=="result":
try:
readed=""
# Resolve the item's on-disk directory under the project root.
ProjectROOT=self.opdb_projects_settings(self.opdb_setup_read())
cleanpath=self.getCleanPath_main(Path=Path)
if cleanpath==0:
return 0
# A path identical to the input means it could not be resolved.
if str(cleanpath)==str(Path):
return 0
path=ProjectROOT+cleanpath.replace(":","/")
ltime=time.strftime("%Y%m%d%H%M%S",time.gmtime())
# Current user: Windows uses USERNAME, POSIX uses USER.
cuser=""
if os.name=="nt":
cuser=os.environ.get("USERNAME")
else:
cuser=os.environ.get("USER")
# Load any existing comments so the new line goes on top.
if os.path.isfile(path+"/comments.atr"):
pf=open(path+"/comments.atr","r")
readed=pf.read()
pf.close()
readed=readed.strip().lstrip()
ver=Path.split("@")[1]
# Flatten the comment to a single line (newlines become " | ").
comm=str(Comment).strip().lstrip().replace("\n"," | ").replace("\r","")
newline="- "+str(cuser)+" "+str(ver)+" "+str(ltime)+" || "+comm
textbody=newline+"\n"+readed+"\n"
pf=open(path+"/comments.atr","w")
pf.write(textbody)
pf.close()
return 1
except:
# Any I/O or parsing failure reports 0 (no comment written).
return 0
else:
return 0
|
[
"laszlo.mates@732492aa-5b49-0410-a19c-07a6d82ec771"
] |
laszlo.mates@732492aa-5b49-0410-a19c-07a6d82ec771
|
4b5832a605f16bffdeb23750b8ef37198504d3b2
|
c0c8aeb5aaf08925d8c9e1d660b02c89cbc7ad71
|
/Algorithms/Medium/47. Permutations II/answer.py
|
2df5db8def3857a1e5ac90f79f3ef6b455e5fc92
|
[
"Apache-2.0"
] |
permissive
|
kenwoov/PlayLeetCode
|
b2fdc43d799c37683a9efdc31c4df159cf553bf5
|
4012a2f0a099a502df1e5df2e39faa75fe6463e8
|
refs/heads/master
| 2022-12-17T05:54:22.775972
| 2020-09-26T14:08:43
| 2020-09-26T14:08:43
| 214,839,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
from typing import List
class Solution:
    def permuteUnique(self, nums: List[int]) -> List[List[int]]:
        """Return all unique permutations of nums (which may contain duplicates).

        Classic backtracking over a sorted copy: at each depth, a value equal
        to its (unused) left neighbour is skipped, so duplicates are pruned
        up front instead of the original O(k) `path not in results` membership
        scan per completed permutation.
        """
        ordered = sorted(nums)
        used = [False] * len(ordered)
        path: List[int] = []
        results: List[List[int]] = []

        def backtrack() -> None:
            # A full-length path is one unique permutation.
            if len(path) == len(ordered):
                results.append(path.copy())
                return
            for i, value in enumerate(ordered):
                if used[i]:
                    continue
                # Skip a duplicate value at the same tree depth: only the
                # first unused occurrence may start a branch.
                if i > 0 and ordered[i] == ordered[i - 1] and not used[i - 1]:
                    continue
                used[i] = True
                path.append(value)
                backtrack()
                path.pop()
                used[i] = False

        backtrack()
        return results
if __name__ == "__main__":
    # Quick manual check: the three distinct permutations of [1, 1, 2].
    solver = Solution()
    print(solver.permuteUnique([1, 1, 2]))
|
[
"kenwoov@outlook.com"
] |
kenwoov@outlook.com
|
f2c2cb9e12e923d4a06e08ac3ca969bebfed1aa3
|
1b1b5908dce757e9aa638507baa788f0f0e16611
|
/rango/migrations/0008_auto_20170101_2008.py
|
b86f6213843fb8e6e9fb55a28bd53b135c7abe3a
|
[] |
no_license
|
mish24/Rango
|
5223f1c43ce74e2f336d95a3f03621b05d5af049
|
fea0b6364bc265cdc09c75f745dd317fd38c0422
|
refs/heads/master
| 2021-04-28T21:34:44.014904
| 2017-01-04T21:27:26
| 2017-01-04T21:27:26
| 77,769,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-01 20:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10 `makemigrations`: adds a UNIQUE
    # constraint to Category.slug. Do not edit by hand.
    dependencies = [
        ('rango', '0007_auto_20170101_1804'),
    ]
    operations = [
        migrations.AlterField(
            model_name='category',
            name='slug',
            field=models.SlugField(unique=True),
        ),
    ]
|
[
"poonammishra24398@gmail.com"
] |
poonammishra24398@gmail.com
|
2769fde5a6fcf7fc0bb42f8ee954d96b8448f6c5
|
df20743069e3c81128438ecc8a368b1853dc8137
|
/overrides/scr/Spell1141 - Lawful Sword.py
|
bb31be7e81e6a900b07745035c61feb4e1f6a0d0
|
[
"MIT"
] |
permissive
|
dolio/ToEE_Mods
|
3f020d82e590a63a04047912d8d76fa2212957d7
|
53aa8086b89b25d7afb3104c5d8896c8a38c89b0
|
refs/heads/main
| 2023-04-09T06:17:47.064224
| 2021-04-29T09:41:58
| 2021-04-29T09:41:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
from toee import *
def OnBeginSpellCast(spell):
    # Debug trace only; no game-state change at cast start. (Python 2 syntax.)
    print "Lawful Sword OnBeginSpellCast"
    print "spell.target_list=", spell.target_list
    print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
def OnSpellEffect(spell):
    print "Lawful Sword OnSpellEffect"
    # Duration scales with caster level: 1 round per level.
    spell.duration = 1 * spell.caster_level
    # Single-target spell: only the first entry of target_list is used.
    spellTarget = spell.target_list[0]
    spellTarget.obj.condition_add_with_args('sp-Lawful Sword', spell.id, spell.duration)
    # Reuses the Heroism particle effect for the visual.
    spellTarget.partsys_id = game.particles('sp-Heroism', spellTarget.obj)
    # The spell object ends immediately; the attached condition persists.
    spell.spell_end(spell.id)
def OnBeginRound(spell):
    # Trace only.
    print "Lawful Sword OnBeginRound"
def OnEndSpellCast(spell):
    # Trace only.
    print "Lawful Sword OnEndSpellCast"
|
[
"herbstgeist@googlemail.com"
] |
herbstgeist@googlemail.com
|
ed9cdc6d73f30f066bf941daa653cda3d55256f4
|
d6a87864028abde8da69b0a1075e3d4c483ed73c
|
/Reverse Integer.py
|
d8954fcc8255c6824667ffee755a63529a7904a4
|
[] |
no_license
|
Windsooon/LeetCode
|
7ef78c7e001c1e6924244869a7ba5491d33eb246
|
409d7db811d41dbcc7ce8cda82b77eff35585657
|
refs/heads/master
| 2021-01-10T15:26:16.986357
| 2020-01-01T14:57:58
| 2020-01-01T14:57:58
| 54,531,267
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
class Solution:
    # edge case [-2**31, 2**31-1]
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of x, preserving its sign.

        Returns 0 when the reversed value would overflow a signed 32-bit
        integer, matching the LeetCode contract. The original computed the
        string reversal up to three times per call; this does it once.
        """
        sign = -1 if x < 0 else 1
        reversed_value = sign * int(str(abs(x))[::-1])
        # Clamp to the signed 32-bit range [-2**31, 2**31 - 1].
        if not -2**31 <= reversed_value <= 2**31 - 1:
            return 0
        return reversed_value
|
[
"wiwindson@outlook.com"
] |
wiwindson@outlook.com
|
6f9c7fb744dc3a96ad6bb2b4190e57f301d9d99f
|
3ee0d5a2cc955c4fb5583f4b88463e783cad8e9e
|
/examples/ds3/t370401.py
|
17d5a22cc327ae9d733236fe6ad1666e4e3c0613
|
[] |
no_license
|
vawser/ESDLang
|
47b18f7f14b26ae24d8c39d20701ffb0e0017f3c
|
9455d423f4fae534abba7b98339c61e7f1350f53
|
refs/heads/master
| 2021-04-18T13:30:42.990177
| 2019-09-17T04:33:46
| 2019-09-17T04:33:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,518
|
py
|
# -*- coding: utf-8 -*-
# NOTE: Decompiled ESD (event state machine) talk script for NPC t370401.
# The assert/while/`call =` constructs encode ESD state transitions, not
# ordinary Python control flow; the engine resolves the callback names
# (GetEventStatus, CheckActionButtonArea, ...) at runtime. Do not "fix" the
# shape of this code -- it mirrors the compiled state graph.
def t370401_1():
    """State 0,1"""
    # Entry machine: alternates between the host-player loop (x10) and the
    # client-player loop (x11) based on IsClientPlayer().
    assert GetCurrentStateElapsedTime() > 1
    while True:
        """State 2"""
        call = t370401_x10()
        assert IsClientPlayer() == 1
        """State 3"""
        call = t370401_x11()
        assert not IsClientPlayer()
def t370401_x0(z2=6000, flag3=1575, flag4=6000, flag5=6000, flag6=6000, flag7=6000):
    """State 0"""
    # Blocks until: the player can interact (no one-line help, not already
    # talking, host player, alive, enabled), at least one of the event flags
    # is set, and the action button in region z2 is pressed.
    while True:
        """State 1"""
        assert (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
                and not IsPlayerDead() and not IsCharacterDisabled())
        """State 3"""
        assert (GetEventStatus(flag3) == 1 or GetEventStatus(flag4) == 1 or GetEventStatus(flag5) ==
                1 or GetEventStatus(flag6) == 1 or GetEventStatus(flag7) == 1)
        """State 2"""
        if (not (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
                 and not IsPlayerDead() and not IsCharacterDisabled())):
            pass
        elif (not GetEventStatus(flag3) and not GetEventStatus(flag4) and not GetEventStatus(flag5) and
              not GetEventStatus(flag6) and not GetEventStatus(flag7)):
            pass
        elif CheckActionButtonArea(z2):
            break
    """State 4"""
    return 0
def t370401_x1():
    """State 0,1"""
    # Gracefully ends any in-progress conversation, then closes a generic
    # dialog and/or menu if one is open.
    if not CheckSpecificPersonTalkHasEnded(0):
        """State 7"""
        ClearTalkProgressData()
        StopEventAnimWithoutForcingConversationEnd(0)
        """State 6"""
        ReportConversationEndToHavokBehavior()
    else:
        pass
    """State 2"""
    if CheckSpecificPersonGenericDialogIsOpen(0) == 1:
        """State 3"""
        ForceCloseGenericDialog()
    else:
        pass
    """State 4"""
    if CheckSpecificPersonMenuIsOpen(-1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0):
        """State 5"""
        ForceCloseMenu()
    else:
        pass
    """State 8"""
    return 0
def t370401_x2():
    """State 0,1"""
    # Unconditional hard shutdown of all conversation UI state.
    ClearTalkProgressData()
    StopEventAnimWithoutForcingConversationEnd(0)
    ForceCloseGenericDialog()
    ForceCloseMenu()
    ReportConversationEndToHavokBehavior()
    """State 2"""
    return 0
def t370401_x3(text2=_, z1=_, flag2=0, mode2=1):
    """State 0,5"""
    # Sets event flag z1, then plays message text2 to completion; mode2
    # controls whether the Havok conversation-end report fires afterwards.
    assert t370401_x2() and CheckSpecificPersonTalkHasEnded(0) == 1
    """State 2"""
    SetEventState(z1, 1)
    """State 1"""
    TalkToPlayer(text2, -1, -1, flag2)
    assert CheckSpecificPersonTalkHasEnded(0) == 1
    """State 4"""
    if not mode2:
        pass
    else:
        """State 3"""
        ReportConversationEndToHavokBehavior()
    """State 6"""
    return 0
def t370401_x4(text1=_, flag1=0, mode1=1):
    """State 0,4"""
    # Same as x3 but without setting an event flag first.
    assert t370401_x2() and CheckSpecificPersonTalkHasEnded(0) == 1
    """State 1"""
    TalkToPlayer(text1, -1, -1, flag1)
    assert CheckSpecificPersonTalkHasEnded(0) == 1
    """State 3"""
    if not mode1:
        pass
    else:
        """State 2"""
        ReportConversationEndToHavokBehavior()
    """State 5"""
    return 0
def t370401_x5():
    """State 0,1,2"""
    # First-greeting vs. repeat-greeting branch, keyed on flag 50006301.
    if not GetEventStatus(50006301):
        """State 3,7"""
        # talk:40000200:"Welcome, our gracious Lord."
        assert t370401_x4(text1=40000200, flag1=0, mode1=1)
        """State 5"""
        SetEventState(73700330, 1)
    else:
        """State 4,6"""
        # talk:40000300:"Your spouse awaits you, you are very near."
        assert t370401_x4(text1=40000300, flag1=0, mode1=1)
    """State 8"""
    return 0
def t370401_x6():
    """State 0,7"""
    # Attacked-by-player branch: runs the rotating x14 lines while the player
    # stays within range; Quit()s the machine once flag 1576/1577 is set.
    assert t370401_x1()
    """State 4"""
    assert GetCurrentStateElapsedFrames() > 1
    """State 1"""
    assert not GetEventStatus(1576) and not GetEventStatus(1577)
    """State 2"""
    if GetDistanceToPlayer() < 10:
        """State 5,9"""
        call = t370401_x14()
        if call.Done():
            pass
        elif GetEventStatus(1576) == 1 or GetEventStatus(1577) == 1:
            """State 3"""
            Quit()
        elif GetDistanceToPlayer() > 12:
            """State 8"""
            assert t370401_x1()
        else:
            """State 6"""
            pass
    """State 10"""
    return 0
def t370401_x7():
    """State 0,1"""
    # Death line, suppressed once flag 1578 is set or when out of range.
    if GetEventStatus(1578) == 1:
        """State 2"""
        pass
    else:
        """State 3"""
        if GetDistanceToPlayer() < 10:
            """State 4,7"""
            # talk:40000800:"Ahh, our gracious Lord..."
            call = t370401_x4(text1=40000800, flag1=0, mode1=1)
            if call.Done():
                pass
            elif GetDistanceToPlayer() > 12:
                """State 6"""
                assert t370401_x1()
            else:
                """State 5"""
                pass
    """State 8"""
    return 0
def t370401_x8():
    """State 0,2,1,3"""
    # Empty state machine: placeholder transition, no side effects.
    return 0
def t370401_x9():
    """State 0,1,2,3"""
    # Ends the current conversation when the player walks away.
    assert t370401_x1()
    """State 4"""
    return 0
def t370401_x10():
    """State 0"""
    # Host-player top loop: alternates x12/x13 as event flag 1564 toggles.
    while True:
        """State 1"""
        call = t370401_x12()
        assert not GetEventStatus(1564)
        """State 2"""
        call = t370401_x13()
        assert GetEventStatus(1564) == 1
def t370401_x11():
    """State 0,1"""
    # Client-player loop body: just closes conversation UI.
    assert t370401_x1()
    """State 2"""
    return 0
def t370401_x12():
    """State 0,2"""
    # Lifetime loop: runs the main interaction machine (x15) until this NPC
    # dies, then plays the death reaction (x7).
    call = t370401_x15()
    assert CheckSelfDeath() == 1
    """State 1"""
    t370401_x7()
def t370401_x13():
    """State 0"""
    # Intentionally empty: parked state while flag 1564 is set.
def t370401_x14():
    """State 0,1"""
    # Rotating small-talk: plays line 40000400, then 40000500, then resets
    # both progress flags and plays 40000600.
    if not GetEventStatus(73700321):
        """State 2,6"""
        # talk:40000400:" "
        assert t370401_x3(text2=40000400, z1=73700321, flag2=0, mode2=1)
    elif not GetEventStatus(73700322):
        """State 3,7"""
        # talk:40000500:" "
        assert t370401_x3(text2=40000500, z1=73700322, flag2=0, mode2=1)
    else:
        """State 4,5"""
        SetEventState(73700321, 0)
        SetEventState(73700322, 0)
        """State 8"""
        # talk:40000600:"Whatever for!"
        assert t370401_x4(text1=40000600, flag1=0, mode1=1)
    """State 9"""
    return 0
def t370401_x15():
    """State 0"""
    # Main interaction loop: wait for the talk prompt (x0), greet (x5), and
    # divert to the attacked branch (x6, entered via Label/Goto 'L0'), the
    # walk-away handler (x9), or the flag-73700330 reset (x8). `break` exits
    # on player death; Label/Goto are ESD jump markers, not Python.
    while True:
        """State 6"""
        call = t370401_x0(z2=6000, flag3=1575, flag4=6000, flag5=6000, flag6=6000, flag7=6000)
        if call.Done():
            """State 4"""
            call = t370401_x5()
            if call.Done():
                pass
            elif IsAttackedBySomeone() == 1:
                """State 2"""
                Label('L0')
                call = t370401_x6()
                # ESD exit-pause hook: clears aggro when this state is left.
                def ExitPause():
                    RemoveMyAggro()
                if call.Done():
                    pass
                elif IsPlayerDead() == 1:
                    break
            elif IsPlayerDead() == 1:
                break
        elif GetDistanceToPlayer() > 5:
            """State 5"""
            call = t370401_x9()
            if call.Done() and GetDistanceToPlayer() < 4.9:
                pass
            elif IsAttackedBySomeone() == 1:
                Goto('L0')
            elif IsAttackedBySomeone() == 1:
                Goto('L0')
            elif IsPlayerDead() == 1:
                break
        elif GetEventStatus(73700330) == 1:
            """State 1"""
            assert not GetEventStatus(73700330)
            """State 3"""
            t370401_x8()
|
[
"matt@thefifthmatt.com"
] |
matt@thefifthmatt.com
|
6daf09de199c4720ba3b97533878c2c6117c7379
|
2be43de3e8b6ce2f46da2c9afb021a6ea2abb74a
|
/neighbour/settings.py
|
0c3691a9f2cd1a344b46b36ff6d4cc39343afdea
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
CollinsMuiruri/IS-PROJECT
|
345877fe61d14e94c8ec4a0bf8f6a4d76698fd43
|
2e59bb95a6dc3483e699140bde6792f6e92e1356
|
refs/heads/master
| 2021-09-09T12:01:01.065687
| 2019-07-16T14:29:46
| 2019-07-16T14:29:46
| 197,208,301
| 0
| 0
| null | 2021-09-08T01:08:59
| 2019-07-16T14:19:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,667
|
py
|
"""
Django settings for neighbour project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
# NOTE(review): '*' disables Host-header validation; list the real hostnames
# for any production deployment.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'hood',
    'chief',
    'bootstrap3',
    'bootstrap4',
    'jet_django',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'neighbour.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'neighbour.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): database credentials are committed to source control; move
# NAME/USER/PASSWORD into decouple.config(...) like SECRET_KEY above.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'neighbour',
        'USER': 'collins',
        'PASSWORD': 'wildgoosechase'
    }
}
# Heroku-style deploys: DATABASE_URL, when set, overrides the local settings.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
[
"wanyekicollins@gmail.com"
] |
wanyekicollins@gmail.com
|
047e8abdf1b097fc3d9312e4b3df9a03efecc976
|
11ca230c3db96ac41fa90104d502fde51aae306c
|
/04.기하학적 변환/6.remapping.py
|
562d2fb460ecc045ae972bc4af5e18ea09445cfa
|
[] |
no_license
|
wonsgong/Computer-Vision
|
e849ead6cea5ab5c274ef78643961a6138a6e975
|
09ada035299032337498f36198d2b8d3c3de1f01
|
refs/heads/main
| 2023-05-30T15:37:19.548360
| 2021-06-09T10:27:08
| 2021-06-09T10:27:08
| 353,696,068
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
import sys
import numpy as np
import cv2
# Load the demo image; bail out early if the file is missing.
src = cv2.imread('image/tekapo.bmp')
if src is None:
    print("Image load failed")
    sys.exit()

height, width = src.shape[:2]

# Build an identity sampling grid, then add a sine ripple to the row
# coordinates so each column is shifted vertically by up to 10 pixels.
mapy, mapx = np.indices((height, width), dtype=np.float32)
mapy = mapy + 10 * np.sin(mapx / 32)

# Resample the source through the distorted grid.
dst = cv2.remap(src, mapx, mapy, cv2.INTER_LINEAR)

cv2.imshow('src', src)
cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"42221106+wonsgong@users.noreply.github.com"
] |
42221106+wonsgong@users.noreply.github.com
|
91f0986327d0af5377be5d57f2f62bb0a350c79c
|
2d93403fac1645fdbf1727f0d17fbea6eeef470a
|
/decorators/class_decorator.py
|
f027fda59d0aee11df10298deccf8addf27338d7
|
[
"MIT"
] |
permissive
|
Minkov/python-oop-2020-02
|
d13c8c8feaa9ad41c524fc82887a98745115ac57
|
d2acb1504c1a135cded2ae6ff42acccb303d9ab1
|
refs/heads/master
| 2021-02-04T00:43:14.997404
| 2020-03-26T18:21:03
| 2020-03-26T18:21:03
| 243,588,830
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
class Logger:
    """Class-based decorator that logs and re-raises exceptions.

    Usage: ``@Logger`` on a function. The original implementation was broken
    twice over: ``__call__`` returned an inner ``wrapper`` (so calling the
    decorated function never ran it), and that wrapper referenced an
    undefined name ``func`` (NameError when invoked). ``__call__`` now
    invokes the stored callable directly.
    """

    def __init__(self, function):
        self.function = function

    def __call__(self, *args, **kwargs):
        try:
            return self.function(*args, **kwargs)
        except Exception as ex:
            # with open(file, 'a') as log_file:
            #     log_file.write(f'{ex} thrown from {self.function.__name__}\n')
            print(f'{ex} thrown from {self.function.__name__}')
            raise
|
[
"DonchoMinkov@gmail.com"
] |
DonchoMinkov@gmail.com
|
0366f2bbf07bea5d9926d82c21d9601671a10744
|
2d3cb7101cae992a58a1b91ee22be7285bc3154e
|
/pyart/core/setup.py
|
be4d0e3bc25ad59e259bea291dc0e2f41650a9f4
|
[
"BSD-3-Clause"
] |
permissive
|
scollis/pyart
|
341aca11a1e1b43482028bb688ad901e61f9a494
|
1a74b33e33df024cbc203ab1936eb5e7df4e92e7
|
refs/heads/main
| 2022-02-19T11:57:09.279907
| 2022-01-20T17:31:02
| 2022-01-20T17:31:02
| 450,486,932
| 1
| 0
|
NOASSERTION
| 2022-01-21T12:41:51
| 2022-01-21T12:41:51
| null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the ``core`` subpackage."""
    from numpy.distutils.misc_util import Configuration

    cfg = Configuration('core', parent_package, top_path)
    # Ship the test suite alongside the package data.
    cfg.add_data_dir('tests')
    return cfg
# Standalone build entry point for this subpackage.
# NOTE(review): numpy.distutils is deprecated and removed from recent NumPy
# releases -- verify against the project's supported NumPy versions.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
[
"jjhelmus@gmail.com"
] |
jjhelmus@gmail.com
|
ad509fa452ed2f6659bfbdd82033485f1dd7412f
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/yFEMocjdiRjPhoDqv_2.py
|
85c35556b13ff3699cdce5d83ac3bd06301dcef3
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
def prime_in_range(n1, n2):
    """Return True if the inclusive range [n1, n2] contains at least one prime.

    An empty range (n1 > n2) yields False, as do ranges containing only
    values below 2 -- matching the original behaviour, which enumerated every
    factor of every value (O((n2-n1) * n2)). This version trial-divides only
    up to sqrt(value).
    """
    return any(_is_prime(value) for value in range(n1, n2 + 1))


def _is_prime(value):
    """Trial-division primality test, O(sqrt(value))."""
    if value < 2:
        return False
    if value % 2 == 0:
        # 2 is the only even prime.
        return value == 2
    divisor = 3
    while divisor * divisor <= value:
        if value % divisor == 0:
            return False
        divisor += 2
    return True
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
15167f5da718def7ad4042fcb6379b086f5a8513
|
dcb9c42dde1436a474dbedbde9f30eaabc898ad3
|
/scripts/marline-khavele.py
|
d7a8fa0c4e8f06ad48bbec6932ac708ccc817819
|
[] |
no_license
|
farouk-afolabi/HNG-Script
|
c96aba3264d1f6d2e12119131d4126e680f9bb81
|
5cd2796f7a3c3d72237232237b9c68b666bf5dee
|
refs/heads/master
| 2022-09-27T19:15:30.303847
| 2020-06-03T21:06:46
| 2020-06-03T21:06:46
| 269,051,273
| 1
| 1
| null | 2020-06-03T18:59:39
| 2020-06-03T09:54:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 175
|
py
|
# stage 2 task
# Module-level names are kept as-is (they are the script's public surface);
# note that `id` shadows the builtin id().
name = "Marline Khavele"
id = "HNG-04957"
language = "python"

print(f"Hello World, This is {name} with HNGi7 {id} using {language} for stage 2 task")
|
[
"marlinekhavele6@gmail.com"
] |
marlinekhavele6@gmail.com
|
c91d47c4a234399c275744c668f60c5ac3ac7dcc
|
4ea6a1eb0c55f4d974ec4a0d2d3bb3228c48b62a
|
/django/apps/photo/migrations/0025_unique_together_story_image.py
|
6bab330e3add6d836278674da1be5b4fa5f81218
|
[
"Apache-2.0"
] |
permissive
|
universitas/universitas.no
|
16993d2fb65f21eff4a0cfd72540278276b24531
|
911a2541c77eca522ba5a723f175786f4f9eb481
|
refs/heads/master
| 2023-04-28T14:51:56.849564
| 2021-09-21T18:49:36
| 2021-09-21T18:52:17
| 19,112,283
| 19
| 6
|
Apache-2.0
| 2023-04-15T19:12:19
| 2014-04-24T14:50:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
from collections import Counter
from django.db import migrations
import sorl.thumbnail.fields
import apps.photo.models
from utils.merge_model_objects import merge_instances
def dedupe_storyimages(apps, schema_editor):
    """merge storyimages with same parent_story/imagefile."""
    # Uses the historical model from the migration state, per Django
    # data-migration convention.
    StoryImage = apps.get_model("stories", "StoryImage")
    # Any (imagefile, parent_story) pair counted more than once has
    # duplicate StoryImage rows.
    pairs = StoryImage.objects.values_list('imagefile_id', 'parent_story_id')
    dupes = (key for key, val in Counter(pairs).items() if val > 1)
    for imagefile, parent_story in dupes:
        story_images = StoryImage.objects.filter(
            imagefile=imagefile,
            parent_story=parent_story,
        ).order_by('-top', 'index')
        # merge_instances keeps the first row of the ordering and folds the
        # rest into it.
        merge_instances(*story_images)
class Migration(migrations.Migration):
    # Data migration (dedupe) runs first so the subsequent field change can
    # be applied safely; the dedupe is irreversible, hence the noop reverse.
    dependencies = [
        ('photo', '0024_auto_20180421_1957'),
    ]
    operations = [
        migrations.RunPython(
            code=dedupe_storyimages,
            reverse_code=migrations.RunPython.noop,
        ),
        migrations.AlterField(
            model_name='imagefile',
            name='original',
            field=sorl.thumbnail.fields.ImageField(
                height_field='full_height',
                max_length=1024,
                null=True,
                upload_to=apps.photo.models.upload_image_to,
                verbose_name='original',
                width_field='full_width'
            ),
        ),
    ]
|
[
"haakenlid@gmail.com"
] |
haakenlid@gmail.com
|
d5a32dd2120e713308cbab8ae1ce4c1061696c20
|
5f14603614bf9357b03c147af3423bb500f15ad8
|
/fe2/assettag/send_mail.py
|
ec40e165f715420971f0ada09c8d5d00bb7cedba
|
[] |
no_license
|
damnedsteven/emcn
|
76aa5449db00a0cb1dd8487c1bf19b4d4ed52014
|
89cdeb9d200f699772a0473fe9fd9b030d78cbc7
|
refs/heads/master
| 2021-01-23T05:24:03.401715
| 2018-05-03T09:12:52
| 2018-05-03T09:12:52
| 86,296,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,618
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
from email.mime.multipart import MIMEMultipart, MIMEBase
import smtplib
from datetime import datetime, timedelta
import MySQLdb
import os
# Directory of this script (unused below -- presumably kept for cron context;
# verify before removing).
my_path = os.path.dirname(os.path.abspath(__file__))
import sys
# Python 2 only: builtin reload() + setdefaultencoding were removed in Py3.
reload(sys)
sys.setdefaultencoding('utf8')
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
def query_mysql(query):
    # Get data from 200 DB
    # NOTE(review): host/user/password are hard-coded in source; move to
    # configuration. Connection is not closed on error (no try/finally).
    conn = MySQLdb.connect("16.187.230.200", "yi", "asdfqwer", "shortage", charset = 'utf8')
    cursor = conn.cursor()
    cursor.execute(query)
    #get header and rows
    header = [i[0] for i in cursor.description]
    rows = [list(i) for i in cursor.fetchall()]
    #append header to rows
    rows.insert(0,header)
    cursor.close()
    conn.close()
    return rows
#take list of lists as argument
def nlist_to_html(list2d):
    # Renders a 2-D list (first row = header) as an inline-styled HTML table.
    # Python 2 code: relies on the builtin `unicode`.
    # NOTE: mutates list2d in place (bolds the header cells and strips each
    # row's first element via row.remove).
    #bold header
    htable=u'<table border="1" bordercolor=000000 cellspacing="0" cellpadding="1" style="table-layout:fixed;vertical-align:bottom;font-size:13px;font-family:verdana,sans,sans-serif;border-collapse:collapse;border:1px solid rgb(130,130,130)" >'
    list2d[0] = [u'<b>' + i + u'</b>' for i in list2d[0]]
    #
    for row in list2d:
        newrow = u'<tr>'
        newrow += u'<td align="left" style="padding:1px 4px">'+unicode(row[0])+u'</td>'
        row.remove(row[0])
        newrow = newrow + ''.join([u'<td align="right" style="padding:1px 4px">' + unicode(x or "") + u'</td>' for x in row])
        newrow += '</tr>'
        htable+= newrow
    htable += '</table>'
    return htable
def sql_html(query):
    # Convenience: run the query and render the result as an HTML table.
    return nlist_to_html(query_mysql(query))
now = datetime.now()
earlier = now - timedelta(hours=12)
# from_date = earlier.strftime('%y') + '/' + earlier.strftime('%m') + '/' + earlier.strftime('%d') + '-' + earlier.strftime('%H')
to_date = now.strftime('%y') + '/' + now.strftime('%m') + '/' + now.strftime('%d') + '-' + now.strftime('%H')
from_addr = 'shortage@emcn.cn'
to_addr = ['yi.li5@hpe.com']
cc_addr = ['yi.li5@hpe.com']
bcc_addr = ['brooklynburgerking@gmail.com']
# to_addr = ['yi.li5@hpe.com', 'cpmo-iss-buyer@hpe.com', 'zhou@hpe.com', 'cpmo-iss-planner@hpe.com', 'taojun.sj@hpe.com', 'joy-m.huang@hpe.com', 'hai-chuan.zhao@hpe.com', 'ivy.y.lin@hpe.com']
smtp_server = 'smtp3.hpe.com'
query = """
SELECT
is_copy `Copy#`,
pn `Part No.`,
ctrl_id `Ctrl ID`,
buyer_name `Buyer`,
shortage_qty `TTL-S`,
pline_shortage_qty `S-RAW`,
passthru_shortage_qty `S-OPT`,
earliest_bkpl `Earliest BKPL Time`,
arrival_qty `Supp.Q`,
eta `ETA`,
CASE
WHEN slot = '0' THEN 'morning'
WHEN slot = '1' THEN 'afternoon'
WHEN slot = '2' THEN 'night'
END `Slot`,
remark `Remark`,
carrier.name `Carrier`,
judge_supply `Judge Supply?`,
shortage_reason.name `Shortage Reason (Category)`,
shortage_reason_detail `Shortage Reason (Comments)`,
bill_number `HAWB`,
date_format(lastupdated, "%b %d %Y %h:%i %p") `Updated`
FROM
pn
LEFT JOIN
carrier
ON pn.id_carrier=carrier.id
LEFT JOIN
shortage_reason
ON pn.id_shortage_reason=shortage_reason.id
WHERE (status=1 OR is_copy = -1) AND received IS NULL
ORDER BY pn
"""
text = """\
<html>
<head></head>
<body>
<p>Hi all,<br><br>
Here is the latest material shortage status, pls check and fill in the ETA schedule asap. Pls let <a href="mailto:taojun.sj@hpe.com">SJ, Taojun (EMCN Warehouse)</a> know if there is any wrong information. Thanks for your attention!<br>
<br>请登录网页版缺料显示系统: <a href="http://16.187.228.117/shortage/buyer/">网址</a>
</p>
<br>
</body>
</html>
"""
table = sql_html(query)
text2 = """\
<html>
<head></head>
<body>
<p><br>Thanks & Best Regs.<br>
cpmo ESSN warehouse system<br>
Tel: 862120510334
</p>
<br>
</body>
</html>
"""
msg = MIMEMultipart()
# 邮件正文是MIMEText:
msg.attach(MIMEText(text+table+text2, 'html', 'utf-8'))
msg['From'] = _format_addr('Shortage Alert <%s>' % from_addr)
# msg['To'] = _format_addr('recipient <%s>' % ",".join(to_addr))
msg['To'] = ", ".join(to_addr)
# msg['CC'] = _format_addr('admin <%s>' % ",".join(cc_addr))
msg['CC'] = ", ".join(cc_addr)
msg['BCC'] = ", ".join(bcc_addr)
msg['Subject'] = Header('for Buyer - ESSN material shortage (%s)' % (to_date), 'utf-8').encode()
to_addrs = to_addr + cc_addr + bcc_addr
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
#server.login(from_addr, password)
server.sendmail(from_addr, to_addrs, msg.as_string())
server.quit()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
a031c313195e9f2b8cf80dab81820b5fad9aebac
|
851b8ac597146bf467b96dea48331332eba48833
|
/custom_components/lightwave2/sensor.py
|
e7865f1dec4978ec26fca4528c2fb4c5804b8bf5
|
[] |
no_license
|
bigal82/bruces_homeassistant_config
|
9d569052ed1efd58a4f7035eba19007ff6be56c5
|
3def555be1b8e72a0f7a4978974d96c54544053a
|
refs/heads/main
| 2023-08-24T09:09:37.406093
| 2021-10-25T07:09:56
| 2021-10-25T07:09:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,166
|
py
|
import logging
from .const import LIGHTWAVE_LINK2, LIGHTWAVE_ENTITIES, LIGHTWAVE_WEBHOOK, DOMAIN
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL_INCREASING, SensorEntity, SensorEntityDescription
from homeassistant.const import POWER_WATT, ENERGY_WATT_HOUR, DEVICE_CLASS_POWER, DEVICE_CLASS_ENERGY
from homeassistant.core import callback
DEPENDENCIES = ['lightwave2']
_LOGGER = logging.getLogger(__name__)
# One sensor entity is created per featureset for each description below:
# instantaneous power (W, measurement) and cumulative energy (Wh,
# total-increasing). The `key` names index into the featureset's features.
ENERGY_SENSORS = [
    SensorEntityDescription(
        key="power",
        native_unit_of_measurement=POWER_WATT,
        device_class=DEVICE_CLASS_POWER,
        state_class=STATE_CLASS_MEASUREMENT,
        name="Current Consumption",
    ),
    SensorEntityDescription(
        key="energy",
        native_unit_of_measurement=ENERGY_WATT_HOUR,
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
        name="Total Consumption",
    )
]
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Find and return LightWave sensors."""
    sensors = []
    link = hass.data[DOMAIN][config_entry.entry_id][LIGHTWAVE_LINK2]
    url = hass.data[DOMAIN][config_entry.entry_id][LIGHTWAVE_WEBHOOK]
    # Dedicated energy monitors always get both sensors; switches and lights
    # only when their featureset reports power readings.
    for featureset_id, name in link.get_energy():
        for description in ENERGY_SENSORS:
            sensors.append(LWRF2Sensor(name, featureset_id, link, url, description))
    for featureset_id, name in link.get_switches():
        if link.get_featureset_by_id(featureset_id).reports_power():
            for description in ENERGY_SENSORS:
                sensors.append(LWRF2Sensor(name, featureset_id, link, url, description))
    for featureset_id, name in link.get_lights():
        if link.get_featureset_by_id(featureset_id).reports_power():
            for description in ENERGY_SENSORS:
                sensors.append(LWRF2Sensor(name, featureset_id, link, url, description))
    # Track created entities on the config entry so other platform code can
    # reach them later.
    hass.data[DOMAIN][config_entry.entry_id][LIGHTWAVE_ENTITIES].extend(sensors)
    async_add_entities(sensors)
class LWRF2Sensor(SensorEntity):
    """Representation of a LightWaveRF power usage sensor."""
    def __init__(self, name, featureset_id, link, url, description):
        self._name = f"{name} {description.name}"
        self._device = name
        _LOGGER.debug("Adding sensor: %s ", self._name)
        self._featureset_id = featureset_id
        self._lwlink = link
        self._url = url
        self.entity_description = description
        # features[key] is a (feature_id, value) pair; index 1 is the value.
        self._state = self._lwlink.get_featureset_by_id(self._featureset_id).features[self.entity_description.key][1]
        self._gen2 = self._lwlink.get_featureset_by_id(
            self._featureset_id).is_gen2()
    async def async_added_to_hass(self):
        """Subscribe to events."""
        await self._lwlink.async_register_callback(self.async_update_callback)
        # If a public webhook URL is configured, register one webhook per
        # feature so the hub pushes updates ("+" is not URL-safe, hence "P").
        if self._url is not None:
            for featurename in self._lwlink.get_featureset_by_id(self._featureset_id).features:
                featureid = self._lwlink.get_featureset_by_id(self._featureset_id).features[featurename][0]
                _LOGGER.debug("Registering webhook: %s %s", featurename, featureid.replace("+", "P"))
                req = await self._lwlink.async_register_webhook(self._url, featureid, "hass" + featureid.replace("+", "P"), overwrite = True)
    @callback
    def async_update_callback(self, **kwargs):
        """Update the component's state."""
        self.async_schedule_update_ha_state(True)
    @property
    def should_poll(self):
        """Lightwave2 library will push state, no polling needed"""
        return False
    @property
    def assumed_state(self):
        """Gen 2 devices will report state changes, gen 1 doesn't"""
        return not self._gen2
    async def async_update(self):
        """Update state"""
        self._state = self._lwlink.get_featureset_by_id(self._featureset_id).features[self.entity_description.key][1]
    @property
    def name(self):
        """Lightwave switch name."""
        return self._name
    @property
    def unique_id(self):
        """Unique identifier. Provided by hub."""
        return f"{self._featureset_id}_{self.entity_description.key}"
    @property
    def native_value(self):
        # Latest reading for this description's feature.
        return self._state
    @property
    def device_state_attributes(self):
        """Return the optional state attributes."""
        # NOTE(review): 'lrwf_product_code' below looks like a typo for
        # 'lwrf_...', but renaming changes a published attribute -- confirm
        # with consumers before fixing. Also, device_state_attributes is the
        # legacy hook (newer HA uses extra_state_attributes) -- verify the
        # targeted Home Assistant version.
        attribs = {}
        for featurename, featuredict in self._lwlink.get_featureset_by_id(self._featureset_id).features.items():
            attribs['lwrf_' + featurename] = featuredict[1]
        attribs['lrwf_product_code'] = self._lwlink.get_featureset_by_id(self._featureset_id).product_code
        return attribs
    @property
    def device_info(self):
        return {
            'identifiers': {
                # Serial numbers are unique identifiers within a specific domain
                (DOMAIN, self._featureset_id)
            },
            'name': self._device,
            'manufacturer': "Lightwave RF",
            'model': self._lwlink.get_featureset_by_id(
                self._featureset_id).product_code
            #TODO 'via_device': (hue.DOMAIN, self.api.bridgeid),
        }
|
[
"Bruce.hartley@clickspares.co.uk"
] |
Bruce.hartley@clickspares.co.uk
|
922ff5d630c9b04ec7b8d6b206e71f56a91e60c2
|
f4dedea53630c9cbdc6297ae4a7e2a8195fd7691
|
/10 Advanced Techniques/19 Signal Processing.py
|
c172f714e1656b011d12b7c13426a9755447f1f3
|
[] |
no_license
|
nikkisora/cses_problemset
|
d089db048444e07e002f131b4323adc9df95b05b
|
03160f33e36cdc6d538403357b36bcb015b4dba7
|
refs/heads/master
| 2023-07-03T10:34:23.487709
| 2021-08-05T21:13:49
| 2021-08-05T21:13:49
| 379,251,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
'''
CSES - Signal Processing
Time limit: 1.00 s
Memory limit: 512 MB
You are given two integer sequences: a signal and a mask. Your task is to process the signal by moving the mask through the signal from left to right. At each mask position calculate the sum of products of aligned signal and mask values in the part where the signal and the mask overlap.
Input
The first input line consists of two integers n and m: the length of the signal and the length of the mask.
The next line consists of n integers a_1,a_2,...,a_n defining the signal.
The last line consists of m integers b_1,b_2,...,b_m defining the mask.
Output
Print n+m-1 integers: the sum of products of aligned values at each mask position from left to right.
Constraints
1 <= n,m <= 2 * 10^5
1 <= a_i,b_i <= 100
Example
Input:
5 3
1 3 2 1 4
1 2 3
Output:
3 11 13 10 16 9 4
Explanation: For example, at the second mask position the sum of aligned products is 2 * 1 + 3 * 3 = 11.
'''
|
[
"32413317+nikkisora@users.noreply.github.com"
] |
32413317+nikkisora@users.noreply.github.com
|
fa987bfdd73ebad2cf8c88d6d524f5747f1813f0
|
0827979a9e3bfca5900726f1cef428f8a8c819ba
|
/NRPyPN/PN_Hamiltonian_SS.py
|
c7b73e919953363b5e77f4d954b77a8449fb0f81
|
[
"BSD-2-Clause"
] |
permissive
|
zachetienne/nrpytutorial
|
12763c9c0e0be0007b8cae5688225a33c8fb4442
|
1230b4d602e0657d42de0c7ea193c34058e4aca9
|
refs/heads/master
| 2023-09-01T06:31:22.549594
| 2023-08-14T19:47:16
| 2023-08-14T19:47:16
| 135,812,438
| 88
| 46
|
BSD-2-Clause
| 2023-09-02T00:25:36
| 2018-06-02T11:34:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,781
|
py
|
# As documented in the NRPyPN notebook
# PN-Hamiltonian-Spin-Spin.ipynb, this Python script
# generates spin-spin coupling pieces of the
# post-Newtonian (PN) Hamiltonian, up to and
# including 3PN order.
# Core functions:
# f_H_SS_2PN(m1,m2, S1U,S2U, nU, q):
# Compute the complete H_SS_2PN term and store to
# global variable of the same name.
# f_HS1S2_3PN(m1,m2, n12U, S1U,S2U, p1U,p2U, q)):
# Compute HS1S2_3PN and store to global variable
# of the same name.
# f_H_SS_S1sq_S2sq_3PN(m1,m2, n12U,n21U, S1U,S2U, p1U,p2U, q):
# Compute H_SS_S1sq_S2sq_3PN and store to global
# variable of the same name.
# Author: Zach Etienne
# zachetie **at** gmail **dot* com
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import indexedexpNRPyPN as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
from NRPyPN_shortcuts import div,dot,cross # NRPyPN: shortcuts for e.g., vector operations
#################################
#################################
# 2PN spin-spin term, from Eqs. 2.18 and 2.19 of
# Buonanno, Chen, and Damour (2006):
# https://arxiv.org/abs/gr-qc/0508067
def f_H_SS_2PN(m1,m2, S1U,S2U, nU, q):
    """Compute the leading-order (2PN) spin-spin Hamiltonian.

    Implements Eqs. 2.18-2.19 of Buonanno, Chen & Damour (2006),
    https://arxiv.org/abs/gr-qc/0508067.  The result is stored in the
    module-level global H_SS_2PN rather than returned.
    """
    global H_SS_2PN
    # Effective spin combination S0 = (1 + m2/m1) S1 + (1 + m1/m2) S2.
    S0U = ixp.zerorank1()
    for axis in range(3):
        S0U[axis] = (1 + m2/m1)*S1U[axis] + (1 + m1/m2)*S2U[axis]
    # Reduced mass mu = m1 m2 / M.
    mu = m1*m2 / (m1 + m2)
    H_SS_2PN = mu/(m1 + m2) * (3*dot(S0U,nU)**2 - dot(S0U,S0U)) / (2*q**3)
#################################
#################################
# 3PN spin-spin S_1,S_2 coupling term, from Eq. 2.11 of
# Steinhoff, Hergt, and Sch\"afer (2008a)
# https://arxiv.org/abs/0712.1716
def f_H_SS_S1S2_3PN(m1,m2, n12U, S1U,S2U, p1U,p2U, r12):
    """Compute the 3PN S1-S2 spin-spin coupling Hamiltonian.

    Implements Eq. 2.11 of Steinhoff, Hergt & Schafer (2008a),
    https://arxiv.org/abs/0712.1716.  The result is accumulated into the
    module-level global H_SS_S1S2_3PN (not returned).

    m1, m2   : masses of the two bodies
    n12U     : unit vector pointing from body 1 to body 2
    S1U, S2U : spin vectors
    p1U, p2U : linear momenta
    r12      : orbital separation
    """
    global H_SS_S1S2_3PN
    # Piece symmetric under the 1 <-> 2 exchange, prefactor 1/(2 m1 m2 r12^3):
    H_SS_S1S2_3PN = (+div(3,2)*(dot(cross(p1U,S1U),n12U)*dot(cross(p2U,S2U),n12U))
                     + 6*(dot(cross(p2U,S1U),n12U)*dot(cross(p1U,S2U),n12U))
                     -15*dot(S1U,n12U)*dot(S2U,n12U)*dot(p1U,n12U)*dot(p2U,n12U)
                     -3*dot(S1U,n12U)*dot(S2U,n12U)*dot(p1U,p2U)
                     +3*dot(S1U,p2U)*dot(S2U,n12U)*dot(p1U,n12U)
                     +3*dot(S2U,p1U)*dot(S1U,n12U)*dot(p2U,n12U)
                     +3*dot(S1U,p1U)*dot(S2U,n12U)*dot(p2U,n12U)
                     +3*dot(S2U,p2U)*dot(S1U,n12U)*dot(p1U,n12U)
                     -div(1,2)*dot(S1U,p2U)*dot(S2U,p1U)
                     +dot(S1U,p1U)*dot(S2U,p2U)
                     -3*dot(S1U,S2U)*dot(p1U,n12U)*dot(p2U,n12U)
                     +div(1,2)*dot(S1U,S2U)*dot(p1U,p2U))/(2*m1*m2*r12**3)
    # p1^2 piece, prefactor 3/(2 m1^2 r12^3):
    H_SS_S1S2_3PN+= (-dot(cross(p1U,S1U),n12U)*dot(cross(p1U,S2U),n12U)
                     +dot(S1U,S2U)*dot(p1U,n12U)**2
                     -dot(S1U,n12U)*dot(S2U,p1U)*dot(p1U,n12U))*3/(2*m1**2*r12**3)
    # p2^2 piece: the 1 <-> 2 mirror of the p1^2 piece above.
    # BUG FIX: the last factor previously read dot(S1U,p1U); under the
    # 1 <-> 2 exchange of the p1^2 term it must be dot(S1U,p2U)
    # (cf. Eq. 2.11 of arXiv:0712.1716).
    H_SS_S1S2_3PN+= (-dot(cross(p2U,S2U),n12U)*dot(cross(p2U,S1U),n12U)
                     +dot(S1U,S2U)*dot(p2U,n12U)**2
                     -dot(S2U,n12U)*dot(S1U,p2U)*dot(p2U,n12U))*3/(2*m2**2*r12**3)
    # Momentum-independent piece, prefactor 6 (m1+m2)/r12^4:
    H_SS_S1S2_3PN+= (+dot(S1U,S2U)-2*dot(S1U,n12U)*dot(S2U,n12U))*6*(m1+m2)/r12**4
#################################
#################################
# 3PN spin-squared (S1^2, S2^2) terms, from Eq. 9 of
# Steinhoff, Hergt, and Sch\"afer (2008b)
# https://arxiv.org/abs/0809.2200
def f_H_SS_S1sq_S2sq_3PN(m1,m2, n12U,n21U, S1U,S2U, p1U,p2U, r12):
    """Compute the 3PN spin-squared (S1^2 and S2^2) Hamiltonian terms.

    Implements Eq. 9 of Steinhoff, Hergt & Schafer (2008b),
    https://arxiv.org/abs/0809.2200.  The result is stored in the
    module-level global H_SS_S1sq_S2sq_3PN (not returned): the sum of one
    "particle" contribution plus the same contribution with the particle
    labels 1 and 2 exchanged.
    """
    def f_H_SS_particle(m1,m2, n12U, S1U,_S2U, p1U,p2U, r12): # _S2U unused.
        # Single-particle S1^2 contribution; every spin factor below is S1U,
        # so only the first body's spin enters (hence _S2U is unused).
        H_SS_S1sq_S2sq_3PN_particle = (
            + m2/(4*m1**3)*dot(p1U,S1U)**2
            +3*m2/(8*m1**3)*dot(p1U,n12U)**2*dot(S1U,S1U)
            -3*m2/(8*m1**3)*dot(p1U,p1U)*dot(S1U,n12U)**2
            -3*m2/(4*m1**3)*dot(p1U,n12U)*dot(S1U,n12U)*dot(p1U,S1U)
            -3/(4*m1*m2)*dot(p2U,p2U)*dot(S1U,S1U)
            +9/(4*m1*m2)*dot(p2U,p2U)*dot(S1U,n12U)**2
            +3/(4*m1**2)*dot(p1U,p2U)*dot(S1U,S1U)
            -9/(4*m1**2)*dot(p1U,p2U)*dot(S1U,n12U)**2
            -3/(2*m1**2)*dot(p1U,n12U)*dot(p2U,S1U)*dot(S1U,n12U)
            +3/(m1**2) *dot(p2U,n12U)*dot(p1U,S1U)*dot(S1U,n12U)
            +3/(4*m1**2)*dot(p1U,n12U)*dot(p2U,n12U)*dot(S1U,S1U)
            -15/(4*m1**2)*dot(p1U,n12U)*dot(p2U,n12U)*dot(S1U,n12U)**2)/r12**3
        # 1/r12^4 (momentum-independent) piece:
        H_SS_S1sq_S2sq_3PN_particle+= -(+div(9,2)*dot(S1U,n12U)**2
                                        -div(5,2)*dot(S1U,S1U)
                                        +7*m2/m1*dot(S1U,n12U)**2
                                        -3*m2/m1*dot(S1U,S1U))*m2/r12**4
        return H_SS_S1sq_S2sq_3PN_particle
    global H_SS_S1sq_S2sq_3PN
    # Total = particle(1) + particle(2) via the 1 <-> 2 label swap
    # (note n21U replaces n12U in the swapped call).
    H_SS_S1sq_S2sq_3PN = (+f_H_SS_particle(m1,m2, n12U, S1U,S2U, p1U,p2U, r12)
                          +f_H_SS_particle(m2,m1, n21U, S2U,S1U, p2U,p1U, r12))
|
[
"zachetie@gmail.com"
] |
zachetie@gmail.com
|
c5ec7aeea7ebd380c20fdedc5a2edfd5b703ce91
|
8a1bbbe4d3d487fcb5f86c9d5f108ea2b4de1894
|
/df/r_incore.py
|
818b962533399e9d73ea9a297d17207225f2dd09
|
[
"BSD-2-Clause"
] |
permissive
|
molguin-qc/pyscf
|
a7abaa7b61143c58fae065d2cf035952e782a1f0
|
0ca910a816e116542c83913b52e7a4a1cad83454
|
refs/heads/master
| 2020-04-06T06:21:13.065884
| 2015-11-24T22:49:49
| 2015-11-24T22:49:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,758
|
py
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import time
import ctypes
import _ctypes
import numpy
import scipy.linalg
import pyscf.lib
from pyscf.lib import logger
import pyscf.gto
from pyscf.df import incore
from pyscf.scf import _vhf
libri = pyscf.lib.load_library('libri')
def _fpointer(name):
return ctypes.c_void_p(_ctypes.dlsym(libri._handle, name))
# (ij|L)
def aux_e2(mol, auxmol, intor='cint3c2e_spinor', aosym='s1', comp=1, hermi=0):
    """Compute 3-center 2-electron integrals (ij|L) over mol's AO basis and
    auxmol's auxiliary basis.

    The two Mole environments are concatenated into one libcint environment
    and a C driver fills the output buffer in place.

    Returns:
        complex ndarray of shape (nao*nao, naoaux) for aosym='s1', or
        (nao*(nao+1)//2, naoaux) for the packed-symmetric case.
    """
    # Merge AO and auxiliary-basis environments into single atm/bas/env
    # tables, as libcint requires.
    atm, bas, env = \
        pyscf.gto.mole.conc_env(mol._atm, mol._bas, mol._env,
                                auxmol._atm, auxmol._bas, auxmol._env)
    c_atm = numpy.array(atm, dtype=numpy.int32)
    c_bas = numpy.array(bas, dtype=numpy.int32)
    c_env = numpy.array(env)
    natm = ctypes.c_int(mol.natm+auxmol.natm)
    nbas = ctypes.c_int(mol.nbas)
    nao = mol.nao_2c()        # 2-component (spinor) AO count
    naoaux = auxmol.nao_nr()  # auxiliary basis is scalar (non-relativistic)
    if aosym == 's1':
        eri = numpy.empty((nao*nao,naoaux), dtype=numpy.complex)
        fill = _fpointer('RIfill_r_s1_auxe2')
    else:
        # Pack only the lower triangle of the (i,j) pair index.
        eri = numpy.empty((nao*(nao+1)//2,naoaux), dtype=numpy.complex)
        fill = _fpointer('RIfill_r_s2ij_auxe2')
    fintor = _fpointer(intor)
    cintopt = _vhf.make_cintopt(c_atm, c_bas, c_env, intor)
    # The C driver writes the integrals directly into eri's buffer.
    libri.RIr_3c2e_auxe2_drv(fintor, fill,
                             eri.ctypes.data_as(ctypes.c_void_p),
                             ctypes.c_int(0), ctypes.c_int(mol.nbas),
                             ctypes.c_int(mol.nbas), ctypes.c_int(auxmol.nbas),
                             ctypes.c_int(1), cintopt,
                             c_atm.ctypes.data_as(ctypes.c_void_p), natm,
                             c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
                             c_env.ctypes.data_as(ctypes.c_void_p))
    # Release the libcint optimizer allocated by make_cintopt.
    libri.CINTdel_optimizer(ctypes.byref(cintopt))
    return eri
# (L|ij)
def aux_e1(mol, auxmol, intor='cint3c2e_spinor', aosym='s1', comp=1, hermi=0):
    """Placeholder for the (L|ij) 3-center transform; not implemented."""
    pass
def cholesky_eri(mol, auxbasis='weigend', aosym='s1', verbose=0):
    """Density fitting: Cholesky-decomposed 3-center integrals.

    Computes L^-1 (ij|P) where L is the lower Cholesky factor of the
    2-center metric (P|Q), for both the large-large (LL) and small-small
    (SS) spinor components.

    Returns:
        tuple (cderi_ll, cderi_ss), each C-ordered with the auxiliary
        index first.
    """
    t0 = (time.clock(), time.time())
    if isinstance(verbose, logger.Logger):
        log = verbose
    else:
        log = logger.Logger(mol.stdout, verbose)
    auxmol = incore.format_aux_basis(mol, auxbasis)
    # 2-center metric (P|Q) over the auxiliary basis.
    j2c = incore.fill_2c2e(mol, auxmol)
    log.debug('size of aux basis %d', j2c.shape[0])
    t1 = log.timer('2c2e', *t0)
    low = scipy.linalg.cholesky(j2c, lower=True)
    j2c = None  # free the metric; only its Cholesky factor is needed
    t1 = log.timer('Cholesky 2c2e', *t1)
    # 3-center integrals: LL block and SS (sigma.p ... sigma.p) block.
    j3c_ll = aux_e2(mol, auxmol, intor='cint3c2e_spinor', aosym=aosym)
    j3c_ss = aux_e2(mol, auxmol, intor='cint3c2e_spsp1_spinor', aosym=aosym)
    t1 = log.timer('3c2e', *t1)
    cderi_ll = scipy.linalg.solve_triangular(low, j3c_ll.T, lower=True,
                                             overwrite_b=True)
    cderi_ss = scipy.linalg.solve_triangular(low, j3c_ss.T, lower=True,
                                             overwrite_b=True)
    # solve_triangular return cderi in Fortran order
    cderi = (pyscf.lib.transpose(cderi_ll.T),
             pyscf.lib.transpose(cderi_ss.T))
    log.timer('cholesky_eri', *t0)
    return cderi
if __name__ == '__main__':
    # Smoke test: run density-fitted DHF on water and compare with a
    # reference energy.
    from pyscf import scf
    mol = pyscf.gto.Mole()
    mol.build(
        verbose = 0,
        atom = [["O" , (0. , 0.     , 0.)],
                [1   , (0. , -0.757 , 0.587)],
                [1   , (0. , 0.757  , 0.587)] ],
        basis = 'ccpvdz',
    )
    cderi = cholesky_eri(mol, verbose=5)
    n2c = mol.nao_2c()
    c2 = .5 / mol.light_speed  # small-component normalization factor 1/(2c)
    def fjk(mol, dm, *args, **kwargs):
        # dm is 4C density matrix
        cderi_ll = cderi[0].reshape(-1,n2c,n2c)
        cderi_ss = cderi[1].reshape(-1,n2c,n2c)
        vj = numpy.zeros((n2c*2,n2c*2), dtype=dm.dtype)
        vk = numpy.zeros((n2c*2,n2c*2), dtype=dm.dtype)
        # Coulomb: contract auxiliary density rho_P with (P|ij); SS blocks
        # carry the (1/2c)^2 factor.
        rho =(numpy.dot(cderi[0], dm[:n2c,:n2c].T.reshape(-1))
            + numpy.dot(cderi[1], dm[n2c:,n2c:].T.reshape(-1)*c2**2))
        vj[:n2c,:n2c] = numpy.dot(rho, cderi[0]).reshape(n2c,n2c)
        vj[n2c:,n2c:] = numpy.dot(rho, cderi[1]).reshape(n2c,n2c) * c2**2
        # Exchange, block by block: K = sum_P (P|ik) dm_kl (P|lj).
        v1 = numpy.einsum('pij,jk->pik', cderi_ll, dm[:n2c,:n2c])
        vk[:n2c,:n2c] = numpy.einsum('pik,pkj->ij', v1, cderi_ll)
        v1 = numpy.einsum('pij,jk->pik', cderi_ss, dm[n2c:,n2c:])
        vk[n2c:,n2c:] = numpy.einsum('pik,pkj->ij', v1, cderi_ss) * c2**4
        v1 = numpy.einsum('pij,jk->pik', cderi_ll, dm[:n2c,n2c:])
        vk[:n2c,n2c:] = numpy.einsum('pik,pkj->ij', v1, cderi_ss) * c2**2
        # LS block is the conjugate transpose of the SL block.
        vk[n2c:,:n2c] = vk[:n2c,n2c:].T.conj()
        return vj, vk
    mf = scf.DHF(mol)
    mf.get_jk = fjk          # override the J/K builder with the DF version
    mf.direct_scf = False
    ehf1 = mf.scf()
    print(ehf1, -76.08073868516945)  # computed vs reference energy
    # Sanity check: each LL cderi matrix should be Hermitian.
    cderi = cderi[0].reshape(-1,n2c,n2c)
    print(numpy.allclose(cderi, cderi.transpose(0,2,1).conj()))
|
[
"osirpt.sun@gmail.com"
] |
osirpt.sun@gmail.com
|
0022ad2cde11b4459237ac8330bc909f4317b4fd
|
9cf97aa5fafe0ba5e06d72a19b50a7b326857dcf
|
/create_model_input.py
|
7e02025b5139eebef743c40f5db58fca2dfd87f8
|
[] |
no_license
|
Shawn-nau/Time-series-prediction
|
a027b22f250e3dcd859f1d92a41a4e979a1a0526
|
044d34846d04a19898c3c8b874c7e982d545ab40
|
refs/heads/master
| 2020-09-11T13:18:34.457153
| 2019-03-30T15:00:57
| 2019-03-30T15:00:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,183
|
py
|
import logging
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
class Input_builder(object):
    """Build model-specific training inputs from a univariate time series.

    A single callable dispatcher turns raw data into the (X, y) layout each
    supported model family expects ('weibull', 'svm'/'lstm', 'seq2seq',
    'arima', 'xgb').
    """

    def __init__(self):
        pass

    def __call__(self, model, x, y=None, train_window=20, train_window_2=None):
        """Dispatch to the input builder for *model*.

        model: one of 'weibull', 'svm', 'lstm', 'seq2seq', 'arima', 'xgb'.
        x: raw series (array-like; DataFrame for 'arima'/'xgb').
        y: labels, only used by 'weibull'.
        train_window: length of the sliding input window.
        train_window_2: output window length, only used by 'seq2seq'.
        """
        if model == 'weibull':
            return self.create_weibull_input(x, y, train_window)
        elif model == 'svm' or model == 'lstm':
            # BUG FIX: train_window was hard-coded to 20 here, silently
            # ignoring the caller's argument.
            return self.create_RNN_input(x, train_window)
        elif model == 'seq2seq':
            return self.create_seq2seq_basic_input(x, train_window, train_window_2)
        elif str(model) == 'arima':
            return x.iloc[:, -1].values
        elif str(model) == 'xgb':
            return self.create_xgb_input(x)

    def create_weibull_input(self, x, y, train_windows=20):
        """Take up to `train_windows` points ending where y's trailing
        plateau of repeated values begins."""
        index_end = len(y) - 1
        y = list(y)
        # Scan backwards for the first value different from the last one.
        # NOTE(review): y.index(yy) returns the *first* occurrence of that
        # value anywhere in y, which may precede the plateau — verify intent.
        for yy in y[::-1]:
            if yy != y[-1]:
                index_end = y.index(yy)
                break
        index_begin = index_end - train_windows if (index_end - train_windows > 0) else 1
        x, y = x[index_begin:index_end], y[index_begin:index_end]
        logging.info("Weibull train data {}".format(len(x)))
        return np.array(x), np.array(y)

    def create_RNN_input(self, x_train, train_window):
        """Sliding-window samples: X=(samples, train_window, 1), y=(samples, 1)."""
        x, y = [], []
        for i in range(len(x_train) - train_window - 1):
            x.append(x_train[i:i + train_window])
            y.append(x_train[i + train_window])  # next point is the target
        x = np.array(x)
        x = x.reshape(x.shape[0], x.shape[1], 1)
        y = np.array(y)
        y = y.reshape(y.shape[0], 1)
        return x, y

    def create_seq2seq_basic_input(self, data, input_seq_length, output_seq_length):
        """Encoder/decoder windows: X=(samples, input_seq_length, 1),
        y=(samples, output_seq_length, 1)."""
        x, y = [], []
        for i in range(len(data) - input_seq_length - output_seq_length - 1):
            x.append([data[i:(i + input_seq_length)]])
            y.append([data[(i + input_seq_length):(i + input_seq_length + output_seq_length)]])
        x = np.array(x)
        x2 = x.reshape(x.shape[0], -1, x.shape[1])
        y = np.array(y)
        y2 = y.reshape(y.shape[0], -1, y.shape[1])
        return x2, y2

    def create_seq2seq_input(self):
        """Placeholder for a richer seq2seq pipeline (not implemented)."""
        pass

    def create_arima_input(self, examples):
        """Return the last column of *examples* as a 1-D array."""
        data = examples.iloc[:, -1].values
        return data

    def create_xgb_input(self, examples):
        """Derive year/week features from the first column, then drop the
        'Repair week' column and return plain values."""
        # First column is expected to look like YYYYWW (e.g. 201845) —
        # TODO confirm against the data source.
        examples['year'] = examples.iloc[:, 0].apply(lambda x: int(str(x)[0:4]))
        examples['week'] = examples.iloc[:, 0].apply(lambda x: int(str(x)[4:]))
        examples.drop(columns=['Repair week'], inplace=True)
        # examples = pd.get_dummies(examples, columns=['year'])  # month
        return examples.values

    def _read_csv(self, data_dir):
        """Load a CSV file into a DataFrame."""
        examples = pd.read_csv(data_dir)
        return examples

    def _normalize(self, data):
        """Scale *data* into [0, 1] with a fresh MinMaxScaler."""
        scaler = MinMaxScaler(feature_range=(0, 1))
        dataset = scaler.fit_transform(data)
        return dataset
class Input_pipe(object):
    """Skeleton of a train/dev/test feature pipeline; every method is an
    unimplemented stub."""
    def __init__(self):
        pass
    def get_train_features(self):
        """Stub: return training features (not implemented)."""
        pass
    def get_dev_features(self):
        """Stub: return dev/validation features (not implemented)."""
        pass
    def get_test_features(self):
        """Stub: return test features (not implemented)."""
        pass
    def create_examples2features(self):
        """Stub: convert raw examples into features (not implemented)."""
        pass
|
[
"tanlongxing888@163.com"
] |
tanlongxing888@163.com
|
ff10be4b7205ee829e3efe5d87de1af27b52f859
|
02bbac5a5e12b44919945ae7e95eb8d4c5bde28d
|
/hyperion/metrics/dcf.py
|
d6dd9c58b03ec60f96d509f00b84566fa255949f
|
[
"Apache-2.0"
] |
permissive
|
whkanggg/hyperion
|
5f594cb97512080cf0523abdc6407a8bc6db4562
|
14a11436d62f3c15cd9b1f70bcce3eafbea2f753
|
refs/heads/master
| 2020-08-09T14:18:04.689788
| 2019-07-25T18:39:01
| 2019-07-25T18:39:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,869
|
py
|
"""
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import numpy as np
from .roc import compute_rocch, rocch2eer
def compute_dcf(p_miss, p_fa, prior, normalize=True):
    """Detection cost function: DCF = prior*p_miss + (1-prior)*p_fa.

    Args:
      p_miss: Vector of miss probabilities.
      p_fa: Vector of false alarm probabilities.
      prior: Target prior (scalar) or vector of target priors.
      normalize: if true, divide by min(prior, 1-prior) (normalized DCF).

    Returns:
      DCF per (p_miss, p_fa) pair; shape [len(prior), len(p_miss)] when a
      vector of priors is given.
    """
    priors = np.asarray(prior)
    if priors.ndim == 1:
        # Add a trailing axis so each prior broadcasts across all
        # operating points.
        priors = priors[:, None]
    cost = priors * p_miss + (1 - priors) * p_fa
    if normalize:
        cost = cost / np.minimum(priors, 1 - priors)
    return cost
def compute_min_dcf(tar, non, prior, normalize=True):
    """Minimum DCF over decision thresholds:
    min_DCF = min_t prior*p_miss(t) + (1-prior)*p_fa(t).

    Args:
      tar: Target scores.
      non: Non-target scores.
      prior: Target prior or vector of target priors.
      normalize: if true, return normalized DCF, else unnormalized.

    Returns:
      (min_dcf, p_miss, p_fa): the minimum DCF per prior, and the miss /
      false-alarm probabilities at the minimizing operating point(s).
    """
    # Operating points on the ROC convex hull; the minimum must lie there.
    p_miss, p_fa = compute_rocch(tar, non)
    dcf = compute_dcf(p_miss, p_fa, prior, normalize)
    best = np.argmin(dcf, axis=-1)
    if dcf.ndim == 1:
        return dcf[best], p_miss[best], p_fa[best]
    rows = np.arange(dcf.shape[0])
    return dcf[rows, best], p_miss[best], p_fa[best]
def compute_act_dcf(tar, non, prior, normalize=True):
    """Computes actual DCF by making decisions assuming that scores
    are calibrated to act as log-likelihood ratios.

    Args:
      tar: Target scores.
      non: Non-target scores.
      prior: Target prior or ascending vector of target priors.
      normalize: if true, return normalized DCF, else unnormalized.

    Returns:
      Vector actual DCF for each prior.
      Vector of P_miss corresponding to each act DCF.
      Vector of P_fa corresponding to each act DCF.
    """
    prior = np.asarray(prior)
    if prior.ndim == 1:
        assert np.all(prior == np.sort(prior, kind='mergesort')), 'priors must be in ascending order'
    else:
        prior = prior[None]  # promote scalar to a 1-element vector
    num_priors = len(prior)
    ntar = len(tar)
    nnon = len(non)
    # Bayes decision thresholds: t = log((1-prior)/prior).
    t = - np.log(prior) + np.log(1-prior)
    # Count misses per threshold without a loop: merge the thresholds into
    # the target scores, rank everything, and each threshold's rank tells
    # how many target scores lie below it.
    ttar = np.concatenate((t, tar))
    ii = np.argsort(ttar, kind='mergesort')
    r = np.zeros((num_priors + ntar), dtype='int32')
    r[ii] = np.arange(1, num_priors + ntar + 1)
    r = r[:num_priors]  # ranks of the thresholds themselves
    # Subtract the thresholds at/below each one; they are ascending, so the
    # counts are num_priors, num_priors-1, ..., 1.
    n_miss = r - np.arange(num_priors, 0, -1)
    # Same ranking trick for false alarms (non-target scores above t).
    tnon = np.concatenate((t, non))
    ii = np.argsort(tnon, kind='mergesort')
    r = np.zeros((num_priors + nnon), dtype='int32')
    r[ii] = np.arange(1, num_priors + nnon + 1)
    r = r[:num_priors]
    n_fa = nnon - r + np.arange(num_priors, 0, -1)
    # Reference (quadratic) implementation, kept for cross-checking:
    # n_miss2 = np.zeros((num_priors,), dtype='int32')
    # n_fa2 = np.zeros((num_priors,), dtype='int32')
    # for i in xrange(len(t)):
    #     n_miss2[i] = np.sum(tar<t[i])
    #     n_fa2[i] = np.sum(non>t[i])
    # assert np.all(n_miss2 == n_miss)
    # assert np.all(n_fa2 == n_fa)
    # print(n_miss)
    # print(n_fa)
    p_miss = n_miss/ntar
    p_fa = n_fa/nnon
    act_dcf = prior * p_miss + (1-prior)*p_fa
    if normalize:
        act_dcf /= np.minimum(prior, 1-prior)
    # Collapse back to a scalar when a single prior was given.
    if len(act_dcf) == 1:
        act_dcf = act_dcf[0]
    return act_dcf, p_miss, p_fa
def fast_eval_dcf_eer(tar, non, prior, normalize_dcf=True):
    """Evaluate actual DCF, minimum DCF, EER and PRBEP in one pass.

    Args:
      tar: Target scores.
      non: Non-target scores.
      prior: Target prior or vector of target priors.
      normalize_dcf: if true, return normalized DCF, else unnormalized.

    Returns:
      (min_dcf, act_dcf, eer, prbep).
    """
    # The ROC convex hull is computed once and reused for every metric.
    p_miss, p_fa = compute_rocch(tar, non)
    eer = rocch2eer(p_miss, p_fa)
    # PRBEP: the point where miss and false-alarm *counts* (not rates) match.
    prbep = rocch2eer(p_miss * len(tar), p_fa * len(non))
    dcf = compute_dcf(p_miss, p_fa, prior, normalize_dcf)
    min_dcf = np.min(dcf, axis=-1)
    act_dcf, _, _ = compute_act_dcf(tar, non, prior, normalize_dcf)
    return min_dcf, act_dcf, eer, prbep
|
[
"jesus.antonio.villalba@gmail.com"
] |
jesus.antonio.villalba@gmail.com
|
df1b94b8ff8b9f70e3b53c78cbdbd988c19b38a9
|
e210c28eeed9d38eb78c14b3a6388eca1e0e85d8
|
/tests/unit_test/app_common/statistics/stats_def_test.py
|
b96578b1c7ca54b12726685ee84f4b9e06a3b7e9
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NVFlare
|
5a2d2e4c85a3fd0948e25f1ba510449727529a15
|
1433290c203bd23f34c29e11795ce592bc067888
|
refs/heads/main
| 2023-08-03T09:21:32.779763
| 2023-07-05T21:17:16
| 2023-07-05T21:17:16
| 388,876,833
| 442
| 140
|
Apache-2.0
| 2023-09-14T19:12:35
| 2021-07-23T17:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import pandas as pd
from nvflare.app_common.abstract.statistics_spec import Bin, DataType, Histogram, HistogramType
from nvflare.app_common.statistics.numpy_utils import dtype_to_data_type
from nvflare.app_common.utils.json_utils import ObjectEncoder
class TestStatsDef:
    def test_dtype_to_data_type(self):
        """pandas dtypes map to the expected DataType enum members."""
        train_data = [
            ["tom", 10, 15.5],
            ["nick", 15, 10.2],
            ["juli", 14],
            ["tom2", 10, 13.0],
            ["nick1", 25],
            ["juli1", 24, 10.5],
        ]
        train = pd.DataFrame(train_data, columns=["Name", "Age", "Edu"])
        assert DataType.STRING == dtype_to_data_type(train["Name"].dtype)
        assert DataType.INT == dtype_to_data_type(train["Age"].dtype)
        assert DataType.FLOAT == dtype_to_data_type(train["Edu"].dtype)

    def test_feature_histogram_to_json(self):
        """A Histogram nested in a statistics dict serializes to JSON."""
        even = [1, 3, 5, 7, 9]
        odd = [2, 4, 6, 8, 10]
        buckets = zip(even, odd)
        bins = [Bin(low_value=b[0], high_value=b[1], sample_count=random.randint(10, 100)) for b in buckets]
        hist = Histogram(HistogramType.STANDARD, bins)
        statistics = {"histogram": {"site-1": {"train": {"feat": hist}}}}
        x = json.dumps(statistics, cls=ObjectEncoder)
        # BUG FIX: the old check was `x.__eq__(<dict>)`, which returns
        # NotImplemented (truthy) for a str-vs-dict comparison, so the assert
        # could never fail.  Parse the JSON back and verify the structure;
        # sample counts are random, so only the bin edges are checked.
        decoded = json.loads(x)
        feat = decoded["histogram"]["site-1"]["train"]["feat"]
        # Serialized histogram layout is [hist_type, bins, hist_name]
        # (per the previously expected literal) — each bin is
        # [low, high, count].
        assert [b[:2] for b in feat[1]] == [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
|
[
"noreply@github.com"
] |
NVIDIA.noreply@github.com
|
0acf2de8988b83f552ee0e68ad6596e21dbee688
|
e17b0ad0ebeb361e5565eb3d12e717f296a7b878
|
/campanha/serializers.py
|
59d2a9c5d8533925b7660699f482c3e15c887c8b
|
[] |
no_license
|
easy-rpg/SheetAPI
|
94ea732083c3a7a82577e59e3a882a878772d6eb
|
5542197f8388eed761a15a79c6ccca4fd481ccba
|
refs/heads/master
| 2022-12-11T17:01:16.130002
| 2018-07-05T00:26:48
| 2018-07-05T00:26:48
| 131,898,341
| 1
| 0
| null | 2022-11-22T02:30:09
| 2018-05-02T19:44:34
|
Python
|
UTF-8
|
Python
| false
| false
| 603
|
py
|
from rest_framework.serializers import ModelSerializer, CharField, StringRelatedField
from .models import Campanha, Arco
class ArcoSerializer(ModelSerializer):
    """Serializer for Arco: all model fields plus two read-only extras."""
    # Campaign name flattened from the related Campanha for convenience.
    campanha_nome = CharField(source='campanha.nome', read_only=True)
    # Related characters rendered via their __str__ representation.
    personagens = StringRelatedField(many=True, read_only=True)

    class Meta:
        model = Arco
        fields = '__all__'
class CampanhaSerializer(ModelSerializer):
    """Serializer for Campanha with its arcos nested read-only."""
    # Full nested representation of the campaign's arcs.
    arcos = ArcoSerializer(many=True, read_only=True)
    # Game-master username flattened from the related user.
    mestre_nome = CharField(source='mestre.username', read_only=True)

    class Meta:
        model = Campanha
        fields = '__all__'
|
[
"rodrigondec@gmail.com"
] |
rodrigondec@gmail.com
|
8822e51cbaa2e4c42d764c8168d1caab8609a540
|
efc6c38070f4587346c88ae2444a8b47bb51a635
|
/backend/nameless_wave_19563/wsgi.py
|
08b98350d05b1e315aaad1417e4a82387add737d
|
[] |
no_license
|
andremcb/nameless-wave-19563
|
ef259d2819855bb7b65f2c1c777a0d7fbf33df49
|
cdfe66614bea363b8dbd25ab3232183971759041
|
refs/heads/master
| 2023-03-12T04:39:05.580066
| 2021-03-03T22:01:29
| 2021-03-03T22:01:29
| 344,275,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
"""
WSGI config for nameless_wave_19563 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nameless_wave_19563.settings')
application = get_wsgi_application()
|
[
"csantos.machado@gmail.com"
] |
csantos.machado@gmail.com
|
87bb7f7ef350864d08ee12e01c5a02668a812e6e
|
2fc11a0aaf47cbaa64fb1d3aa304c51424a96324
|
/test/basic_test.py
|
1a8c1072e21942884e38dbec0556b33a7a1ac19c
|
[] |
no_license
|
isabella232/dex-cli
|
2cd73758980d0661c083cdf8aebcb8d73f07c297
|
652101177afdc76ab2f378e9a9cc5cc1b7a6aaa8
|
refs/heads/master
| 2022-12-30T18:42:50.279225
| 2020-10-21T08:45:53
| 2020-10-21T08:45:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
# Example of test: Just for Integrating travis PR
# TODO: Add real tests https://github.com/gnosis/dex-cli/issues/25
def inc(x):
    """Return *x* incremented by one."""
    return 1 + x
def test_answer():
    # Sanity check: inc() adds exactly one.
    assert inc(4) == 5
|
[
"noreply@github.com"
] |
isabella232.noreply@github.com
|
76f406522001c4ab4dc3b879a3abdad7333ea711
|
8651c2c84e4b70ef6977d9364043605c354e1489
|
/Ch8/02_pets.py
|
c92ecceef9a49b651aaee9681a2e0440e0395b43
|
[] |
no_license
|
sliverz6/Python_Crash_Course
|
c222cf1ff9dbe6518ee36a3db7f376c2e3b2a317
|
44cea09ab066e82feba97fee1e74e61fc7e1e565
|
refs/heads/main
| 2023-02-25T02:57:53.585677
| 2021-01-30T14:27:49
| 2021-01-30T14:27:49
| 333,345,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
def describe_pet(pet_name, animal_type="dog"):
    """Print a short two-line description of a pet (defaults to a dog)."""
    print(f"\nI have a {animal_type}.")
    print(f"My {animal_type}'s name is {pet_name.title()}.")
describe_pet("harry")  # positional argument
describe_pet(pet_name="harry", animal_type="hamster")  # keyword arguments
|
[
"noreply@github.com"
] |
sliverz6.noreply@github.com
|
331eaa11de4c8d4744427b517f6adbfc7b3e5a25
|
4a36b5979b0753b32cff3956fd97fb8ed8b11e84
|
/0.24/_downloads/ecd77f376b369abaa61bcf309ffb8563/interpolate_bad_channels.py
|
1c7c1f1d7a168c1c71f51760d3aba752b53d2d47
|
[] |
permissive
|
mne-tools/mne-tools.github.io
|
8aac7ae10bf2faeeb875b9a351a5530dc0e53154
|
495e878adc1ef3374e3db88604504d7542b01194
|
refs/heads/main
| 2023-09-03T07:06:00.660557
| 2023-09-03T04:10:18
| 2023-09-03T04:10:18
| 35,639,371
| 12
| 16
|
BSD-3-Clause
| 2023-05-05T19:04:32
| 2015-05-14T22:04:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
"""
.. _ex-interpolate-bad-channels:
=============================================
Interpolate bad channels for MEG/EEG channels
=============================================
This example shows how to interpolate bad MEG/EEG channels
- Using spherical splines from :footcite:`PerrinEtAl1989` for EEG data.
- Using field interpolation for MEG and EEG data.
In this example, the bad channels will still be marked as bad.
Only the data in those channels is replaced.
"""
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# Mainak Jas <mainak.jas@telecom-paristech.fr>
#
# License: BSD-3-Clause
# %%
# sphinx_gallery_thumbnail_number = 2
import mne
from mne.datasets import sample

print(__doc__)

data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Load one evoked condition, baseline-corrected on the pre-stimulus interval.
evoked = mne.read_evokeds(fname, condition='Left Auditory',
                          baseline=(None, 0))

# plot with bads
evoked.plot(exclude=[], picks=('grad', 'eeg'))

# %%
# Compute interpolation (also works with Raw and Epochs objects)
# reset_bads=False keeps the channels marked bad; only their data changes.
evoked_interp = evoked.copy().interpolate_bads(reset_bads=False)
evoked_interp.plot(exclude=[], picks=('grad', 'eeg'))

# %%
# You can also use minimum-norm for EEG as well as MEG
evoked_interp_mne = evoked.copy().interpolate_bads(
    reset_bads=False, method=dict(eeg='MNE'), verbose=True)
evoked_interp_mne.plot(exclude=[], picks=('grad', 'eeg'))

# %%
# References
# ----------
# .. footbibliography::
|
[
"larson.eric.d@gmail.com"
] |
larson.eric.d@gmail.com
|
f310678a9fa600d8ab56e1100b469f3b7d2b850c
|
6b233b45ac4ae18711a7f8a7730eebcf7e4e80ed
|
/dlms_control.py
|
4db110d59c15672ed39fe3e81697db22ab8c0a10
|
[] |
no_license
|
petervanya/PTFEsim
|
251b7501a48ab05245c778be0f39b9bacd821348
|
509ef87df647f5c1231efbbc0d0a84add1da28d6
|
refs/heads/master
| 2021-01-21T04:51:05.644202
| 2016-07-20T16:32:34
| 2016-07-20T16:32:34
| 46,088,758
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,278
|
py
|
#!/usr/bin/env python
"""Usage:
    dlms_control.py [--L <L> --dt <dt> --steps <n> --thermo <th> --halo <h>]

Generate DL_MESO control file.

Options:
    --L <L>        Box length [default: 40.0]
    --dt <dt>      Timestep [default: 0.05]
    --steps <n>    Number of steps [default: 10000]
    --thermo <th>  Print every [default: 100]
    --halo <h>     Boundary halo, like neighbor [default: 2.5]

pv278@cam.ac.uk, 06/06/16
"""
from docopt import docopt
import sys

args = docopt(__doc__)
L = float(args["--L"])
dt = float(args["--dt"])
N = int(args["--steps"])
thermo = int(args["--thermo"])
halo = float(args["--halo"])

# Assemble the CONTROL file contents line by line.
s = "pokus\n\n"
s += "volume " + str(L**3) + "\n"
s += "temperature 1.0\n"
s += "cutoff 1.0\n"
s += "boundary halo " + str(halo) + "\n\n"
s += "timestep " + str(dt) + "\n"
s += "steps " + str(N) + "\n"
s += "equilibration steps 0\n"
s += "scale temperature every 10\n"
s += "trajectory 0 100\n"
s += "stats every 100\n"
s += "stack size 100\n"
s += "print every " + str(thermo) + "\n\n"
s += "job time 1000000.0\n"
s += "close time 1.0\n\n"
s += "ensemble nvt dpdvv\n\n"
s += "finish\n"

print("Box size: %.1f | Timestep: %.3f | Num steps: %i" % (L, dt, N))
# BUG FIX: previously `open("CONTROL", "w").write(s)` left the handle
# unclosed (flush/close deferred to garbage collection); use a context
# manager so the file is closed deterministically.
with open("CONTROL", "w") as fout:
    fout.write(s)
print("CONTROL file saved.")
|
[
"peter.vanya@gmail.com"
] |
peter.vanya@gmail.com
|
bdbc62414e39c5378751c220020b0e1074e5603e
|
560136cbc70809a66d7fd653fadcc5f6ac2f7b8d
|
/buy_info.py
|
cb3e350fcee2935aba754ef4481f5686867ed763
|
[] |
no_license
|
Python51888/Tickets12306
|
4b3c7381bbf163de4b148e6c718977f633323197
|
25db032a835f7617410e080143668a11663573a8
|
refs/heads/master
| 2020-06-15T04:21:33.352932
| 2018-09-25T07:50:50
| 2018-09-25T07:50:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,672
|
py
|
import tkinter as tk
import re
test = 0
def confirm_snp(t_file):
    """Show a Tkinter window to confirm the purchase for one train.

    t_file fields by position: 0 date, 1 train number, 2/3 departure station
    and HHMM time, 4/5 arrival station and HHMM time, 6 passenger dict,
    7 seat-availability strings, 8 price strings.  Blocks in mainloop()
    until a button callback closes the window, then returns the
    module-level `test` flag the callbacks set.
    """
    time = t_file[0]
    checi = t_file[1]  # train number
    start_station = t_file[2]
    start_time = t_file[3]
    start_time = start_time[:2] + ':' + start_time[2:]  # 'HHMM' -> 'HH:MM'
    stop_station = t_file[4]
    stop_time = t_file[5]
    stop_time = stop_time[:2] + ':' + stop_time[2:]
    zuowei = t_file[7]  # seat-availability entries, "<SEAT>_num':'<count>" strings
    user = dict(t_file[6])
    prices = t_file[8]
    checixinxi = [checi, start_station, start_time, stop_station, stop_time]
    root = tk.Tk()
    # root.geometry('830x350+500+200')
    root.title('购票信息')
    root.resizable(width=False, height=False)
    # Train information section
    # l1 = tk.Label(root, text='列车信息(余票信息仅供参考)')
    # l1.pack(anchor='nw', ipady=20)
    ff = tk.LabelFrame(root, text='列车信息(余票信息仅供参考)')
    ff.pack()
    la1 = tk.Label(ff, text='-------车次信息---------------------------------')
    la1.pack(anchor='w', padx=100, pady=10)
    # can1 = tk.Canvas(ff,bg = 'blue')
    # can1.pack()
    # Row of labels: date, train number, stations and times
    f = tk.Frame(ff)
    f.pack(anchor='w', padx=100, pady=10)
    l2 = tk.Label(f, text=time + ' ')
    l2.pack(side=tk.LEFT)
    l3 = tk.Label(f, text=checi + ' ')
    l3.pack(side=tk.LEFT)
    l4 = tk.Label(f, text=start_station)
    l4.pack(side=tk.LEFT)
    l5 = tk.Label(f, text=start_time + ' --> ')
    l5.pack(side=tk.LEFT)
    l6 = tk.Label(f, text=stop_station)
    l6.pack(side=tk.LEFT)
    l7 = tk.Label(f, text=stop_time)
    l7.pack(side=tk.LEFT)
    la2 = tk.Label(ff, text='-------票价信息---------------------------------')
    la2.pack(anchor='w', padx=100, pady=10)
    # Seat/fare selection section
    f2 = tk.Frame(ff)
    f2.pack(anchor='w', padx=100, pady=10)
    # 12306 seat-class codes:
    # "YZ_num": "1"   hard seat        "RZ_num": "2"   soft seat
    # "YW_num": "3"   hard sleeper     "RW_num": "4"   soft sleeper
    # "GR_num": "6"   deluxe sleeper   "TZ_num": "P"   premium seat
    # "WZ_num": "WZ"  standing         "ZE_num": "O"   second class
    # "ZY_num": "M"   first class      "SWZ_num": "9"  business class
    zuo_weidict = {"YZ_num'": "硬座", "RZ_num'": "软座", "YW_num'": "硬卧", "RW_num'": "软卧",
                   "GR_num'": "高级软卧", "TZ_num'": "特等座", "WZ_num'": "无座", "ZE_num'": "二等座",
                   "ZY_num'": "一等座", "SWZ_num'": "商务座"}
    v = tk.IntVar(root)
    la3 = tk.Label(ff, text='-------乘客信息---------------------------------')
    la3.pack(anchor='w', padx=100, pady=10)
    for i in range(len(zuowei)):
        # NOTE(review): i starts at 0, so [i - 1] reads the last entry first;
        # the submit callback applies the same -1 offset (zuowei[v.get() - 1]),
        # keeping the mapping consistent — confirm this is intentional.
        s = zuowei[i - 1].split(':')
        p = prices[i - 1].split(':')
        p1 = p[0].split('_')
        s1 = s[0].split('_')
        # Price strings look like "'00305'": strip leading zeros and insert a
        # decimal point before the final digit (e.g. -> 30.5 yuan).
        regex = re.search(r"'0*(\d+)(\d)'$", p[1])
        price1 = regex.group(1) + '.' + regex.group(2)
        if s[0] in zuo_weidict:
            n = zuo_weidict[s[0]]
            # NOTE(review): eval() on API-provided text — ast.literal_eval
            # would be safer.
            rb = tk.Radiobutton(f2, text=n + '(¥' + price1 + ')' + ' ' + '剩余:' + eval(s[1]) + '张', value=i, variable=v)
            rb.pack(side=tk.LEFT)
    # Passenger selection section
    f3 = tk.Frame(ff)
    f3.pack(anchor='w', padx=100)
    user1 = list(user.values())
    v2 = tk.IntVar(root)
    for x in range(len(user)):
        userinfo = user1[x - 1]  # same -1 offset as the seat loop above
        rb1 = tk.Radiobutton(f3, text='姓名:' + userinfo[0] + ' 性别:' + userinfo[1]
                             + ' 身份证:' + userinfo[2] + ' 票种:' + userinfo[3] + ' 电话:' + userinfo[4],
                             variable=v2, value=x)
        rb1.pack(anchor='nw', ipady=7)
    # Back / submit buttons
    f4 = tk.Frame(ff)
    f4.pack(anchor='w', pady=20, padx=150)
    btnback = tk.Button(f4, width=15, text='返回', command=lambda: back(root))
    btnback.pack(side=tk.LEFT, padx=50)
    btn = tk.Button(f4, width=15, text='提交',
                    command=lambda: onbtn(zuowei[v.get() - 1], user1[v2.get() - 1], checixinxi, root))
    btn.pack(side=tk.LEFT, padx=50)
    # root.maxsize(830, 350)
    # root.minsize(850, 350)
    root.mainloop()
    return test
def onbtn(a, b, c, root):
    """Submit callback: confirm the selection and record the booking.

    a: selected seat entry ("<SEAT>_num':'<count>" string)
    b: selected passenger record (list: name, sex, ID, ticket type, phone)
    c: train info [train no., from, depart time, to, arrive time]
    root: the confirmation window, destroyed on success
    """
    global test
    # Gather the user's selections.
    # Seat-class key -> 12306 seat code / human-readable name.
    zuo_wei = {"YZ_num'": '1', "RZ_num'": '2', "YW_num'": '3',
               "RW_num'": '4', "GR_num'": '6', "TZ_num'": 'P', "WZ_num'": 'WZ', "ZE_num'": 'O', "ZY_num'": 'M',
               "SWZ_num'": '9'}
    zuo_weidict = {"YZ_num'": "硬座", "RZ_num'": "软座", "YW_num'": "硬卧", "RW_num'": "软卧",
                   "GR_num'": "高级软卧", "TZ_num'": "特等座", "WZ_num'": "无座", "ZE_num'": "二等座",
                   "ZY_num'": "一等座", "SWZ_num'": "商务座"}
    ticket = a.split(':')
    b.insert(0, zuo_wei[ticket[0]])  # prepend the seat code to the passenger record
    zuoweixinxi = zuo_weidict[ticket[0]]
    yonghuxinxi = '车次:' + c[0] + ' ' + c[1] + ' ' + c[2] + '---' + c[3] + ' ' + c[4] \
                  + '\n姓名:' + b[1] + ' ' + '性别:' + b[2] + ' ' + '\n身份证:' + b[3] + ' ' + '票种:' + b[4] + ' ' + '\n电话:' + \
                  b[5] + ' ' + '\n选座信息:' + zuoweixinxi
    # NOTE(review): tk.messagebox only works if tkinter.messagebox was
    # imported somewhere; only `import tkinter as tk` is visible here — verify.
    msg = tk.messagebox.askokcancel(title='请确认购票信息', message=yonghuxinxi)
    if msg == True:
        test = b  # hand the augmented passenger record back to confirm_snp
        msg1 = tk.messagebox.showinfo('成功', '购票成功,请尽快登录官网付款')
        root.destroy()
def back(root):
    """Cancel callback: flag 'backed out' and close the window."""
    global test
    # Sentinel 1 tells confirm_snp's caller that no ticket was chosen.
    test = 1
    root.destroy()
|
[
"569578851@qq.com"
] |
569578851@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.