blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dc7dddface4c1700efaef4fa192c435f5a8e2390 | 81d7c62c357c086a8990105d4179c9a2cda0f89c | /Requests_module_old_project_cDVR_reference/aurora_cdvr_sanity_tests/scripts/sanity_scripts_ppsv3/sanity_0003_planner_get.py | 0e6bee4051288fd1d99f2c3ca4638cab449042e7 | [] | no_license | JAGASABARIVEL/Python_reference | 57f49a3f8d894f02f8003657a914395f4b55d844 | f2438289f189fc364dbe9dff0421c3df9be366b2 | refs/heads/master | 2020-04-08T19:23:23.406783 | 2018-11-29T12:25:43 | 2018-11-29T12:25:43 | 159,653,055 | 0 | 1 | null | 2020-01-25T19:07:42 | 2018-11-29T11:06:42 | Python | UTF-8 | Python | false | false | 4,314 | py | #!/usr/bin/python
from collections import OrderedDict
from pprint import pprint
import mypaths
from L1commonFunctions import *
from L2commonFunctions import *
from L3commonFunctions import *
from V3_commonFunctions import *
import V3_planner as planner
class TestPlannerGet(object):
"""
Sanity test to check wheteher user will be able to details of the planner successfully.
"""
TC_PHASES = OrderedDict()
def __init__(self, cfg, printflg=False):
self.message = None
self.status = None
self.tims_list = []
self.protocol = None
self.cfg = cfg
self.timeout = None
self.plannerId = None
self.TC_PHASES.update({'1. COLLECTING CONFIGURATION': self.setConfig})
self.TC_PHASES.update(
{'2. GETTING PLANNER(S) DETAILS': self.triggerGetPlanner})
self.kickTest()
def getTims(self):
return self.tims_list
def kickTest(self):
try:
for phase in self.TC_PHASES:
print "=" * 30
print phase
print "=" * 30
self.TC_PHASES[phase]()
self.message = "Testcase Passed : Successfully able to get the details from all the planners allowed for TCs and are working as expected."
self.tims_list.append(0)
self.tims_list.append(self.message)
except Exception as e:
self.message = "Testcase Failed: " + str(e)
self.tims_list.append(1)
self.tims_list.append(self.message)
except AssertionError as ae:
self.message = "Testcase Failed:" + str(ae)
self.tims_list.append(1)
self.tims_list.append(self.message)
finally:
print "=" * 30
print "4. CLEANUP PHASE"
print "=" * 30
self.triggerCleanPlanner()
print self.message
def setConfig(self):
self.message = "Testcase failed : Error Occured while configuration collection "
self.status = 3
self.tims_list = ["TC", "DESC", "US"]
self.abspath = os.path.abspath(__file__)
self.scriptName = os.path.basename(__file__)
(self.test, self.ext) = os.path.splitext(self.scriptName)
print "Starting test " + self.test
print "US: As a SP, I want an API to get the details of the planner."
print "TC:Get the planner details."
# set values based on config
self.protocol = self.cfg['protocol']
self.prefix = self.cfg['sanity']['household_prefix']
# Set a local variables
self.timeout = 2
plannerlimit = self.cfg['sanity']['households_needed']
self.index_pool = range(0, plannerlimit - 1)
def triggerGetPlanner(self):
for index in self.index_pool:
self.plannerId = self.prefix + str(index)
self.getPlanner()
def triggerCleanPlanner(self):
for index in self.index_pool:
self.plannerId = self.prefix + str(index)
self.cleanUp()
def getPlanner(self):
self.message = "Testcase Failed : Cannot able to get the planner details."
response = planner.get_planner(self.cfg, self.plannerId)
assert response, self.message
print "[INFO] Successfully able to get the planner details from the planner %s." % self.plannerId
def cleanUp(self):
print "[INFO] Clean up started for reverting the system to previous state.It will take some seconds please wait..."
planner.cleanup_planner(self.cfg, self.plannerId)
def doit(cfg, printflg=False):
try:
start_time = time.time()
set_errorlogging()
rc = doit_wrapper(cfg, printflg)
end_time = time.time()
return rc
except BaseException:
print "Error Occurred in Script \n"
PrintException()
return (1)
def doit_wrapper(cfg, printflg=False):
    """Instantiate the sanity test (which runs itself on construction) and
    hand back its TIMS result list."""
    test_case = TestPlannerGet(cfg, printflg=False)
    return test_case.getTims()
if __name__ == "__main__":
    # Resolve the config file relative to this script's name.
    scriptName = os.path.basename(__file__)
    # read config file
    arguments = sys.argv
    cfg = relative_config_file(arguments, scriptName)
    # Optionally echo the resolved configuration before running.
    if cfg['sanity']['print_cfg']:
        print "\nThe following configuration is being used:\n"
        pprint(cfg)
    doit(cfg, True)
| [
"jkarunan@cisco.com"
] | jkarunan@cisco.com |
cf36182d67dfec105b8da482eb00056924e2ce60 | 005037ee69c591c7b36953829e122f26e1410161 | /designerDoorMap.py | 46ef15e2fdb0a8a5553fdc0e9654aef6d69e5f47 | [] | no_license | lincrampton/pythonHackerRankLeetCode | bb9beff9f2de34be968a7947dfa86b9b4c608aa2 | 9e459dc122852e5c812922a5c32cb444af3aaf8e | refs/heads/master | 2023-07-20T05:42:16.826473 | 2021-09-02T20:17:48 | 2021-09-02T20:17:48 | 272,836,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | '''Mr. Vincent works in a door mat manufacturing company. One day, he designed a new door mat with the following specifications:
Mat size must be N X M. (N is an odd natural number, and M is 3 times N.)
The design should have 'WELCOME' written in the center.
The design pattern should only use |, . and - characters.'''
# Read mat dimensions, build the top half of the ".|." pattern, then mirror it
# around the centred WELCOME line.
n, m = map(int, input().split())
top_half = [('.|.' * (2 * row + 1)).center(m, '-') for row in range(n // 2)]
print('\n'.join(top_half + ['WELCOME'.center(m, '-')] + top_half[::-1]))
| [
"noreply@github.com"
] | lincrampton.noreply@github.com |
f5eed3a0e6d424de2aa27ca4a53236568f62c27e | 2b9e6e18d705f33541dd164f7ef9a1860ecd7286 | /deployment/run_server.py | 8e446bf6a1ee3d9722449be2beeaadb5a9e76684 | [
"MIT"
] | permissive | vicely07/Malaria-Image-Classification-using-Deep-Learning | 43657ee2765914b87097cb28dc5d025471a3bb12 | 2e10978c459d613461d3a2aed421349e1a0670fe | refs/heads/master | 2020-05-09T15:34:08.782250 | 2019-04-14T16:31:02 | 2019-04-14T16:31:02 | 181,238,090 | 1 | 1 | null | 2019-04-14T15:08:33 | 2019-04-13T23:13:49 | null | UTF-8 | Python | false | false | 4,498 | py | from utils import generate_random_start, generate_from_seed
from flask import Flask, render_template, request
from keras.models import load_model
import tensorflow as tf
from keras import backend as K
from wtforms import Form, TextField, validators, SubmitField, DecimalField, IntegerField, FileField
#
from PIL import Image
from PIL import Image
import numpy as np
import os
import cv2
# Create app
app = Flask(__name__)
UPLOAD_FOLDER = os.path.dirname(__file__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
class ReusableForm(Form):
"""User entry form for entering specifics for generation"""
file = FileField("Upload your image here")
# Submit button
submit = SubmitField("Enter")
def load_keras_model():
"""Load in the pre-trained model"""
global model
model = load_model('../models/cells.h5')
# Required for model to work
global graph
graph = tf.get_default_graph()
#
def convert_to_array(img):
im = cv2.imread(img)
img_ = Image.fromarray(im, 'RGB')
image = img_.resize((50, 50))
return np.array(image)
def get_cell_name(label):
if label==0:
return "Infected with Malaria", "Please visit a doctor"
if label==1:
return "Uninfected with Malaria", ""
def predict_cell(file):
model = load_model('../models/cells.h5')
print("Predicting Type of Cell Image.................................")
ar=convert_to_array(file)
ar=ar/255
label=1
a=[]
a.append(ar)
a=np.array(a)
score=model.predict(a,verbose=1)
K.clear_session()
print(score)
label_index=np.argmax(score)
print(label_index)
acc=np.max(score)
Cell, msg=get_cell_name(label_index)
print('The predicted Cell is a ' + Cell + " with accuracy = %.2f"%(acc*100) + "%")
return Cell + "<br>" + "%.2f"%(acc*100) + "%", int(acc*100), msg#"bar bar-"+ str(int(acc*100)) + " cyan" #Cell,"The predicted Cell is a "+Cell+" with accuracy = "+str(acc)
# Home page
@app.route("/", methods=['GET', 'POST'])
def home():
"""Home page of app with form"""
#predict_cell('../imagedata/data5.jpg')
#predict_cell('../imagedata/data3.jpg')
#predict_cell('../imagedata/data4.jpg')
# Create form
form = ReusableForm(request.form)
## On form entry and all conditions met
if request.method == 'POST' and form.validate():
#print("Loading image " + form.image.data+ " .")
#print("Test" + os.path.join(UPLOAD_FOLDER, form.image.data))
"""
if form.image.data:
image_data = request.files[form.image.name].read()
open(os.path.join(UPLOAD_FOLDER, form.image.data), 'w').write(image_data)
else:
print("img " + form.image.name + " not found")
"""
# check if the post request has the file part
if 'file' not in request.files:
print('No file part')
return render_template('index.html', form=form)
file = request.files['file']
# if user does not select file, browser also
# submit a empty part without filename
if file.filename == '':
print('No selected file')
return render_template('index.html', form=form)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))
#path = app.config['UPLOAD_FOLDER']+"/" + file.filename;
str, progress_style, msg = predict_cell(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))
return render_template('data.html', input = str, progress = progress_style, message = msg)
"""
# Extract information
seed = request.form['seed']
diversity = float(request.form['diversity'])
words = int(request.form['words'])
# Generate a random sequence
if seed == 'random':
return render_template('random.html', input=generate_random_start(model=model, graph=graph, new_words=words, diversity=diversity))
# Generate starting from a seed sequence
else:
return render_template('seeded.html', input=generate_from_seed(model=model, graph=graph, seed=seed, new_words=words, diversity=diversity))
"""
# Send template information to index.html
return render_template('index.html', form=form)
if __name__ == "__main__":
print(("* Loading Keras model and Flask starting server..."
"please wait until server has fully started"))
# Run app
app.run(host="0.0.0.0", port=50000)
| [
"38712706+vicely07@users.noreply.github.com"
] | 38712706+vicely07@users.noreply.github.com |
6f8749cb89dbb94f50e78e4e019db75ffbc57e40 | dcd100f8714c2686440bbfd8a826fac6a87b73eb | /Programming language/shell.py | 2a474e2f48a3d0cdadbe8013fa59a62cf9bfb417 | [] | no_license | yugshikhar1/Own-Progranning-Language | 91dd160a6f611aa4429d00281b70bebc4c74c798 | 180dcaec6728cd8d2f852802daffecd60240783e | refs/heads/main | 2023-03-14T15:31:41.996013 | 2021-03-07T11:35:15 | 2021-03-07T11:35:15 | 345,330,110 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | import basic
# Simple REPL for the `basic` language: read a line, run it, show the result.
while True:
    line = input('basic > ')
    if line.strip() == "":
        continue
    result, error = basic.run('<stdin>', line)
    if error:
        print(error.as_string())
    elif result:
        # A single-element result list is unwrapped before printing.
        if len(result.elements) == 1:
            print(repr(result.elements[0]))
        else:
            print(repr(result))
| [
"noreply@github.com"
] | yugshikhar1.noreply@github.com |
6fa01c173f56dc1ca5fa0eb989346827158978bc | 336cf3d97f1900f5dcbe842589ab8c38ef5e4853 | /test.py | 1392467cd1537e46f675fe46243dda24e8f04eef | [] | no_license | LinusBF/NN-test | 2f8640be7bbe4ca161113330cd587cf21f354464 | 63042f1e773a5a23fa925c8f7142931baac1bf38 | refs/heads/master | 2020-05-24T21:53:18.911833 | 2019-05-27T14:51:48 | 2019-05-27T14:51:48 | 187,484,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | import sys
from network.network import Network
from utils.utils import get_genotype_data_from_file
def get_test_ten_arr():
    """Prompt for a digit 0-9 and return its one-hot encoding, twice.

    Returns ``[inputs, inputs]`` where ``inputs`` is a 10-element list with a
    1 at the chosen digit's index (the same list is used as both the network
    input and the expected output).
    """
    nr = int(input("Pick a number between 0-9\n"))
    # BUG FIX: the original compared with `is`, which tests object identity
    # and only works for small ints by a CPython caching accident; `==` is
    # the correct value comparison.
    inputs = [1 if i == nr else 0 for i in range(10)]
    return [inputs, inputs]
def ten_arr_test(file_name):
    """Interactively probe a trained network with one-hot digit inputs.

    Loads weights and topology from ``<file_name>.csv``, then loops forever:
    prompts for a digit, feeds its one-hot encoding through the network and
    prints each output formatted to two decimals.
    """
    weights, topology = get_genotype_data_from_file(file_name + ".csv")
    net = Network(topology)
    net.set_weights(weights)
    while True:
        # get_test_ten_arr returns [inputs, expected]; only inputs are used here.
        ins, out = get_test_ten_arr()
        print("Answer: " + ", ".join("{:.2f}".format(x) for x in net.process_input(ins)))
        print(net)
# Allow the genotype file name to be overridden from the command line.
name = sys.argv[1] if len(sys.argv) > 1 else "network_latest"
ten_arr_test(name)
| [
"contact@linusbf.com"
] | contact@linusbf.com |
f28bdded58afa6b0b4468efbe084bb47993a545a | 51e45fe59c51908b6735dfffef96958f0b9f04e2 | /image_Identify/image_Identify-master/Mydataset.py | c5bd43164d551f9052508104b64230d73ba18c2f | [] | no_license | ztl-35/ML-DL-code | 7c38ab8f81f4537c29e65fd683614db1a2e56e35 | 04653320edae9c4d40a7f2a8ae8b5928632ea0c2 | refs/heads/master | 2020-04-15T15:54:56.827755 | 2019-02-16T10:16:57 | 2019-02-16T10:16:57 | 164,810,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | import torch.utils.data as data
class MyDataset(data.Dataset):
    """Minimal in-memory dataset pairing each image with its label."""

    def __init__(self, images, labels):
        self.images = images
        self.labels = labels

    def __getitem__(self, index):
        # Return the (image, label) pair at the given position.
        return self.images[index], self.labels[index]

    def __len__(self):
        return len(self.images)
"noreply@github.com"
] | ztl-35.noreply@github.com |
4b0f956f9c627884da495726262f7fd721556179 | ad5fe6b2dfba0b04cd8fd638049175fcefe90fcf | /Protheus_WebApp/Modules/SIGAGFE/GFEC050TESTCASE.py | 440d017876e582e622d1d85a3c7c1363b54bcfa0 | [
"MIT"
] | permissive | totvs/tir-script-samples | 63833785de206e3e566baeab6f02e07a4313d6be | ccd0eb8038f4f1c91fe17e0813b15ce6dfa52cc9 | refs/heads/master | 2023-08-16T21:52:10.357667 | 2023-08-11T16:58:29 | 2023-08-11T16:58:29 | 148,842,685 | 24 | 22 | MIT | 2023-06-27T20:13:32 | 2018-09-14T21:17:10 | Python | UTF-8 | Python | false | false | 676 | py | from tir import Webapp
import unittest
class GFEC050(unittest.TestCase):
    """TIR sanity test for the Protheus routine GFEC050 (module SIGAGFE).

    Drives the Protheus web client through the TIR ``Webapp`` helper; checks
    are queued on the helper and evaluated by ``AssertTrue`` at the end.
    NOTE(review): the classmethods use ``inst`` instead of the conventional
    ``cls`` parameter name; kept as-is.
    """
    @classmethod
    def setUpClass(inst):
        # Log in to SIGAGFE once for the whole class and open routine GFEC050.
        inst.oHelper = Webapp()
        inst.oHelper.Setup("SIGAGFE", "22/06/2020", "T1", "D MG 01", "78")
        inst.oHelper.Program("GFEC050")
    def test_GFEC050_CT001(self):
        # Find record 00000516 in the browse, open it read-only and verify the
        # key field shows the expected document number.
        self.oHelper.SearchBrowse("D MG 01 00000516")
        self.oHelper.SetButton("Visualizar")
        self.oHelper.CheckResult("GWN_NRROM", "00000516")
        self.oHelper.SetButton("Fechar")
        # Evaluate all queued checks; the test fails if any of them failed.
        self.oHelper.AssertTrue()
    @classmethod
    def tearDownClass(inst):
        inst.oHelper.TearDown()
if __name__ == "__main__":
    unittest.main()
| [
"hadrignoli@gmail.com"
] | hadrignoli@gmail.com |
6fa2f47e0313e25af10bf5c31f5dfbc98b52554b | 76650d7d6b342229c1d870971f00e8f0c8921906 | /src/turtle/behaviours/goToBehaviour.py | 4d3d5bb367b3c95e13c116b96a1b1ce225752a92 | [] | no_license | fgaignier/Spade-Robots | 368f94f2fb227f9e6e8e5e8a5ec8f724076d5b6a | 61c27c86c234a8dba57dc40055a3c3d5778a0c4b | refs/heads/master | 2020-03-08T19:46:38.773429 | 2018-05-14T11:35:26 | 2018-05-14T11:35:26 | 128,363,441 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 867 | py |
from spadeutils.behaviours.spadeBehaviours import OneShotBehaviour
from turtle.services.navigationService import GoToPose
class goToBehaviour(OneShotBehaviour):
GOAL_REACHED = 0
GOAL_NON_REACHED = 0
#navigator = GoToPose()
def __init__(self, name):
OneShotBehaviour.__init__(self, name)
self.navigator = GoToPose()
def process(self):
print "GoToBehaviour"
goal = self.myAgent.parameters[0]
# to be fixed. Quaterion should be given as a parameter
# default value will be used
if self.navigator.goTo(goal):
self._exitcode = goToBehaviour.GOAL_REACHED
print "goal reached"
else:
self._exitcode = goToBehaviour.GOAL_NON_REACHED
print "goal non reached"
def onStop(self):
self.navigator.shutdown() | [
"fgaignier@hotmail.com"
] | fgaignier@hotmail.com |
11fe544e84470e13369ae2af3ccacd651655a889 | c0239d75a8199ec84ad683f945c21785c1b59386 | /dingtalk/api/rest/OapiDepartmentCreateRequest.py | 01b4352f91c2fcafae72e91b062c8e84329aaf83 | [] | no_license | luss613/oauth_dingtalk | 9f253a75ce914c577dbabfb84e97fd883e80e04b | 1e2554642d2b16c642a031670d08efa4a74e8252 | refs/heads/master | 2023-04-23T01:16:33.450821 | 2020-06-18T08:22:57 | 2020-06-18T08:22:57 | 264,966,287 | 1 | 1 | null | 2020-06-18T08:31:24 | 2020-05-18T14:33:25 | Python | UTF-8 | Python | false | false | 754 | py | '''
Created by auto_sdk on 2018.08.17
'''
from dingtalk.api.base import RestApi
class OapiDepartmentCreateRequest(RestApi):
    """Auto-generated request object for the DingTalk
    ``dingtalk.oapi.department.create`` API (created by auto_sdk).

    Each attribute mirrors one request parameter and defaults to ``None``.
    NOTE(review): both ``deptPerimits``/``deptPermits`` and
    ``userPerimits``/``userPermits`` exist; the misspelt variants appear to
    be generator artifacts but are kept verbatim for wire compatibility.
    """
    def __init__(self,url=None):
        RestApi.__init__(self,url)
        self.createDeptGroup = None
        self.deptHiding = None
        self.deptPerimits = None
        self.deptPermits = None
        self.name = None
        self.order = None
        self.outerDept = None
        self.outerDeptOnlySelf = None
        self.outerPermitDepts = None
        self.outerPermitUsers = None
        self.parentBalanceFirst = None
        self.parentid = None
        self.shareBalance = None
        self.sourceIdentifier = None
        self.userPerimits = None
        self.userPermits = None

    def getHttpMethod(self):
        # This endpoint is invoked with HTTP POST.
        return 'POST'

    def getapiname(self):
        # Fully-qualified API method name used by the request dispatcher.
        return 'dingtalk.oapi.department.create'
| [
"paul.lu@belstar.com.cn"
] | paul.lu@belstar.com.cn |
d019969bf2dab04600ec0f507136f89686361ff6 | 97cde0c39cd32a6f1ff92dd4ce358c70ecb791bf | /ballFollow.py | 5be35220d258ce62b77f6f420b547f24284462d3 | [] | no_license | jackistom/shape_detector-opencv-python- | cc954f83dd0b921630668d3bd9b809d42d9f48c4 | 4b66902e7c147f7b0012cdaf4c67c57903c2ed79 | refs/heads/master | 2020-09-13T15:31:44.442042 | 2019-11-20T03:30:55 | 2019-11-20T03:30:55 | 222,829,771 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | py | from gpiozero import Motor
from time import sleep
import cv2
import numpy as np
import imutils
m = Motor(forward=17, backward=18) # fwd=cw bck=ccw
boundaries = [ ( [60, 162, 174], #lower color range
[135, 237, 242] ) ]#upper color range
cap = cv2.VideoCapture(0)
##cv2.imwrite("/media/pi/USB1/yellowBALL.jpg", frame)
while True:
ret, frame = cap.read()
for (lower, upper) in boundaries:
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
mask = cv2.inRange(frame, lower, upper)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
center = None
if len(cnts) > 4:
c = max(cnts, key=cv2.contourArea)
((x,y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
if radius > 10:
cv2.circle(frame, (int(x), int(y)), int(radius), (0,255,255),2)
cv2.circle(frame, center, 5, (0,0,255), -1)
x, y = int(x), int(y)
print x, y
if (x > 260) and (x < 360):
#continue
print "middle" # do nothing, in ideal middle location
elif (x <= 260) and (x > 100):
m.backward(.6)
sleep(.04)
m.stop()
## sleep(.5)
elif (x >= 360) and (x < 520):
m.forward(.6)
sleep(.04)
m.stop()
## sleep(.5)
elif (x <= 100):
m.backward(1)
sleep(.05)
m.stop()
elif (x >= 520):
m.forward(1)
sleep(.05)
m.stop()
else:
continue
output = cv2.bitwise_and(frame, frame, mask = mask)
cv2.imshow("frame", np.hstack([frame, output]))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
cv2.waitKey(1)
| [
"you@example.com"
] | you@example.com |
e17f60f13179da2c90f8214c18d24c574c53c20a | 3b8387d770b33850dca55a1f0657167906697b5b | /lowest_common_ancestor_of_binary_tree.py | e3bc2dc905f529b5c4578811cf5b4d16c7a2346c | [] | no_license | BigZihao/Leetcode | fe2795d5485e4780c1ec79558eaf9017a830a516 | 95ec42c241a4815a8b35f7a71948f1bc4e58b5b3 | refs/heads/master | 2021-01-13T00:59:12.489768 | 2018-06-21T21:50:33 | 2018-06-21T21:50:33 | 48,407,360 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | class Solution(object):
def lowestCommonAncestor(self, root, p, q):
stack = [root]
parent = {root:None} ##{child:parent}
## DFS to traverse the tree and find p, q and store their parents
while p not in parent or q not in parent:
node = stack.pop()
if node.left:
parent[node.left] = node
stack.append(node.left)
if node.right:
parent[node.right] = node
stack.append(node.right)
ancestors = set()
while p:
ancestors.add(p) # store p's ancestor
p = parent[p]
while q not in ancestors:
q = parent[q] ## in p's ancestor, find common ancestor that q have
return q
## recursively
def lowestCommonAncestor(self, root, p, q):
if root is None:
return root
if root == p or root == q:
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left is not None and right is not None:
return root
elif left is not None:
return left
elif right is not None:
return right
def lowestCommonAncestor2(self, root, p, q):
if root in (None, p, q): return root
left, right = (self.lowestCommonAncestor(kid, p, q)
for kid in (root.left, root.right))
return root if left and right else left or right | [
"zihao.zhang.ustb@gmail.com"
] | zihao.zhang.ustb@gmail.com |
0327848730d0d264c4abda695799aece3991e3e5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_ovulated.py | 6f06677000201d1565d9797c550fe91cfc9f65d6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.verbs._ovulate import _OVULATE
# class header
class _OVULATED(_OVULATE, ):
    """Auto-generated lexicon entry for the inflected form "ovulated".

    Inherits its behaviour from the base verb class ``_OVULATE``; only the
    identifying metadata is set here.
    """
    def __init__(self,):
        _OVULATE.__init__(self)
        self.name = "OVULATED"  # canonical uppercase form of this entry
        self.specie = 'verbs'  # word-class bucket this entry belongs to
        self.basic = "ovulate"  # base (dictionary) form of the verb
        self.jsondata = {}  # no extra definition data for this entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
1605d54927d7ec9cd4a1a024bd45ddef84e37771 | 6bd166d1c5d391cedcb9e65377ddfb36fff25f6d | /appointments/migrations/0006_auto_20210530_0150.py | 8de4c293855488963b5224a5cfbbbb41deaede14 | [] | no_license | meruyme/appointment_vaccines | 6e6a1a3384870e3a6d9cef277b236913510ab207 | 5fae1648459287ff488102ee0d1f288ee7abafdc | refs/heads/master | 2023-05-07T20:43:30.394116 | 2021-06-03T02:28:38 | 2021-06-03T02:28:38 | 371,268,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # Generated by Django 3.2.3 on 2021-05-30 04:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('appointments', '0005_rename_birth_user_date_birth'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='admin',
new_name='is_admin',
),
migrations.RemoveField(
model_name='user',
name='staff',
),
]
| [
"mellany.linhares@academico.ifs.edu.br"
] | mellany.linhares@academico.ifs.edu.br |
d91e061ed280a62bdcb838399b78ee1e7dd3c4bd | 04930be3496264dee4ff6c1d6ce32e0e1b9d1513 | /film_project/manage.py | 8d4b83c1ae79cbf7796d432982f35bfebc398719 | [] | no_license | CarMoreno/PythonTulua-EjemploBasicoDRF | 86f49b059f0d27a7d05f3f4763303919b266f63f | 605e2da4841ec3fc1e152d5e5676f32f04e1369a | refs/heads/master | 2021-09-30T00:04:56.600834 | 2020-04-04T18:13:36 | 2020-04-04T18:13:36 | 253,067,811 | 1 | 0 | null | 2021-09-22T18:50:26 | 2020-04-04T18:12:32 | Python | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    # Point Django at this project's settings unless the caller already set
    # DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'film_project.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; chaining preserves the original
        # traceback for debugging.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"carlosandresmorenovelez@gmail.com"
] | carlosandresmorenovelez@gmail.com |
aea8949d0774fab61c25ab5cfa95652b71045b69 | 36df341a03d67fae0789dbc25fbccdfad8e65bdb | /dask_cloud/providers/aws/ecs.py | 74c2af6d769fe1580657fefcc57844176959e1dc | [
"BSD-3-Clause"
] | permissive | RPrudden/dask-cloud | 736d9d13be96714bfecc1460da025ea2ae33ddfd | a1ea87a78a9b54bca84d89281a22347c2c1fc807 | refs/heads/master | 2020-06-22T18:48:38.004823 | 2019-07-19T12:00:45 | 2019-07-19T12:00:45 | 197,778,331 | 0 | 0 | BSD-3-Clause | 2019-07-19T13:33:27 | 2019-07-19T13:33:26 | null | UTF-8 | Python | false | false | 39,073 | py | import asyncio
import logging
import sys
import time
import uuid
import warnings
import weakref
from botocore.exceptions import ClientError
import aiobotocore
import dask
from dask_cloud.utils.timeout import Timeout
from dask_cloud.providers.aws.helper import dict_to_aws, aws_to_dict
from distributed.deploy.spec import SpecCluster
logger = logging.getLogger(__name__)
DEFAULT_TAGS = {"createdBy": "dask-cloud"} # Package tags to apply to all resources
# Cluster-name template; the "{uuid}" placeholder is filled in when a name is generated.
DEFAULT_CLUSTER_NAME_TEMPLATE = "dask-{uuid}"
class Task:
    """ A superclass for managing ECS Tasks
    Parameters
    ----------
    clients: Dict[str, aiobotocore.client.Client]
        References to the boto clients created by the cluster. These will be
        used to interact with the AWS API.
    cluster_arn: str
        The ARN of the ECS cluster to launch the task in.
    task_definition_arn: str
        The ARN of the task definition that this object should use to launch
        itself.
    vpc_subnets: List[str]
        The VPC subnets to use for the ENI that will be created when launching
        this task.
    security_groups: List[str]
        The security groups to attach to the ENI that will be created when
        launching this task.
    log_group: str
        The log group to send all task logs to.
    log_stream_prefix: str
        A prefix for the log stream that will be created automatically in the
        log group when launching this task.
    fargate: bool
        Whether or not to launch with the Fargate launch type.
    tags: str
        AWS resource tags to be applied to any resources that are created.
    loop: asyncio.EventLoop
        A pointer to the asyncio event loop.
    kwargs:
        Any additional kwargs which may need to be stored for later use.
    See Also
    --------
    Worker
    Scheduler
    """
    def __init__(
        self,
        clients,
        cluster_arn,
        task_definition_arn,
        vpc_subnets,
        security_groups,
        log_group,
        log_stream_prefix,
        fargate,
        tags,
        loop,
        **kwargs
    ):
        # Serialises start() so awaiting the object twice cannot launch the
        # ECS task twice (see __await__).
        self.lock = asyncio.Lock()
        self.loop = loop
        self._clients = clients
        self.cluster_arn = cluster_arn
        self.task_definition_arn = task_definition_arn
        # Populated once the task has been launched via start().
        self.task = None
        self.task_arn = None
        # Subclasses set task_type ("scheduler"/"worker"); used in messages.
        self.task_type = None
        self.public_ip = None
        self.private_ip = None
        self.log_group = log_group
        self.log_stream_prefix = log_stream_prefix
        self.connection = None
        # run_task overrides; subclasses may replace (see Worker).
        self.overrides = {}
        self._vpc_subnets = vpc_subnets
        self._security_groups = security_groups
        self.fargate = fargate
        self.tags = tags
        self.kwargs = kwargs
        self.status = "created"
    def __await__(self):
        # Awaiting the object lazily starts the ECS task exactly once.
        async def _():
            async with self.lock:
                if not self.task:
                    await self.start()
                assert self.task
                return self
        return _().__await__()
    @property
    def _use_public_ip(self):
        return True # TODO Allow private only (needs NAT for image pull)
    async def _is_long_arn_format_enabled(self):
        # Tagging tasks at run_task time is only permitted for accounts that
        # have opted in to the long ARN format, so probe the account setting.
        [response] = (
            await self._clients["ecs"].list_account_settings(
                name="taskLongArnFormat", effectiveSettings=True
            )
        )["settings"]
        return response["value"] == "enabled"
    async def _update_task(self):
        # Refresh self.task with the latest description from the ECS API.
        [self.task] = (
            await self._clients["ecs"].describe_tasks(
                cluster=self.cluster_arn, tasks=[self.task_arn]
            )
        )["tasks"]
    async def _get_address_from_logs(self):
        """Parse the dask comm address out of the task's CloudWatch logs."""
        timeout = Timeout(
            30, "Failed to find %s ip address after 30 seconds." % self.task_type
        )
        while timeout.run():
            async for line in self.logs():
                for query_string in ["worker at:", "Scheduler at:"]:
                    if query_string in line:
                        address = line.split(query_string)[1].strip()
                        # The process logs its private address; substitute the
                        # public IP when connecting from outside the VPC.
                        if self._use_public_ip:
                            address = address.replace(self.private_ip, self.public_ip)
                        logger.debug("%s", line)
                        return address
            else:
                # Address line not found yet: bail out if the task has died,
                # otherwise poll the logs again.
                if not await self._task_is_running():
                    raise RuntimeError("%s exited unexpectedly!" % type(self).__name__)
                continue
            break
    async def _task_is_running(self):
        await self._update_task()
        return self.task["lastStatus"] == "RUNNING"
    async def start(self):
        """Launch the ECS task and block until its dask address is known."""
        timeout = Timeout(60, "Unable to start %s after 60 seconds" % self.task_type)
        # Retry run_task until it succeeds (e.g. transient capacity errors).
        while timeout.run():
            try:
                kwargs = (
                    {"tags": dict_to_aws(self.tags)}
                    if await self._is_long_arn_format_enabled()
                    else {}
                )  # Tags are only supported if you opt into long arn format so we need to check for that
                [self.task] = (
                    await self._clients["ecs"].run_task(
                        cluster=self.cluster_arn,
                        taskDefinition=self.task_definition_arn,
                        overrides=self.overrides,
                        count=1,
                        launchType="FARGATE" if self.fargate else "EC2",
                        networkConfiguration={
                            "awsvpcConfiguration": {
                                "subnets": self._vpc_subnets,
                                "securityGroups": self._security_groups,
                                "assignPublicIp": "ENABLED"
                                if self._use_public_ip
                                else "DISABLED",
                            }
                        },
                        **kwargs
                    )
                )["tasks"]
                break
            except Exception as e:
                timeout.set_exception(e)
                await asyncio.sleep(1)
        self.task_arn = self.task["taskArn"]
        # Poll until the task leaves the pre-running states.
        while self.task["lastStatus"] in ["PENDING", "PROVISIONING"]:
            await asyncio.sleep(1)
            await self._update_task()
        if not await self._task_is_running():
            raise RuntimeError("%s failed to start" % type(self).__name__)
        # Find the ENI attached to the task and resolve its public/private IPs.
        [eni] = [
            attachment
            for attachment in self.task["attachments"]
            if attachment["type"] == "ElasticNetworkInterface"
        ]
        [network_interface_id] = [
            detail["value"]
            for detail in eni["details"]
            if detail["name"] == "networkInterfaceId"
        ]
        eni = await self._clients["ec2"].describe_network_interfaces(
            NetworkInterfaceIds=[network_interface_id]
        )
        [interface] = eni["NetworkInterfaces"]
        self.public_ip = interface["Association"]["PublicIp"]
        self.private_ip = interface["PrivateIpAddresses"][0]["PrivateIpAddress"]
        self.address = await self._get_address_from_logs()
        self.status = "running"
    async def close(self, **kwargs):
        """Stop the ECS task (if started) and wait until it has stopped."""
        if self.task:
            await self._clients["ecs"].stop_task(
                cluster=self.cluster_arn, task=self.task_arn
            )
            await self._update_task()
            while self.task["lastStatus"] in ["RUNNING"]:
                await asyncio.sleep(1)
                await self._update_task()
        self.status = "closed"
    @property
    def task_id(self):
        # The task ID is the final component of the task ARN.
        return self.task_arn.split("/")[1]
    @property
    def _log_stream_name(self):
        # awslogs stream name convention: <prefix>/<container>/<task id>.
        return "{prefix}/{container}/{task_id}".format(
            prefix=self.log_stream_prefix,
            container=self.task["containers"][0]["name"],
            task_id=self.task_id,
        )
    async def logs(self):
        """Yield each CloudWatch log line for this task, oldest first."""
        next_token = None
        while True:
            if next_token:
                l = await self._clients["logs"].get_log_events(
                    logGroupName=self.log_group,
                    logStreamName=self._log_stream_name,
                    nextToken=next_token,
                )
            else:
                l = await self._clients["logs"].get_log_events(
                    logGroupName=self.log_group, logStreamName=self._log_stream_name
                )
            next_token = l["nextForwardToken"]
            # An empty page means we have caught up with the stream.
            if not l["events"]:
                break
            for event in l["events"]:
                yield event["message"]
    def __repr__(self):
        return "<ECS Task %s: status=%s>" % (type(self).__name__, self.status)
class Scheduler(Task):
    """ A remote Dask scheduler task controlled by ECS.

    See :class:`Task` for parameter info.  The only specialisation is that
    ``task_type`` is set to ``"scheduler"``, which drives the log/error
    messages in the base class.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.task_type = "scheduler"
class Worker(Task):
    """ A remote Dask worker task controlled by ECS.

    Parameters
    ----------
    scheduler: str
        The address of the scheduler this worker should connect to.
    kwargs: Dict()
        Other kwargs to be passed to :class:`Task`.
    """
    def __init__(self, scheduler: str, **kwargs):
        super().__init__(**kwargs)
        self.task_type = "worker"
        self.scheduler = scheduler
        # Inject the scheduler address into the dask-worker container as an
        # environment variable via a run_task container override.
        self.overrides = {
            "containerOverrides": [
                {
                    "name": "dask-worker",
                    "environment": [
                        {"name": "DASK_SCHEDULER_ADDRESS", "value": self.scheduler}
                    ],
                }
            ]
        }
class ECSCluster(SpecCluster):
""" Deploy a Dask cluster using ECS
This creates a dask scheduler and workers on an ECS cluster. If you do not
configure a cluster one will be created for you with sensible defaults.
Parameters
----------
fargate: bool (optional)
        Select whether or not to use Fargate.
        Defaults to ``False``, in which case you must provide an existing
        ECS cluster via ``cluster_arn``.
image: str (optional)
The docker image to use for the scheduler and worker tasks.
Defaults to ``daskdev/dask:latest``.
scheduler_cpu: int (optional)
The amount of CPU to request for the scheduler in milli-cpu (1/1024).
Defaults to ``1024`` (one vCPU).
scheduler_mem: int (optional)
The amount of memory to request for the scheduler in MB.
Defaults to ``4096`` (4GB).
scheduler_timeout: str (optional)
The scheduler task will exit after this amount of time if there are no clients connected.
Defaults to ``5 minutes``.
worker_cpu: int (optional)
The amount of CPU to request for worker tasks in milli-cpu (1/1024).
Defaults to ``4096`` (four vCPUs).
worker_mem: int (optional)
The amount of memory to request for worker tasks in MB.
Defaults to ``16384`` (16GB).
n_workers: int (optional)
Number of workers to start on cluster creation.
Defaults to ``None``.
cluster_arn: str (optional if fargate is true)
The ARN of an existing ECS cluster to use for launching tasks.
Defaults to ``None`` which results in a new cluster being created for you.
cluster_name_template: str (optional)
A template to use for the cluster name if ``cluster_arn`` is set to
``None``.
Defaults to ``'dask-{uuid}'``
execution_role_arn: str (optional)
The ARN of an existing IAM role to use for ECS execution.
This ARN must have ``sts:AssumeRole`` allowed for
``ecs-tasks.amazonaws.com`` and allow the following permissions:
- ``ecr:GetAuthorizationToken``
- ``ecr:BatchCheckLayerAvailability``
- ``ecr:GetDownloadUrlForLayer``
- ``ecr:GetRepositoryPolicy``
- ``ecr:DescribeRepositories``
- ``ecr:ListImages``
- ``ecr:DescribeImages``
- ``ecr:BatchGetImage``
- ``logs:*``
- ``ec2:AuthorizeSecurityGroupIngress``
- ``ec2:Describe*``
- ``elasticloadbalancing:DeregisterInstancesFromLoadBalancer``
- ``elasticloadbalancing:DeregisterTargets``
- ``elasticloadbalancing:Describe*``
- ``elasticloadbalancing:RegisterInstancesWithLoadBalancer``
- ``elasticloadbalancing:RegisterTargets``
Defaults to ``None`` (one will be created for you).
task_role_arn: str (optional)
The ARN for an existing IAM role for tasks to assume. This defines
which AWS resources the dask workers can access directly. Useful if
you need to read from S3 or a database without passing credentials
around.
Defaults to ``None`` (one will be created with S3 read permission only).
task_role_policies: List[str] (optional)
If you do not specify a ``task_role_arn`` you may want to list some
IAM Policy ARNs to be attached to the role that will be created for you.
E.g if you need your workers to read from S3 you could add
``arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess``.
Default ``None`` (no policies will be attached to the role)
cloudwatch_logs_group: str (optional)
The name of an existing cloudwatch log group to place logs into.
Default ``None`` (one will be created called ``dask-ecs``)
cloudwatch_logs_stream_prefix: str (optional)
Prefix for log streams.
Defaults to the cluster name.
cloudwatch_logs_default_retention: int (optional)
Retention for logs in days. For use when log group is auto created.
Defaults to ``30``.
vpc: str (optional)
The ID of the VPC you wish to launch your cluster in.
Defaults to ``None`` (your default VPC will be used).
security_groups: List[str] (optional)
A list of security group IDs to use when launching tasks.
Defaults to ``None`` (one will be created which allows all traffic
between tasks and access to ports ``8786`` and ``8787`` from anywhere).
tags: dict (optional)
Tags to apply to all resources created automatically.
Defaults to ``None``. Tags will always include ``{"createdBy": "dask-cloud"}``
**kwargs: dict
Additional keyword arguments to pass to ``SpecCluster``.
Examples
--------
TODO Write ECSCluster examples docs
"""
def __init__(
self,
fargate=False,
image=None,
scheduler_cpu=None,
scheduler_mem=None,
scheduler_timeout=None,
worker_cpu=None,
worker_mem=None,
n_workers=None,
cluster_arn=None,
cluster_name_template=None,
execution_role_arn=None,
task_role_arn=None,
task_role_policies=None,
cloudwatch_logs_group=None,
cloudwatch_logs_stream_prefix=None,
cloudwatch_logs_default_retention=None,
vpc=None,
security_groups=None,
tags=None,
**kwargs
):
self._clients = None
self._fargate = fargate
self.image = image
self._scheduler_cpu = scheduler_cpu
self._scheduler_mem = scheduler_mem
self._scheduler_timeout = scheduler_timeout
self._worker_cpu = worker_cpu
self._worker_mem = worker_mem
self._n_workers = n_workers
self.cluster_arn = cluster_arn
self._cluster_name_template = cluster_name_template
self._execution_role_arn = execution_role_arn
self._task_role_arn = task_role_arn
self._task_role_policies = task_role_policies
self.cloudwatch_logs_group = cloudwatch_logs_group
self._cloudwatch_logs_stream_prefix = cloudwatch_logs_stream_prefix
self._cloudwatch_logs_default_retention = cloudwatch_logs_default_retention
self._vpc = vpc
self._security_groups = security_groups
self._tags = tags
super().__init__(**kwargs)
async def _start(self,):
while self.status == "starting":
await asyncio.sleep(0.01)
if self.status == "running":
return
if self.status == "closed":
raise ValueError("Cluster is closed")
# Cleanup any stale resources before we start
await _cleanup_stale_resources()
self.config = dask.config.get("cloud.ecs", {})
self._clients = await self._get_clients()
self._fargate = (
self.config.get("fargate", False)
if self._fargate is None
else self._fargate
)
self._tags = self.config.get("tags", {}) if self._tags is None else self._tags
self.image = (
self.config.get("image", "daskdev/dask:latest")
if self.image is None
else self.image
)
self._scheduler_cpu = (
self.config.get("scheduler_cpu", 1024)
if self._scheduler_cpu is None
else self._scheduler_cpu
)
self._scheduler_mem = (
self.config.get("scheduler_mem", 4096)
if self._scheduler_mem is None
else self._scheduler_mem
)
self._scheduler_timeout = (
self.config.get("scheduler_timeout", "5 minutes")
if self._scheduler_timeout is None
else self._scheduler_timeout
)
self._worker_cpu = (
self.config.get("worker_cpu", 4096)
if self._worker_cpu is None
else self._worker_cpu
)
self._worker_mem = (
self.config.get("worker_mem", 16384)
if self._worker_mem is None
else self._worker_mem
)
self._n_workers = (
self.config.get("n_workers", 0)
if self._n_workers is None
else self._n_workers
)
self.environment = {
"DASK_DISTRIBUTED__SCHEDULER__IDLE_TIMEOUT": self._scheduler_timeout
}
self.cluster_name = None
self._cluster_name_template = (
self.config.get("cluster_name", DEFAULT_CLUSTER_NAME_TEMPLATE)
if self._cluster_name_template is None
else self._cluster_name_template
)
self.cluster_arn = (
self.config.get("cluster_arn", await self._create_cluster())
if self.cluster_arn is None
else self.cluster_arn
)
if self.cluster_name is None:
[cluster_info] = (
await self._clients["ecs"].describe_clusters(
clusters=[self.cluster_arn]
)
)["clusters"]
self.cluster_name = cluster_info["clusterName"]
self._execution_role_arn = (
self.config.get("execution_role_arn", await self._create_execution_role())
if self._execution_role_arn is None
else self._execution_role_arn
)
self._task_role_policies = (
self.config.get("task_role_policies", [])
if self._task_role_policies is None
else self._task_role_policies
)
self._task_role_arn = (
self.config.get("task_role_arn", await self._create_task_role())
if self._task_role_arn is None
else self._task_role_arn
)
self._cloudwatch_logs_stream_prefix = (
self.config.get("cloudwatch_logs_stream_prefix", "{cluster_name}")
if self._cloudwatch_logs_stream_prefix is None
else self._cloudwatch_logs_stream_prefix
).format(cluster_name=self.cluster_name)
self._cloudwatch_logs_default_retention = (
self.config.get("cloudwatch_logs_default_retention", 30)
if self._cloudwatch_logs_default_retention is None
else self._cloudwatch_logs_default_retention
)
self.cloudwatch_logs_group = (
self.config.get(
"cloudwatch_logs_group", await self._create_cloudwatch_logs_group()
)
if self.cloudwatch_logs_group is None
else self.cloudwatch_logs_group
)
self._vpc = (
self.config.get("vpc", "default") if self._vpc is None else self._vpc
)
if self._vpc == "default":
self._vpc = await self._get_default_vpc()
self._vpc_subnets = await self._get_vpc_subnets()
self._security_groups = (
self.config.get("security_groups", await self._create_security_groups())
if self._security_groups is None
else self._security_groups
)
self.scheduler_task_definition_arn = (
await self._create_scheduler_task_definition_arn()
)
self.worker_task_definition_arn = (
await self._create_worker_task_definition_arn()
)
options = {
"clients": self._clients,
"cluster_arn": self.cluster_arn,
"vpc_subnets": self._vpc_subnets,
"security_groups": self._security_groups,
"log_group": self.cloudwatch_logs_group,
"log_stream_prefix": self._cloudwatch_logs_stream_prefix,
"fargate": self._fargate,
"tags": self.tags,
}
scheduler_options = {
"task_definition_arn": self.scheduler_task_definition_arn,
**options,
}
worker_options = {
"task_definition_arn": self.worker_task_definition_arn,
**options,
}
self.scheduler_spec = {"cls": Scheduler, "options": scheduler_options}
self.new_spec = {"cls": Worker, "options": worker_options}
self.worker_spec = {i: self.new_spec for i in range(self._n_workers)}
await super()._start()
@property
def tags(self):
return {**self._tags, **DEFAULT_TAGS, "cluster": self.cluster_name}
async def _get_clients(self):
session = aiobotocore.get_session()
weakref.finalize(self, self.sync, self._close_clients)
return {
"ec2": session.create_client("ec2"),
"ecs": session.create_client("ecs"),
"iam": session.create_client("iam"),
"logs": session.create_client("logs"),
}
async def _close_clients(self):
for client in self._clients.values():
await client.close()
    async def _create_cluster(self):
        """Create a new ECS cluster named from the template and return its ARN.

        Only valid in Fargate mode; with EC2 launch types the caller must
        supply an existing cluster instead.
        """
        if not self._fargate:
            raise RuntimeError("You must specify a cluster when not using Fargate.")
        self.cluster_name = dask.config.expand_environment_variables(
            self._cluster_name_template
        )
        # Fill the {uuid} placeholder with a short random suffix.
        self.cluster_name = self.cluster_name.format(uuid=str(uuid.uuid4())[:10])
        response = await self._clients["ecs"].create_cluster(
            clusterName=self.cluster_name, tags=dict_to_aws(self.tags)
        )
        # Tear the cluster down again when this object is garbage collected.
        weakref.finalize(self, self.sync, self._delete_cluster)
        return response["cluster"]["clusterArn"]
    async def _delete_cluster(self):
        """Stop every RUNNING task in the cluster, then delete the cluster."""
        async for page in self._clients["ecs"].get_paginator("list_tasks").paginate(
            cluster=self.cluster_arn, desiredStatus="RUNNING"
        ):
            for task in page["taskArns"]:
                await self._clients["ecs"].stop_task(
                    cluster=self.cluster_arn, task=task
                )
        await self._clients["ecs"].delete_cluster(cluster=self.cluster_arn)
@property
def _execution_role_name(self):
return "{}-{}".format(self.cluster_name, "execution-role")
    async def _create_execution_role(self):
        """Create the IAM execution role for ECS and return its ARN.

        The trust policy allows ``ecs-tasks.amazonaws.com`` to assume the
        role; managed policies grant image pulls (ECR), CloudWatch logging,
        and load-balancer registration.
        """
        response = await self._clients["iam"].create_role(
            RoleName=self._execution_role_name,
            AssumeRolePolicyDocument="""{
            "Version": "2012-10-17",
            "Statement": [
                {
                "Effect": "Allow",
                "Principal": {
                    "Service": "ecs-tasks.amazonaws.com"
                },
                "Action": "sts:AssumeRole"
                }
            ]
            }""",
            Description="A role for ECS to use when executing",
            Tags=dict_to_aws(self.tags, upper=True),
        )
        await self._clients["iam"].attach_role_policy(
            RoleName=self._execution_role_name,
            PolicyArn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
        )
        await self._clients["iam"].attach_role_policy(
            RoleName=self._execution_role_name,
            PolicyArn="arn:aws:iam::aws:policy/CloudWatchLogsFullAccess",
        )
        await self._clients["iam"].attach_role_policy(
            RoleName=self._execution_role_name,
            PolicyArn="arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole",
        )
        # Remove the role again when this cluster object is garbage collected.
        weakref.finalize(self, self.sync, self._delete_role, self._execution_role_name)
        return response["Role"]["Arn"]
@property
def _task_role_name(self):
return "{}-{}".format(self.cluster_name, "task-role")
    async def _create_task_role(self):
        """Create the IAM role dask tasks assume at runtime; return its ARN.

        Attaches each policy ARN from ``self._task_role_policies`` so workers
        can reach the AWS resources the user requested.
        """
        response = await self._clients["iam"].create_role(
            RoleName=self._task_role_name,
            AssumeRolePolicyDocument="""{
            "Version": "2012-10-17",
            "Statement": [
                {
                "Effect": "Allow",
                "Principal": {
                    "Service": "ecs-tasks.amazonaws.com"
                },
                "Action": "sts:AssumeRole"
                }
            ]
            }""",
            Description="A role for dask tasks to use when executing",
            Tags=dict_to_aws(self.tags, upper=True),
        )
        for policy in self._task_role_policies:
            await self._clients["iam"].attach_role_policy(
                RoleName=self._task_role_name, PolicyArn=policy
            )
        # Remove the role again when this cluster object is garbage collected.
        weakref.finalize(self, self.sync, self._delete_role, self._task_role_name)
        return response["Role"]["Arn"]
async def _delete_role(self, role):
attached_policies = (
await self._clients["iam"].list_attached_role_policies(RoleName=role)
)["AttachedPolicies"]
for policy in attached_policies:
await self._clients["iam"].detach_role_policy(
RoleName=role, PolicyArn=policy["PolicyArn"]
)
await self._clients["iam"].delete_role(RoleName=role)
async def _create_cloudwatch_logs_group(self):
log_group_name = "dask-ecs"
if log_group_name not in [
group["logGroupName"]
for group in (await self._clients["logs"].describe_log_groups())[
"logGroups"
]
]:
await self._clients["logs"].create_log_group(
logGroupName=log_group_name, tags=self.tags
)
await self._clients["logs"].put_retention_policy(
logGroupName=log_group_name,
retentionInDays=self._cloudwatch_logs_default_retention,
)
# Note: Not cleaning up the logs here as they may be useful after the cluster is destroyed
return log_group_name
async def _get_default_vpc(self):
vpcs = (await self._clients["ec2"].describe_vpcs())["Vpcs"]
[vpc] = [vpc for vpc in vpcs if vpc["IsDefault"]]
return vpc["VpcId"]
async def _get_vpc_subnets(self):
vpcs = (await self._clients["ec2"].describe_vpcs())["Vpcs"]
[vpc] = [vpc for vpc in vpcs if vpc["VpcId"] == self._vpc]
subnets = (await self._clients["ec2"].describe_subnets())["Subnets"]
return [
subnet["SubnetId"] for subnet in subnets if subnet["VpcId"] == vpc["VpcId"]
]
    async def _create_security_groups(self):
        """Create the cluster's security group and return it as a one-item list.

        Ingress rules: scheduler ports 8786-8787 open to the world (IPv4 and
        IPv6) so clients and the dashboard can connect, plus all TCP traffic
        between members of the group itself (worker <-> scheduler comms).
        """
        response = await self._clients["ec2"].create_security_group(
            Description="A security group for dask-ecs",
            GroupName=self.cluster_name,
            VpcId=self._vpc,
            DryRun=False,
        )
        await self._clients["ec2"].authorize_security_group_ingress(
            GroupId=response["GroupId"],
            IpPermissions=[
                {
                    "IpProtocol": "TCP",
                    "FromPort": 8786,
                    "ToPort": 8787,
                    "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": "Anywhere"}],
                    "Ipv6Ranges": [{"CidrIpv6": "::/0", "Description": "Anywhere"}],
                },
                {
                    "IpProtocol": "TCP",
                    "FromPort": 0,
                    "ToPort": 65535,
                    "UserIdGroupPairs": [{"GroupName": self.cluster_name}],
                },
            ],
            DryRun=False,
        )
        await self._clients["ec2"].create_tags(
            Resources=[response["GroupId"]], Tags=dict_to_aws(self.tags, upper=True)
        )
        # Delete the group again when this cluster object is garbage collected.
        weakref.finalize(self, self.sync, self._delete_security_groups)
        return [response["GroupId"]]
async def _delete_security_groups(self):
timeout = Timeout(
30, "Unable to delete AWS security group " + self.cluster_name, warn=True
)
while timeout.run():
try:
await self._clients["ec2"].delete_security_group(
GroupName=self.cluster_name, DryRun=False
)
except Exception:
await asyncio.sleep(2)
break
    async def _create_scheduler_task_definition_arn(self):
        """Register the ECS task definition for the scheduler; return its ARN.

        Uses ``awsvpc`` networking (each task gets its own ENI) and ships
        container output to CloudWatch via the ``awslogs`` driver.
        """
        response = await self._clients["ecs"].register_task_definition(
            family="{}-{}".format(self.cluster_name, "scheduler"),
            taskRoleArn=self._task_role_arn,
            executionRoleArn=self._execution_role_arn,
            networkMode="awsvpc",
            containerDefinitions=[
                {
                    "name": "dask-scheduler",
                    "image": self.image,
                    "cpu": self._scheduler_cpu,
                    "memory": self._scheduler_mem,
                    "memoryReservation": self._scheduler_mem,
                    "essential": True,
                    "environment": dict_to_aws(self.environment, key_string="name"),
                    "command": ["dask-scheduler"],
                    "logConfiguration": {
                        "logDriver": "awslogs",
                        "options": {
                            "awslogs-region": self._clients["ecs"].meta.region_name,
                            "awslogs-group": self.cloudwatch_logs_group,
                            "awslogs-stream-prefix": self._cloudwatch_logs_stream_prefix,
                            "awslogs-create-group": "true",
                        },
                    },
                }
            ],
            volumes=[],
            requiresCompatibilities=["FARGATE"] if self._fargate else [],
            cpu=str(self._scheduler_cpu),
            memory=str(self._scheduler_mem),
            tags=dict_to_aws(self.tags),
        )
        # Deregister the definition when this cluster object is garbage collected.
        weakref.finalize(self, self.sync, self._delete_scheduler_task_definition_arn)
        return response["taskDefinition"]["taskDefinitionArn"]
async def _delete_scheduler_task_definition_arn(self):
await self._clients["ecs"].deregister_task_definition(
taskDefinition=self.scheduler_task_definition_arn
)
    async def _create_worker_task_definition_arn(self):
        """Register the ECS task definition for workers; return its ARN.

        The dask-worker command derives ``--nthreads`` and ``--memory-limit``
        from the requested CPU (milli-cpu / 1024 = vCPUs) and memory (MB /
        1024 = GB); ``--death-timeout 60`` makes orphaned workers exit.
        """
        response = await self._clients["ecs"].register_task_definition(
            family="{}-{}".format(self.cluster_name, "worker"),
            taskRoleArn=self._task_role_arn,
            executionRoleArn=self._execution_role_arn,
            networkMode="awsvpc",
            containerDefinitions=[
                {
                    "name": "dask-worker",
                    "image": self.image,
                    "cpu": self._worker_cpu,
                    "memory": self._worker_mem,
                    "memoryReservation": self._worker_mem,
                    "essential": True,
                    "environment": dict_to_aws(self.environment, key_string="name"),
                    "command": [
                        "dask-worker",
                        "--nthreads",
                        "{}".format(int(self._worker_cpu / 1024)),
                        "--memory-limit",
                        "{}GB".format(int(self._worker_mem / 1024)),
                        "--death-timeout",
                        "60",
                    ],
                    "logConfiguration": {
                        "logDriver": "awslogs",
                        "options": {
                            "awslogs-region": self._clients["ecs"].meta.region_name,
                            "awslogs-group": self.cloudwatch_logs_group,
                            "awslogs-stream-prefix": self._cloudwatch_logs_stream_prefix,
                            "awslogs-create-group": "true",
                        },
                    },
                }
            ],
            volumes=[],
            requiresCompatibilities=["FARGATE"] if self._fargate else [],
            cpu=str(self._worker_cpu),
            memory=str(self._worker_mem),
            tags=dict_to_aws(self.tags),
        )
        # Deregister the definition when this cluster object is garbage collected.
        weakref.finalize(self, self.sync, self._delete_worker_task_definition_arn)
        return response["taskDefinition"]["taskDefinitionArn"]
async def _delete_worker_task_definition_arn(self):
await self._clients["ecs"].deregister_task_definition(
taskDefinition=self.worker_task_definition_arn
)
class FargateCluster(ECSCluster):
    """Deploy a Dask cluster using Fargate on ECS.

    Identical to :class:`ECSCluster` with ``fargate=True`` forced, so a
    Fargate-powered ECS cluster (and supporting resources) can be created
    automatically with sensible defaults when none is configured.

    Parameters
    ----------
    kwargs: dict
        Keyword arguments forwarded to :class:`ECSCluster`.
    """

    def __init__(self, **kwargs):
        super().__init__(fargate=True, **kwargs)
async def _cleanup_stale_resources():
    """ Clean up any stale resources which are tagged with 'createdBy': 'dask-cloud'.

    This function scans AWS for resources that were created by the
    ``ECSCluster`` class. Any ECS clusters which do not have any running
    tasks will be deleted, and then any supporting resources (task
    definitions, security groups and IAM roles) that are not associated
    with an active cluster will also be deleted.

    The ``ECSCluster`` should clean up after itself when it is garbage
    collected; however, if the Python process is terminated without notice
    that may not happen. Therefore this is useful to remove shrapnel from
    past failures.
    """
    # Clean up clusters (clusters with no running tasks)
    session = aiobotocore.get_session()
    async with session.create_client("ecs") as ecs:
        active_clusters = []
        clusters_to_delete = []
        async for page in ecs.get_paginator("list_clusters").paginate():
            clusters = (
                await ecs.describe_clusters(
                    clusters=page["clusterArns"], include=["TAGS"]
                )
            )["clusters"]
            for cluster in clusters:
                # Only touch clusters we created (DEFAULT_TAGS is a subset).
                if DEFAULT_TAGS.items() <= aws_to_dict(cluster["tags"]).items():
                    if cluster["runningTasksCount"] == 0:
                        clusters_to_delete.append(cluster["clusterArn"])
                    else:
                        active_clusters.append(cluster["clusterName"])
        for cluster_arn in clusters_to_delete:
            await ecs.delete_cluster(cluster=cluster_arn)
        # Clean up task definitions (with no active clusters)
        async for page in ecs.get_paginator("list_task_definitions").paginate():
            for task_definition_arn in page["taskDefinitionArns"]:
                response = await ecs.describe_task_definition(
                    taskDefinition=task_definition_arn, include=["TAGS"]
                )
                task_definition = response["taskDefinition"]
                task_definition["tags"] = response["tags"]
                task_definition_cluster = aws_to_dict(task_definition["tags"]).get(
                    "cluster"
                )
                if (
                    task_definition_cluster is None
                    or task_definition_cluster not in active_clusters
                ):
                    await ecs.deregister_task_definition(
                        taskDefinition=task_definition_arn
                    )
    # Clean up security groups (with no active clusters)
    async with session.create_client("ec2") as ec2:
        async for page in ec2.get_paginator("describe_security_groups").paginate(
            Filters=[{"Name": "tag:createdBy", "Values": ["dask-cloud"]}]
        ):
            for group in page["SecurityGroups"]:
                sg_cluster = aws_to_dict(group["Tags"]).get("cluster")
                if sg_cluster is None or sg_cluster not in active_clusters:
                    await ec2.delete_security_group(
                        GroupName=group["GroupName"], DryRun=False
                    )
    # Clean up roles (with no active clusters)
    async with session.create_client("iam") as iam:
        async for page in iam.get_paginator("list_roles").paginate():
            for role in page["Roles"]:
                role["Tags"] = (
                    await iam.list_role_tags(RoleName=role["RoleName"])
                ).get("Tags")
                if DEFAULT_TAGS.items() <= aws_to_dict(role["Tags"]).items():
                    role_cluster = aws_to_dict(role["Tags"]).get("cluster")
                    if role_cluster is None or role_cluster not in active_clusters:
                        # Policies must be detached before IAM allows deletion.
                        attached_policies = (
                            await iam.list_attached_role_policies(
                                RoleName=role["RoleName"]
                            )
                        )["AttachedPolicies"]
                        for policy in attached_policies:
                            await iam.detach_role_policy(
                                RoleName=role["RoleName"], PolicyArn=policy["PolicyArn"]
                            )
                        await iam.delete_role(RoleName=role["RoleName"])
# TODO Add CLI option for running ``$ dask-ecs [--flags]``
# TODO Awaiting the cluster class seems to hang forever
# This seems to be related to ``await self.scheduler_comm.identity()`` hanging every other time you call it.
# TODO Consolidate finalization tasks
# To be certain that we are finalizing in the correct order we could have a clean up method which
# finalizes everything in one place. We could weakref it from ``_start``.
# TODO Catch all credential errors.
# Not all users will be able to create all the resources necessary for a default cluster.
# We should catch any permissions errors that come back from AWS and cleanly tear everything back down and raise.
# TODO Get adaptive working.
# Currently there is an issue with the ``Adaptive`` class not being able to speak to the ``scheduler_comm`` from ``SpecCluster``
| [
"jtomlinson@nvidia.com"
] | jtomlinson@nvidia.com |
70f2851470ae7f401f457fdd77554c7d15dc0c03 | cbe11cf78660d226dee357842ef0725ce0bea4ac | /ABB_Server_AVL/Grafo.py | be26118edda48087164d55b572c6cd0c6f54f4b9 | [] | no_license | randolph182/EDD_S2_2021 | 5dbe1b2d8bde7830b081d9d0973693bd17e68035 | 4a48b3385e778b32dcc4cf674a4fdad01a19bf9c | refs/heads/main | 2023-08-11T14:08:32.659463 | 2021-09-25T22:03:27 | 2021-09-25T22:03:41 | 392,873,756 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | import os
class Grafo:
def __init__(self):
pass
def graficarArbol(self, raiz):
acumuladores = ["digraph G{\nnode [shape=circle];\n", ""]
if raiz != None:
self.recorrerArbol(raiz,acumuladores)
acumuladores[0] += acumuladores[1] + "\n}"
f = open('grafo.dot', 'w')
try:
f.write(acumuladores[0])
finally:
f.close()
prog = "dot -Tsvg grafo.dot -o grafo.svg"
os.system(prog)
def recorrerArbol(self, raiz,acum):
if raiz:
acum[1] += '"{}"[label="{}"];\n'.format(str(hash(raiz)),str(raiz.id))
if raiz.izq.raiz != None:
acum[1] += '"{}" -> "{}";\n'.format(str(hash(raiz)),str(hash(raiz.izq.raiz)))
if raiz.der.raiz != None:
acum[1] += '"{}" -> "{}";\n'.format(str(hash(raiz)), str(hash(raiz.der.raiz)))
self.recorrerArbol(raiz.izq.raiz, acum)
self.recorrerArbol(raiz.der.raiz, acum)
| [
"randolph.estuardo.2012@gmail.com"
] | randolph.estuardo.2012@gmail.com |
0a9eddb20e67769794714fcd87c3602f2ebe64aa | 17926b196d9db43816453d16f3da84de6664f2fd | /17_Letter_Combinations_of_a_Phone_Number.py | 0070ec79f7b827c9126f11b624a96c4873b8d56f | [] | no_license | luchang59/leetcode | 66690a3c9b28a5201a7be8cd0134142b48418adb | feab001b9291f6e57c44eeb0b625fdaa145d19b4 | refs/heads/master | 2020-05-28T06:57:20.667138 | 2019-09-20T18:18:11 | 2019-09-20T18:18:11 | 188,914,681 | 0 | 0 | null | 2019-05-27T22:17:44 | 2019-05-27T22:08:54 | null | UTF-8 | Python | false | false | 718 | py | class Solution:
def letterCombinations(self, digits: str):
if not digits: return []
letter = {
'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z']
}
res = []
def backtrack(digit, path):
if len(digit) == 0:
res.append(path)
return
for c in letter[digit[0]]:
backtrack(digit[1:], path + c)
backtrack(digits, '')
return res | [
"luchang1991@gmail.com"
] | luchang1991@gmail.com |
5a4f01d01aa9c44beaffdd665c19b9b413826806 | a08d775768c67b8ec34cad661af869b6e6657a7c | /pythonpro/uiauto_test/mkiller.py | a2542d9e3a28f039bb4888faf349ad32b5f780bd | [] | no_license | weixiaovision/Python | ec41172db61d39183d361ba918a395978ec01b62 | 93ad2c1f2d77bd2adc58d1fcc6bde7408157d09c | refs/heads/master | 2021-01-19T23:14:04.838483 | 2017-08-30T12:28:56 | 2017-08-30T12:28:56 | 88,940,228 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 5,578 | py | coding:gbk
import os, sys, time, re, csv
import log
import util
from uiautomator import Device
import traceback
import log, logging
import multiprocessing
optpath = os.getcwd()  # working directory of the current run
imgpath = os.path.join(optpath, 'img')  # directory where screenshots are saved
def cleanEnv():
    """Reset the workspace: restart adb and wipe previous run artifacts.

    Uses Windows shell commands (``del``/``rd``), so this script is
    Windows-only. Recreates an empty ``rst`` results directory.
    """
    os.system('adb kill-server')
    base = os.getcwd()
    for name in ('log.log', 'img', 'rst'):
        target = os.path.join(base, name)
        if os.path.isfile(target):
            os.system('del /f/s/q "%s"' % target)
        elif os.path.isdir(target):
            os.system('rd /s/q "%s"' % target)
    if not os.path.isdir('rst'):
        os.mkdir('rst')
def runwatch(d, data):
    """Pump the device's registered uiautomator watchers for ~60 seconds.

    Runs ``d.watchers.run()`` every 0.5 s for up to 120 iterations so popup
    dialogs are auto-dismissed while installs are in progress.

    NOTE(review): ``data`` is received by value, so the caller's later
    ``data = 1`` rebinding cannot be observed here; the early-exit check
    only fires if 1 was passed in up front. Confirm whether a shared
    mutable flag was intended.
    """
    times = 120
    while True:
        if data == 1:
            return True
        # d.watchers.reset()
        d.watchers.run()
        times -= 1
        if times == 0:
            break
        else:
            time.sleep(0.5)
def installapk(apklist, d, device):
    """Install MKiller on *device*, then each sample APK from *apklist*.

    For every sample install, a watcher thread checks whether MKiller pops
    a "cancel install" dialog; the per-device result is written to
    ``rst/<device>.log``. Python 2 syntax (``except Exception, e``).
    """
    sucapp = []
    errapp = []
    # d = Device(device)
    # Initialise a per-device result log file.
    d.screen.on()
    rstlogger = log.Logger('rst/%s.log' % device, clevel=logging.DEBUG, Flevel=logging.INFO)
    # Install MKiller first.
    mkillerpath = os.path.join(os.getcwd(), 'MKiller_1001.apk')
    cmd = 'adb -s %s install -r %s' % (device, mkillerpath)
    util.exccmd(cmd)
    def checkcancel(d, sucapp, errapp):
        # Poll for up to 10 s for the "cancel install" dialog; its presence
        # means MKiller intercepted the install (test pass).
        times = 10
        while (times):
            if d(textContains=u'取消安装').count:
                print
                d(textContains=u'取消安装', className='android.widget.Button').info['text']
                d(textContains=u'取消安装', className='android.widget.Button').click()
                rstlogger.info(device + '测试成功,有弹出取消安装对话框')
                break
            else:
                time.sleep(1)
                times -= 1
            if times == 0:
                rstlogger.error(device + '测试失败,没有弹出取消安装对话框')
    try:
        # Watchers that auto-dismiss vendor/security popups during installs.
        d.watcher('allowroot').when(text=u'允许').click(text=u'允许')
        d.watcher('install').when(text=u'安装').when(textContains=u'是否要安装该应用程序').click(text=u'安装',
                                                                                  className='android.widget.Button')  # specifically for the Xiaomi install-interception dialog
        d.watcher('cancel').when(text=u'取消').when(textContains=u'超强防护能够极大提高').click(text=u'取消')
        d.watcher('confirm').when(text=u'确认').when(textContains=u'应用程序许可').click(text=u'确认')
        d.watcher('agree').when(text=u'同意并使用').click(text=u'同意并使用')
        d.watcher('weishiuninstall').when(textContains=u'暂不处理').click(textContains=u'暂不处理')
        # d.watchers.run()
        # NOTE(review): ``data`` is an int, so the ``data = 1`` below never
        # reaches the runwatch thread (passed by value). Confirm intent.
        data = 0
        util.doInThread(runwatch, d, data, t_setDaemon=True)
        # Launch the MKiller first-aid-kit activity, then back out of it.
        cmd = 'adb -s %s shell am start com.qihoo.mkiller/com.qihoo.mkiller.ui.index.AppEnterActivity' % device
        util.exccmd(cmd)
        time.sleep(5)
        times = 3
        while (times):
            d.press.back()
            if d(text=u'确认').count:
                d(text=u'确认').click()
                break
            else:
                time.sleep(1)
                times -= 1
        # Install every sample; a watcher thread checks for the cancel dialog.
        for item in apklist:
            apkpath = item
            if not os.path.exists(apkpath):
                logger.error('%s的应用不存在,请检查' % apkpath)
                continue
            if not device:
                cmd = 'adb install -r "%s"' % apkpath
            else:
                cmd = 'adb -s %s install -r "%s"' % (device, apkpath)
            util.doInThread(checkcancel, d, sucapp, errapp)
            rst = util.exccmd(cmd)
    except Exception, e:
        logger.error(traceback.format_exc())
        data = 1
    data = 1
    return sucapp
def finddevices():
    """Return the serials of attached adb devices, or None when none found.

    Parses ``adb devices`` output; the first regex match is the header
    line, so real serials start at index 1.
    """
    output = util.exccmd('adb devices')
    matches = re.findall(r'(.*?)\s+device', output)
    if len(matches) <= 1:
        logger.error('没有找到手机,请检查')
        return
    serials = matches[1:]
    logger.info('共找到%s个手机' % str(len(matches) - 1))
    for serial in serials:
        logger.info('ID为%s' % serial)
    return serials
# deviceids: list of attached phone device IDs
# apklist: list of APK file paths to install
# NOTE: the old "needcount" note (number of APKs to install, 0 = all) described a parameter that no longer exists.
def doInstall(deviceids, apklist):
    """Spawn one installer thread per attached device.

    Each device gets its own uiautomator RPC port, starting at 5555.
    """
    ports = range(5555, 5555 + len(deviceids))
    for device_id, port in zip(deviceids, ports):
        handle = Device(device_id, port)
        util.doInThread(installapk, apklist, handle, device_id)
# Uninstall the given package from a device (original comment said "end application").
def uninstall(deviceid, packname, timeout=20):
    """Uninstall *packname* from *deviceid* in a background thread.

    Returns True once the adb command finishes, or False if it has not
    finished after roughly *timeout* seconds (the command keeps running).
    """
    cmd = 'adb -s %s uninstall %s' % (deviceid, packname)
    worker = util.doInThread(os.system, cmd, t_setDaemon=True)
    remaining = timeout
    while not worker.isFinished():
        time.sleep(1)
        remaining -= 1
        if remaining == 0:
            return False
    return True
# adb must be configured on the PATH (environment variable set up).
# 1. Determine how many phones are attached.
# 2. Determine how many APK samples there are.
# 3. Install MKiller first and launch it.
# 4. Then install the test samples.
# 5. Check whether a "cancel install" dialog appears: if it does, the test
#    passes; if not, the test fails.
if __name__ == "__main__":
cleanEnv()
logger = util.logger
devicelist = finddevices()
if devicelist:
apkpath = os.path.join(os.getcwd(), 'apk') | [
"185858747@qq.com"
] | 185858747@qq.com |
9ffc6bfb9f0c15ae0f9774de27b77f70dd1d4ea3 | 04f1e56dc28394b836d806cb67083e1e9a113605 | /pos_quebec/pos_quebec.py | 19beae0cde20af2facb89f3800306845a0f2cbbb | [] | no_license | norbertoru/OpenERP-POS-Proxy | 4fcfdc5a7fb063d34f5662a914a32b870c69339e | be58d46800848281bba1a270f7e3a6902415dc5f | refs/heads/master | 2020-03-26T23:54:52.330364 | 2014-01-14T22:39:10 | 2014-01-14T22:39:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,609 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Module : pos_quebec
# Créé le : 2014-01-14 par ZEA Informatique Inc.
#
# Adds GST (TPS) and QST (TVQ) to the Point of Sale receipt and the customer display proxy
#
##############################################################################
import openerp
from openerp import netsvc, tools, pooler
from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
class inherit_pos_order_for_quebec(osv.osv):
    """Extend ``pos.order`` with helpers exposing the account.tax tree.

    Used to display Quebec's GST (TPS) and QST (TVQ) breakdown on the POS
    receipt and the customer-display proxy.
    """
    _name='pos.order'
    _inherit='pos.order'

    def get_tax_detail(self, cr, uid, tax_browse, fields, context=None):
        """Read *fields* for one tax record, recursing into its children.

        Child results, when any, are attached under the ``child_ids`` key.
        """
        account_tax_obj = self.pool.get('account.tax')
        # Removed the unused local ``taxes = {}`` that the original declared.
        tax_result = account_tax_obj.read(cr, uid, tax_browse.id, fields, context=context)
        if tax_browse.child_ids:
            tax_result['child_ids'] = [
                self.get_tax_detail(cr, uid, child, fields, context=context)
                for child in tax_browse.child_ids
            ]
        return tax_result

    def get_all_taxes(self, cr, uid, fields, context=None):
        """Return the detail tree for every top-level (parentless) tax."""
        account_tax_obj = self.pool.get('account.tax')
        tax_ids = account_tax_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
        return [
            self.get_tax_detail(cr, uid, tax_browse, fields, context=context)
            for tax_browse in account_tax_obj.browse(cr, uid, tax_ids, context=context)
        ]
inherit_pos_order_for_quebec()
| [
"alexandre@zinfo.ca"
] | alexandre@zinfo.ca |
5e6db6e2bff92bf049713f6157d25a3da970b658 | 78492051c34b7f8b67a011c40b9e9e7d49c405fe | /build/lib/gazel/edits.py | 512efa476edcd45d0fa51adffd227dfe18e73c5d | [
"MIT"
] | permissive | PengKuang/gazel | 40323a8e560084149ff27c261ae71fe3e223ac1f | 2d45d7edea484076d6319a68bef3cff250de035c | refs/heads/main | 2023-04-30T14:20:04.701779 | 2021-05-27T17:47:45 | 2021-05-27T17:47:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,441 | py | from typing import Callable, List, Literal, Optional, Tuple, Union
import pampy
from gazel.common import Id
from gazel.core_constructors import make_snapshot
from gazel.core_types import Snapshot, Token, TokenChange
from gazel.range import (
_shift_range,
get_token_at_range,
range_contains,
range_overlaps,
same_point_range,
)
def insert(source: str, start: int, text: str) -> str:
    """Return *source* with *text* spliced in before index *start*."""
    head, tail = source[:start], source[start:]
    return head + text + tail
def delete(source: str, start: int, size: int) -> str:
    """Return *source* with *size* characters removed starting at *start*."""
    return "".join((source[:start], source[start + size:]))
def get_change(old: Optional[Token] = None, new: Optional[Token] = None) -> Optional[TokenChange]:
    """Classify the difference between an old and a new token.

    Returns a TokenChange of type:
      * "moved"    -- both tokens exist but their ranges differ,
      * "inserted" -- only the new token exists,
      * "deleted"  -- only the old token exists,
    or None when nothing changed (same range) or neither token was given.
    """
    # Fix: the parameters default to None, so their correct annotation is
    # Optional[Token]; the original annotations claimed a bare Token.
    if old and new:
        if not same_point_range(old.range, new.range):
            return TokenChange(type="moved", old=old, new=new)
    elif not old and new:
        return TokenChange(type="inserted", old=old, new=new)
    elif old and not new:
        return TokenChange(type="deleted", old=old, new=new)
    return None
def _adjust_tokens_for_edit(
    old_snapshot: Snapshot,
    new_snapshot: Snapshot,
    direction: Literal["left", "right"],
    affected_index_range: Tuple[int, int],
    next_id=Id(),
):
    """Carry token identities from *old_snapshot* over to *new_snapshot*.

    Tokens overlapping *affected_index_range* are treated as edited/new;
    tokens starting after the edit are matched to their old position by
    shifting their range back by the edit size (left for deletes, right
    for inserts).  Returns (adjusted_tokens, changes).

    NOTE(review): the ``next_id=Id()`` default is evaluated once at import
    time, so every caller that omits next_id shares one counter — confirm
    that sharing is intended.
    """
    shift = affected_index_range[1] - affected_index_range[0]
    # Deletions shift following tokens left, insertions shift them right.
    shift = pampy.match(direction, "left", -shift, "right", shift)
    adjusted_tokens: List[Token] = []
    changes: List[TokenChange] = []
    # FIXME
    # deleted tokens do not get reported to
    # the change list.
    for token in new_snapshot.tokens:
        if range_overlaps(
            affected_index_range, (token.range.start.index, token.range.end.index)
        ):
            # Token intersects the edited span: either an edit of an
            # existing token or a brand-new insertion.
            old_token = get_token_at_range(old_snapshot, affected_index_range)
            new_token = Token(token.range, token.source, next_id(), token.syntax_node)
            change = None
            if old_token:
                old_token_index_range = (
                    old_token.range.start.index,
                    old_token.range.end.index,
                )
                if range_contains(
                    old_token_index_range, affected_index_range
                ): # means we deleted, and hence
                    # the token keeps its old identity (same id).
                    new_token = Token(
                        new_token.range,
                        new_token.source,
                        old_token.id,
                        new_token.syntax_node,
                    )
                    change = TokenChange(type="edited", old=old_token, new=new_token)
            else:
                change = get_change(old=None, new=new_token)
            adjusted_tokens.append(new_token)
            if change:
                changes.append(change)
        else:
            # only tokens that start after the
            # end of the affected range need
            # to be adjusted
            if token.range.start.index >= affected_index_range[1]:
                # Undo the shift to find the matching token in the old snapshot.
                old_range = _shift_range(
                    token.range, -shift, old_snapshot.source.mapping
                )
                if old_range in old_snapshot._token_by_range:
                    old_token = old_snapshot._token_by_range[old_range]
                    new_token = Token(
                        source=token.source,
                        range=token.range,
                        syntax_node=token.syntax_node,
                        id=old_token.id,
                    )
                    adjusted_tokens.append(new_token)
                    change = get_change(old_token, new_token)
                    if change:
                        changes.append(change)
    return adjusted_tokens, changes
def token_info_for_delete(
    old_snapshot: Snapshot, line: int, col: int, size: int, next_id=Id(), id=0, time=0.0
):
    """Delete *size* characters at (line, col) and return the new Snapshot.

    NOTE(review): ``next_id=Id()`` is evaluated once at definition time, so
    calls without an explicit next_id share one counter; ``id`` also shadows
    the builtin — both kept as-is for interface compatibility.
    """
    # Translate (line, col) to a flat character offset.
    start = old_snapshot.source.mapping[line, col]
    affected_range = (start, start + size)
    new_source = delete(old_snapshot.source.text, start, size)
    new_snapshot = make_snapshot(new_source, old_snapshot.source.language, index=id,)
    # Tokens after the deletion shift left by `size`.
    tokens, changes = _adjust_tokens_for_edit(
        old_snapshot, new_snapshot, "left", affected_range, next_id
    )
    return Snapshot(
        id=new_snapshot.id,
        source=new_snapshot.source,
        tokens=tokens,
        changes=changes,
        _token_by_range=new_snapshot._token_by_range,
        time=time,
    )
def token_info_for_insert(
    old_snapshot: Snapshot, line: int, col: int, text: str, next_id=Id(), id=0, time=0.0
):
    """Insert *text* at (line, col) and return the new Snapshot.

    NOTE(review): ``next_id=Id()`` is evaluated once at definition time, so
    calls without an explicit next_id share one counter — see
    token_info_for_delete.
    """
    # Translate (line, col) to a flat character offset.
    start = old_snapshot.source.mapping[line, col]
    new_source = insert(old_snapshot.source.text, start, text)
    new_snapshot = make_snapshot(new_source, old_snapshot.source.language, index=id,)
    affected_range = (start, start + len(text))
    # Tokens after the insertion shift right by len(text).
    tokens, changes = _adjust_tokens_for_edit(
        old_snapshot, new_snapshot, "right", affected_range, next_id
    )
    return Snapshot(
        id=new_snapshot.id,
        source=new_snapshot.source,
        tokens=tokens,
        changes=changes,
        _token_by_range=new_snapshot._token_by_range,
        time=time,
    )
def _perform_aggregated_edit(
    snapshot: Snapshot, edits: dict, next_id: Callable[[], Union[str, float, int]], id=0
) -> Snapshot:
    """Apply every sub-edit in edits["edits"] in order, merging token changes.

    The returned snapshot carries the accumulated changes of all sub-edits
    and the timestamp of the *first* sub-edit.
    """
    current_snapshot = snapshot
    changes: List[TokenChange] = []
    for edit in edits["edits"]:
        current_snapshot = edit_source(current_snapshot, edit, next_id, id=id)
        changes.extend(current_snapshot.changes)
    return Snapshot(
        id=id,
        source=current_snapshot.source,
        tokens=current_snapshot.tokens,
        time=edits["edits"][0]["timestamp"],
        changes=tuple(changes),
        _token_by_range=current_snapshot._token_by_range,
    )
def edit_source(
    snapshot: Snapshot, edit: dict, next_id: Callable[[], Union[str, float, int]], id=0
) -> Snapshot:
    """Apply one edit dict to *snapshot* and return the resulting Snapshot.

    Dispatches (via pampy pattern matching) on edit["type"]:
    "insert", "delete", or "aggregated" (a batch applied in order).
    """
    return pampy.match(
        edit,
        {"type": "insert"},
        lambda edit: token_info_for_insert(
            snapshot,
            line=edit["row"],
            col=edit["col"],
            text=edit["text"],
            next_id=next_id,
            id=id,
            time=edit["timestamp"],
        ),
        {"type": "delete"},
        lambda edit: token_info_for_delete(
            snapshot,
            line=edit["row"],
            col=edit["col"],
            size=edit["len"],
            next_id=next_id,
            id=id,
            time=edit["timestamp"],
        ),
        {"type": "aggregated"},
        lambda edit: _perform_aggregated_edit(snapshot, edit, next_id, id=id),
    )
| [
"devjeetrr@gmail.com"
] | devjeetrr@gmail.com |
a5155d2767b7b2ebaa07bc5b9fa121b3f52383fc | ec5bbdba8deac36ccfad6d9e1b51f616859245ea | /models/plugin_issuegrp.py | 41369346b7b16b2888a7e9f2b346e4bb9738c3a0 | [
"LicenseRef-scancode-public-domain"
] | permissive | manuelep/itracker_beta | bc21e9cfeea715d3a953211a2e182d814a07aa7f | fdf62de698f3886e2679043995901fc50e2016fa | refs/heads/master | 2016-09-05T10:31:58.259757 | 2015-06-12T06:56:22 | 2015-06-12T06:56:22 | 34,455,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | # -*- coding: utf-8 -*-
# web2py model file: executed in the framework's global environment, which
# injects db, Field, auth, T, XML, MARKMIN, SPAN, A, URL, IS_NOT_EMPTY, etc.
if not 'plugin_shared_tools' in globals():
    import plugin_shared_tools
Populator, IssueReferences, UniqueDefault = plugin_shared_tools.Populator, plugin_shared_tools.IssueReferences, plugin_shared_tools.UniqueDefault
# Ticket Types (Problem/Bug, Enhancement, Issue)
db.define_table('issuegrp_type',
    Field('label', required=True),
    Field('description', 'text'),
    Field('is_default', 'boolean', required=True),
    format = '%(label)s'
)
# Issue groups: a titled, typed collection of issues with linked wiki pages.
db.define_table('issuegrp',
    Field('title', requires=IS_NOT_EMPTY()),
    Field('description', 'text',
        comment=XML(T("MARKMIN text syntax is accepted.").replace('MARKMIN', str(STRONG(A('MARKMIN', _href="http://web2py.com/books/default/chapter/29/05#markmin_markmin_syntax"))))),
        represent=lambda v, r: MARKMIN(v)
    ),
    Field('typology', 'reference issuegrp_type', label=T('Type'), represent=lambda v,r: db.issuegrp_type[v].label),
    Field('slugs', 'list:string', label=T("Wiki pages"), readable=False,
        represent = lambda v,r: SPAN(*map(lambda e: A(e, _href=URL('wiki', 'index', args=('issue_%s' % r.id +'_'+e,))), v))),
    auth.signature,
    format = '%(title)s'
)
####################
# Callbacks
####################
# Keep at most one row flagged is_default per issuegrp_* table.
for tablename in db.tables:
    if tablename.startswith('issuegrp_') and 'is_default' in db[tablename].fields:
        db[tablename]._before_update.append(UniqueDefault(db[tablename]))
####################
# Default values
####################
# Seed the type table with two built-in typologies.
Populator.bulk(
    db.issuegrp_type,
    [
        dict(label='Task', description='A generic description of something to do', is_default=True),
        dict(label='Milestone', description='A group of issue with a precise aim', is_default=False),
    ]
)
| [
"manuele@valis-e.com"
] | manuele@valis-e.com |
108c014294d4e1e7d07e30d53535bde40e120770 | f16c425394bac765322a7ff7093ddb9e00ecf996 | /Principles of Programming/Quiz/quiz7/5148463.files/quiz_7.py | ab3f6542c29eb3afd980cef2805b382a25c8d364 | [] | no_license | hanxuwu/Learning-Python | d2a2c3ece67a51eaecf05995cdcba911c4cd013f | 09338b76987f5c84240b1599b3f4c6f270ecee59 | refs/heads/master | 2021-05-12T13:51:22.258704 | 2018-01-19T12:56:34 | 2018-01-19T12:56:34 | 116,941,589 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | # Generates a linked list of a length determined by user input,
# consisting of random nonnegative integers whose upper bound is also determined
# by user input, and reorders the list so that it starts with all odd values and
# ends with all even values, preserving the order of odd and even values in the
# original list, respectively.
#
# Written by Eric Martin for COMP9021
import sys
from random import seed, randrange
from extended_linked_list import ExtendedLinkedList
def collect_references(L, length):
    """Return the set of id()s of the first *length* nodes of linked list *L*."""
    seen = set()
    current = L.head
    remaining = length
    while remaining > 0:
        seen.add(id(current))
        current = current.next_node
        remaining -= 1
    return seen
# Read and validate the three parameters; malformed or negative input
# (or the wrong number of values) aborts the script.
try:
    for_seed, length, upper_bound = [int(i) for i in input('Enter three nonnegative integers: '
                                                          ).split()
                                    ]
    if for_seed < 0 or length < 0 or upper_bound < 0:
        raise ValueError
except ValueError:
    print('Incorrect input, giving up.')
    sys.exit()
seed(for_seed)
# Build a random linked list and remember the identity of every node.
LL = ExtendedLinkedList([randrange(upper_bound + 1) for _ in range(length)])
LL.print()
references = collect_references(LL, length)
LL.rearrange()
# rearrange() must reuse the original nodes, not allocate new ones.
if collect_references(LL, length) != references:
    print('You cheated!')
    sys.exit()
else:
    LL.print()
| [
"skunder301@gmail.com"
] | skunder301@gmail.com |
a5ed5080031328564f118422ad983a2711a152f8 | 2430f722589f6a8ba2d139cc1682492d7c934fbb | /python/fscam_caller.py | 4a3d45c0d0e2a5f6bf3c1f05ed074c85eeb22d6e | [] | no_license | ncicek/smart_garden | 20de30d61302ddf01a920a1e5920425b92e006e6 | 79a42a10661f03ecbbb206ece48bae0ccdcde83f | refs/heads/master | 2021-09-12T08:22:30.631135 | 2018-04-15T15:39:41 | 2018-04-15T15:39:41 | 118,811,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | from subprocess import call
# Calls fswebcam, takes a picture, and returns the path to that picture.
def take_photo():
    """Capture one 1280x720 webcam frame via fswebcam.

    Returns:
        str: path of the image file that fswebcam writes.
    """
    # Bug fix: the filename must be a string literal; the original
    # unquoted `a.bmp` raised NameError at call time.
    photo_filename = "a.bmp"
    call(["fswebcam", "--no-banner", "-r 1280x720", photo_filename])
    return photo_filename
"ciceknebi@gmail.com"
] | ciceknebi@gmail.com |
f073b2d1849f5a6c6ae0a1b78c8871e9785b212c | e8fc00f58e676daaf037a10c945ec6ccfdacb595 | /pytorch-exercise/intermediate_cnn.py | 321bc7046f21b90882d99e51250c81a2c413bd2e | [] | no_license | zoulala/exercise | 431c60c1066137469183a78c96e47da4305868ae | 699652f2b722d7cfdf474ce91f0ebc944420e167 | refs/heads/master | 2023-08-18T15:33:55.913567 | 2023-08-08T12:34:37 | 2023-08-08T12:34:37 | 140,717,645 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,613 | py | #!/usr/bin/python
# -*- coding: utf8 -*-
#
# *****************************************************
#
# file: intermediate_cnn.py
# author: zoulingwei@zuoshouyisheng.com
# date: 2019-10-14
# brief: cnn
#
# cmd>e.g:
# *****************************************************
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Hyper parameters
num_epochs = 5
num_classes = 10
batch_size = 100
learning_rate = 0.001
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='./data/',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = torchvision.datasets.MNIST(root='./data/',
train=False,
transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
    """Two-stage CNN for 28x28 single-channel images (e.g. MNIST).

    Each stage halves the spatial size (28 -> 14 -> 7), so the classifier
    sees a 32 * 7 * 7 feature vector.
    """

    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        self.layer1 = self._make_stage(1, 16)
        self.layer2 = self._make_stage(16, 32)
        self.fc = nn.Linear(32 * 7 * 7, num_classes)

    @staticmethod
    def _make_stage(in_channels, out_channels):
        """Conv -> BatchNorm -> ReLU -> 2x2 max-pool, padding-preserving."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

    def forward(self, x):
        features = self.layer2(self.layer1(x))
        flat = features.reshape(features.size(0), -1)
        return self.fc(flat)
model = ConvNet(num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 100 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
# Test the model
model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
# torch.save(model.state_dict(), 'model.ckpt') | [
"zlw2008ok@126.com"
] | zlw2008ok@126.com |
bf49de51977ea81c7c9dfb13c33ed9803d7dff01 | da96d29b457eb123c01274efea562448df105fc6 | /chapter10/st14.py | 3beaad1741840268fbe861049516df762beb84eb | [] | no_license | Alonsovau/sketches | a1336f1a7909ad059744c4613ab992c8361264f5 | dfb072086cc813d7409fa11393ebaad6e26db180 | refs/heads/master | 2021-01-19T22:29:15.827896 | 2017-10-19T15:37:28 | 2017-10-19T15:37:28 | 88,761,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | # 创建新的python环境
# bash:pyvenv Spam
# bashpyvenv --system-site-packages Spam
| [
"alonsovau@outlook.com"
] | alonsovau@outlook.com |
f4d7705d6583f8e2435b704777f23a7a1f93b42c | b3e2b6b034cf3eefbef366a8616a7cc95eeed9d1 | /scopus/utils.py | 91e9dbc1cd5db458c0782301cc6b487d6d00fb6f | [] | no_license | sciosci/scopus-for-sloan | 48724bf170f2b6394444643c0379d6955c7845af | 5b7377a24da69d73a74952c089af43b6a8d41369 | refs/heads/master | 2022-12-05T02:28:36.479753 | 2020-08-26T16:53:18 | 2020-08-26T16:53:18 | 290,339,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | import csv, json
def read_keys(file):
    """Return the first column of every row of a CSV file, skipping the header."""
    with open(file, 'r') as handle:
        rows = csv.reader(handle)
        next(rows)  # discard the header row
        keys = []
        for row in rows:
            keys.append(row[0])
        return keys
def key_generator(keys):
    """Yield each key in turn; exhaustion carries 'Keys Exhausted' as the
    generator's return value (i.e. StopIteration.value)."""
    yield from keys
    return 'Keys Exhausted'
def write_json(data, filename):
    """Serialize *data* to *filename* as 4-space-indented JSON."""
    text = json.dumps(data, indent=4)
    with open(filename, 'w') as out:
        out.write(text)
"hszyzhl@gmail.com"
] | hszyzhl@gmail.com |
3cca59056f2882a5565439a0d14c270b14d482ab | 11e75691c51d7f627a91e763d771340342439d93 | /ML/config.py | 441d685c2d59d6613078682dcb881918991e8fc7 | [] | no_license | sakuag333/ml_toolkit | 61635f053437e6212eeb05a728f911ddb3b971cd | d1aae7f78493136c47b1b5137908f998a3b70de4 | refs/heads/master | 2020-05-18T08:40:17.678912 | 2013-05-31T18:31:02 | 2013-05-31T18:31:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | import numpy
# Module-level shared state / settings for the logistic-regression scripts.
# The empty containers are populated at runtime by the importing code.
data = []
cdata = []
prediction = []
allowed_feature = [1,3,4,8]            # feature column indices (training CSV)
allowed_feature_testdata = [0,2,3,7]   # feature column indices (test CSV)
theta = []
y = []
outputlabel = 0
repeat = 10000          # iteration count
alpha = 12.2            # step size / learning rate
regularisation = 0      # 0 disables regularisation
trainingset = '../csv/train.csv'
testset = '../csv/test.csv'
outputfile = './logistic_regression_output.csv'
col_ = 0                # dimension counters, initialised to 0
row_ = 0
crow_ = 0
ccol_ = 0
cost = 0
"sakuag333@gmail.com"
] | sakuag333@gmail.com |
a49758acd5eccec993c5f072278e6931c30fcb20 | b6b11d896ac07282498a74e93cac7916fe220be7 | /src/push_notification/push.py | c5d1299654b3531a76017a6bc150d63a4c014e26 | [] | no_license | ZhouYii/AppProtocol | 055339f736e15eec129d388c31cefba957b14424 | 0923b3d3f0c1d32f18b7a55b573cdcf5a6405677 | refs/heads/master | 2016-09-06T04:03:57.477920 | 2015-06-17T13:51:37 | 2015-06-17T13:51:37 | 31,181,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | import time
from apns import APNs, Frame, Payload
#apns = APNs(use_sandbox=True, cert_file='SocialAppCert.pem', key_file='SocialAppKey.pem')
def send_push_notification(msg="Hello", snd="default", badge_type=1, use_sandbox=True):
    """Send one APNs push notification to a hard-coded device token.

    Args:
        msg: alert text shown to the user.
        snd: notification sound name.
        badge_type: badge count to set on the app icon.
        use_sandbox: True to target Apple's sandbox gateway, False for
            production.
    """
    # Bug fix: honour the use_sandbox argument instead of hard-coding True —
    # previously production pushes silently went to the sandbox gateway.
    apns = APNs(use_sandbox=use_sandbox, cert_file='push2.pem', key_file='SocialAppKey2.pem')
    # NOTE(review): device token is hard-coded — consider making it a parameter.
    token_hex = '88697bafe4aec5365cbd228d433c42bcd792a42104fc2982186a2bf8e0b1cced'
    payload = Payload(alert=msg, sound=snd, badge=badge_type)
    apns.gateway_server.send_notification(token_hex, payload)
| [
"ubuntu@ip-172-31-20-117.us-west-2.compute.internal"
] | ubuntu@ip-172-31-20-117.us-west-2.compute.internal |
d2baa376a36471bb1ac599558843249defbdc4ed | d0ff26ca281c1ae55c26465791178ae61aaed329 | /ecom_home/migrations/0027_deals_of_the_day.py | dca1050c570d9151d946f5b55e03dcc207e7f607 | [] | no_license | ali-anas/ecom | 74f8467940392e4eae8b7733acff864ec1f75f31 | bab67a94147b2c7451d017e72c6f152d4cd8ccf6 | refs/heads/main | 2023-02-05T04:42:34.979744 | 2020-12-24T11:34:03 | 2020-12-24T11:34:03 | 324,066,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # Generated by Django 2.2.14 on 2020-12-22 13:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Creates the standalone deals_of_the_day table; runs after migration
    # 0026 of the ecom_home app.
    dependencies = [
        ('ecom_home', '0026_item_deals_of_the_day'),
    ]
    operations = [
        migrations.CreateModel(
            name='deals_of_the_day',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Nullable timestamp for the deal window.
                ('deals_of_the_day_time', models.DateTimeField(null=True)),
            ],
        ),
    ]
| [
"infoalmora786@gmail.com"
] | infoalmora786@gmail.com |
d1896bb2d89ad45ce5d9a56fd92cc0079efd45d6 | 5a8f4e7a16d902c5660496407eae0fb31a3f9bbf | /17-Python.py | 39b21c4b76272e7adbdf03414fddc14d1f149003 | [] | no_license | hoklavat/beginner-sql | 9e7d88cf7f46f86825d9f47bf971050b2455b725 | a61e36146eeca54b6b402c797c2e9f8be3fc350c | refs/heads/main | 2023-01-23T11:06:19.851867 | 2020-11-26T18:11:22 | 2020-11-26T18:11:22 | 311,396,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # 17-Python
import psycopg2 as pg2
# pip install psycopg2
conn = pg2.connect(database='dvdrental', user='postgres', password='123456') #establish connection to database.
cur = conn.cursor() #get cursor object from database which provides access to data.
cur.execute('SELECT * FROM payment') #execute sql command.
cur.fetchone() #fetch first row of the previous query as a tuple; result is discarded here (demo of fetchone/fetchall/fetchmany).
data = cur.fetchmany(10) #fetch the next 10 rows.
data[0][4] #fifth column (index 4) of the first row; expression value is discarded.
conn.close() #close connection to database.
"barisyildiz1982@gmail.com"
] | barisyildiz1982@gmail.com |
4b19d2372805b11996235cac7f81b80f2717217b | 65181237ea55d98f4664a24e5b769f9f37a82dbd | /pinpong game using turtle.py | d2c108cae10964ae9b25bb68fa6e27bc4b2ef72e | [] | no_license | SATHISHKUMAR-01/BEST-ENLIST-PYTHON-INTERNSHIP- | 878903296c32452bf4b23871ef88e58106b99bd4 | 0d66ceb64fa88e89698b72f96ae75a0d58bddf3f | refs/heads/main | 2023-01-28T23:53:59.852898 | 2020-12-07T14:38:05 | 2020-12-07T14:38:05 | 315,260,275 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,262 | py | # Import required library
import turtle
# Create screen
sc = turtle.Screen()
sc.title("PING PONG GAME")
sc.bgcolor("blue")
sc.setup(width=1000, height=600)
# Left paddle
left_pad = turtle.Turtle()
left_pad.speed(0)
left_pad.shape("square")
left_pad.color("white")
left_pad.shapesize(stretch_wid=6, stretch_len=2)
left_pad.penup()
left_pad.goto(-400, 0)
# Right paddle
right_pad = turtle.Turtle()
right_pad.speed(0)
right_pad.shape("square")
right_pad.color("white")
right_pad.shapesize(stretch_wid=6, stretch_len=2)
right_pad.penup()
right_pad.goto(400, 0)
# Ball of circle shape
hit_ball = turtle.Turtle()
hit_ball.speed(40)
hit_ball.shape("circle")
hit_ball.color("yellow")
hit_ball.penup()
hit_ball.goto(0, 0)
hit_ball.dx = 5
hit_ball.dy = -5
# Initialize the score
left_player = 0
right_player = 0
# Displays the score
sketch = turtle.Turtle()
sketch.speed(0)
sketch.color("white")
sketch.penup()
sketch.hideturtle()
sketch.goto(0, 260)
sketch.write("Left_player : 0 Right_player: 0",
align="center", font=("Courier", 24, "normal"))
# Functions to move paddle vertically (bound to keys below); each nudges
# its paddle 20 px up or down by rewriting the turtle's y coordinate.
def paddleaup():
    # Left paddle up.
    y = left_pad.ycor()
    y += 20
    left_pad.sety(y)
def paddleadown():
    # Left paddle down.
    y = left_pad.ycor()
    y -= 20
    left_pad.sety(y)
def paddlebup():
    # Right paddle up.
    y = right_pad.ycor()
    y += 20
    right_pad.sety(y)
def paddlebdown():
    # Right paddle down.
    y = right_pad.ycor()
    y -= 20
    right_pad.sety(y)
# Keyboard bindings
sc.listen()
sc.onkeypress(paddleaup, "w")
sc.onkeypress(paddleadown, "z")
sc.onkeypress(paddlebup, "Up")
sc.onkeypress(paddlebdown, "Down")
while True:
sc.update()
hit_ball.setx(hit_ball.xcor()+hit_ball.dx)
hit_ball.sety(hit_ball.ycor()+hit_ball.dy)
# Checking borders
if hit_ball.ycor() > 280:
hit_ball.sety(280)
hit_ball.dy *= -1
if hit_ball.ycor() < -280:
hit_ball.sety(-280)
hit_ball.dy *= -1
if hit_ball.xcor() > 500:
hit_ball.goto(0, 0)
hit_ball.dy *= -1
left_player += 1
sketch.clear()
sketch.write("Left_player : {} Right_player: {}".format(
left_player, right_player), align="center",
font=("Courier", 24, "normal"))
if hit_ball.xcor() < -500:
hit_ball.goto(0, 0)
hit_ball.dy *= -1
right_player += 1
sketch.clear()
sketch.write("Left_player : {} Right_player: {}".format(
left_player, right_player), align="center",
font=("Courier", 24, "normal"))
# Paddle ball collision
if (hit_ball.xcor() > 360 and hit_ball.xcor() < 370) and(hit_ball.ycor() < right_pad.ycor()+40 and hit_ball.ycor() > right_pad.ycor()-40):
hit_ball.setx(360)
hit_ball.dx*=-1
if (hit_ball.xcor()<-360 and hit_ball.xcor()>-370) and (hit_ball.ycor()<left_pad.ycor()+40 and hit_ball.ycor()>left_pad.ycor()-40):
hit_ball.setx(-360)
hit_ball.dx*=-1
| [
"noreply@github.com"
] | SATHISHKUMAR-01.noreply@github.com |
da48db4291599678b66180ab62b8f4d0b0e67c08 | 9faad698272d139b6e33b85d4101070d718c3822 | /Practice/BOWLERS.py | 0ed10d05590c969def60caba9fb9cb5bc30cac7e | [] | no_license | MautKaFarishta/Competitive_Programming | 0ea1ea9622263ead861032a012515c669bd1a91b | 0d1d9a91176b819ad8104e28967a0945b505574a | refs/heads/master | 2023-03-16T00:09:17.166170 | 2021-03-11T18:15:27 | 2021-03-11T18:15:27 | 287,327,294 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | for _ in range(int(input())):
    # One test case: O overs, B bowlers, per-bowler limit mx.
    overs,bowlers,mx = list(map(int,input().split()))
    # Impossible when a lone bowler would bowl consecutive overs, or when
    # the per-bowler cap keeps total capacity below the number of overs.
    if (bowlers<=1 and overs>1) or (bowlers*mx < overs):
        print(-1)
    else:
        # Round-robin assignment 1..B guarantees no consecutive repeats.
        l = list(range(1,bowlers+1))
        for o in range(overs):
            print(l[o%bowlers],end=' ')
        print()
"omkhilariindia@gmail.com"
] | omkhilariindia@gmail.com |
a2559e01019a605de100c6083e590cc94c7af762 | 340df8627387b9e591b3fcc995b08020127815d1 | /Backups/pgrrdiag/pgrrdiag1.0.5.py | 7fb3e36fcfbc516d3bd94930ead994541df905cd | [] | no_license | pacificgilly1992/PGrainrate | e67353c404b34bd0e76a18f6d35176c6c57a8c47 | e4802b4e658335070b929d4bb9001fdfbc6bcb63 | refs/heads/master | 2021-01-19T04:25:00.432668 | 2016-02-16T01:56:32 | 2016-02-16T01:56:32 | 45,811,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,323 | py | ############################################################################
# Project: The Lenard effect of preciptation at the RUAO,
# Title: Ensemble processing of the PG, Time and Rain Rate data,
# Author: James Gilmore,
# Email: james.gilmore@pgr.reading.ac.uk.
# Version: 1.0.5
# Date: 07/12/15
############################################################################
#Initialising the python script
from __future__ import absolute_import, division, print_function
from array import array
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats, interpolate
import sys, os
execfile("externals.py")
def PGrainrate(file="RUSOdata/raw/2009M062.rax", bin=100):
	"""Process one day's RUAO file: calibrate PG and rain-gauge channels,
	compute per-tip rain rates (<= 5 mm/hr), regress PG on rain rate and
	return (slope, intercept, r, p, stderr, pearson, binned arrays, day rows).

	NOTE(review): the parameter `bin` shadows the builtin and is never used;
	the binning code below refers to undefined names `bincount` and
	`TotalBin`, so it raises NameError whenever the p_value < 0.05 path is
	reached — presumably `bincount = bin` and `TotalBin = np.zeros(bincount)`
	were intended.  `xrange` near the end is Python-2-only.
	"""
	time, rain, pg = np.genfromtxt(file, dtype=float, delimiter=',', usecols=(3, 26, 32), unpack=True, skiprows=2)
	timekeep = time.copy()
	#Calibrate the PG data from V to V/m
	PG=(pg.copy()-0.00903)/0.00463
	PGTime = np.asarray(zip(timekeep, PG))
	#Calibrate the rain data from V to mm
	Rain = np.zeros_like(time)
	time[0]=time[-1]=0 # Set the first and last value of the time series equal to 0
	raintot=0 # Number of tip buckets has occurred in a day
	#Buffer tip times
	for i in range(len(time)-1):
		if rain[i+1]-rain[i]<=0.03:
			time[i]=0
			Rain[i]=0
		else:
			Rain[i]=0.2
			raintot+=1
	#Remove traces when tips has not occured
	timerain = time.copy()[time.copy()!=0]
	Rain = Rain[Rain!=0]
	# Too few tips to analyse: return a zeroed result tuple.
	if len(timerain) < 5:
		return 0,0,0,0,0,0,0,0,0,[(0,0,0,0,0)]
	RainRate = np.zeros_like(timerain)
	TimeTip = np.zeros_like(timerain)
	#Determine the rainfall rate (each bucket tip is 0.2mm)
	"Note that as the time is currently in units of hours that the"
	"derivatives will automatically be displayed in per unit hours"
	for i in range(len(timerain)-1):
		if timerain[i+1]>timerain[i]:
			RainRate[i]=0.2/(timerain[i+1]-timerain[i])
			TimeTip[i]=0.5*(timerain[i+1]+timerain[i])
	#Determine rainfall rates less than 5mm/hr and relate to relevant time
	for i in range(len(RainRate)):
		if not RainRate[i]<=5:
			TimeTip[i]=-1
	RainRate5mm = RainRate[(RainRate<=5) & (RainRate>0)]
	TimeTip5mm = TimeTip[TimeTip>0]
	if len(RainRate5mm) < 1:
		return 0,0,0,0,0,0,0,0,0,[(0,0,0,0,0)]
	# Median PG between consecutive qualifying tips.
	PGtip = np.zeros_like(TimeTip5mm)
	for i in range(len(TimeTip5mm)-1):
		PGtemp = PG[(TimeTip5mm[i] <= timekeep) & (timekeep < TimeTip5mm[i+1])]
		PGtip[i] = np.median(PGtemp)
	#Fit a linear regression model for the PG against rain rate and return some statistics
	"This is assumed at the moment that the function is linearly dependent"
	"where more rigorous testing of the rain rate data needs to be done"
	eps = sys.float_info.epsilon
	# Mann-Whitney U between low- and high-rain-rate PG-normalised samples.
	RRL = RainRate5mm[RainRate5mm<0.5*np.max(RainRate5mm)]/(PGtip[RainRate5mm<0.5*np.max(RainRate5mm)]+eps)
	RRH = RainRate5mm[RainRate5mm>=0.5*np.max(RainRate5mm)]/(PGtip[RainRate5mm>=0.5*np.max(RainRate5mm)]+eps)
	statistic, pvalue = stats.mannwhitneyu(RRL, RRH)
	print("Mann: ",pvalue)
	slope, intercept, r_value, p_value, std_err = stats.linregress(RainRate5mm, PGtip)
	pearson_cor = stats.pearsonr(TimeTip5mm,RainRate5mm)
	#Print the results in a quadrant form displaying the variations in both variables
	if p_value >= 0.05:
		return 0,0,0,0,0,0,0,0,0,[(0,0,0,0,0)]
	print("Beforeprinting")
	PGRainFull(np.max(RainRate5mm)+0.2, np.max(PGtip)+100, os.path.basename(file)[:-4], "png" , RainRate5mm, TimeTip5mm, timekeep, PG, PGtip, slope, intercept, p_value, r_value, pearson_cor, std_err, pvalue)
	# NOTE(review): `bincount` is undefined from here on (NameError).
	RainRateBin = np.zeros(bincount)
	TimeTipBin = np.zeros(bincount)
	PGTipBin = np.zeros(bincount)
	for i in range(bincount):
		RainRateBin[i] = i*5/bincount
	for j in range(len(RainRate5mm)):
		for i in range(1,bincount):
			if (RainRate5mm[j] < i*5/bincount and RainRate5mm[j] > (i-1)*5/bincount):
				PGTipBin[i] += PGtip[j]
				TimeTipBin[i] += TimeTip5mm[j]
				# NOTE(review): TotalBin is never initialised (NameError).
				TotalBin[i] += 1
	PGTipBinned = PGTipBin.copy()/(TotalBin.copy())
	TimeTipBinned = TimeTipBin.copy()/(TotalBin.copy())
	#Removes NaN values
	PGTipBinned = [0 if np.isnan(x) else x for x in PGTipBinned]
	TimeTipBinned = [0 if np.isnan(x) else x for x in TimeTipBinned]
	Year = np.zeros_like(TimeTip5mm)
	Day = np.zeros_like(TimeTip5mm)
	for i in xrange(len(Year)):
		Year[i] = os.path.basename(file)[:-8]
		Day[i] = os.path.basename(file)[5:-4]
	day = zip(Year, Day, TimeTip5mm, RainRate5mm, PGtip)
	#Return from the definition with a barrage of data
	return slope, intercept, r_value, p_value, std_err, pearson_cor, RainRateBin, TimeTipBin, PGTipBin, day
def PGEnsemble(PGtipBin=None, bincount=None, iterator=None):
	"Creates an Ensemble of PG values for every statistically significant"
	"day that we have experience charged rain droplets via space charge."
	"Determines the PG in arbitrary set bins and thus averages the PG"
	"in each bin with the number of events that has happened in said bin."
	# NOTE(review): the four bare strings above are no-op statements, not a
	# docstring; only the first would count as one.  `xrange` below is
	# Python-2-only.
	PGtotal = np.zeros(bincount)
	total = np.zeros(bincount)  # NOTE(review): overwritten two lines below
	PGbinMean = np.zeros(bincount)
	RainRateBin = np.zeros(bincount)
	total = np.zeros_like(PGtipBin)
	#Print some information about the analysis
	print("bincount: ", bincount)
	#Separate out the PG data into regular time intervals
	PGtotal = np.asarray([sum(row[i] for row in PGtipBin) for i in range(len(PGtipBin[0]))])
	#Convert PGtipBin into binary for when PG was recorded
	for i in xrange(iterator): #Usually the length of pgfile
		for j in xrange(bincount):
			if PGtipBin[i, j] != 0:
				total[i, j] = 1
	#Then sum how many events happened on each bin
	PGtotalcounts = np.asarray([sum(row[i] for row in total) for i in range(len(total[0]))])
	for i in range(bincount):
		RainRateBin[i] = i*5/bincount
	#Find the average PG for each bin and delete corresponding value in Rainratebin
	for i in xrange(bincount):
		if PGtotalcounts[i] != 0:
			PGbinMean[i] = PGtotal[i]/PGtotalcounts[i]
		else:
			RainRateBin[i] = 0
	#Remove zero entries from the list
	PGbinMean = PGbinMean[PGbinMean!=0]
	RainRateBin = RainRateBin[RainRateBin!=0]
	slope, intercept, r_value, p_value, std_err = stats.linregress(RainRateBin, PGbinMean)
	plt.scatter(RainRateBin, PGbinMean)
	plt.show()
	print("P-Value: ", p_value)
	print("R^2 Value: ", r_value**2)
	print("Standard Error: ", std_err)
	PGRainSlim(np.max(RainRateBin)+0.2, np.max(PGbinMean)+0.2, "PGEnsemble", "png", RainRateBin, PGbinMean, slope, intercept)
	return
| [
"james.gilmore@pgr.reading.ac.uk"
] | james.gilmore@pgr.reading.ac.uk |
1ca2d61495d0f98f302943535ea2162069b7e8b6 | aefd8e9b53cb3ae613998453638f5e34ddace89f | /main.py | 2a24ad9a717b4a714fac98587d57c3d4090bc3fd | [] | no_license | ZiubinA/Function | 974a7b75982e707026f3955a4abafe29db0a153c | da9a4321a68776deb6ea887467e52b04b32362b8 | refs/heads/master | 2023-06-02T08:06:31.842429 | 2021-06-19T18:22:51 | 2021-06-19T18:22:51 | 378,478,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | def say_hello():
print('Hello World!')
say_hello()
# Each subsequent `def` rebinds the name say_hello, replacing the
# previous version.
def say_hello(name):
    # Requires a positional argument.
    print(f'Hello {name}!')
say_hello('ARSENII')
def say_hello(name='World'):
    # Final version: optional argument with a default.
    print(f'Hello {name}!')
say_hello()
say_hello('ARSENII')
| [
"49118782+ZiubinA@users.noreply.github.com"
] | 49118782+ZiubinA@users.noreply.github.com |
8463f5bf9d517cd873a77ec7cfd41a7a31485143 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/documentdb/v20151106/database_account_mongo_db_collection.py | 5d0018b666c255e3b903426e465a1d8b4c3f3a37 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 16,004 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DatabaseAccountMongoDBCollectionArgs', 'DatabaseAccountMongoDBCollection']
@pulumi.input_type
class DatabaseAccountMongoDBCollectionArgs:
    # NOTE: this class is auto-generated by the Pulumi SDK Generator (see file
    # header) — regenerate rather than hand-editing. It holds the *input*
    # properties used to construct a DatabaseAccountMongoDBCollection resource;
    # each property delegates storage to pulumi.set/pulumi.get.
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 database_name: pulumi.Input[str],
                 options: pulumi.Input[Mapping[str, pulumi.Input[str]]],
                 resource: pulumi.Input['MongoDBCollectionResourceArgs'],
                 resource_group_name: pulumi.Input[str],
                 collection_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a DatabaseAccountMongoDBCollection resource.
        :param pulumi.Input[str] account_name: Cosmos DB database account name.
        :param pulumi.Input[str] database_name: Cosmos DB database name.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        :param pulumi.Input['MongoDBCollectionResourceArgs'] resource: The standard JSON format of a MongoDB collection
        :param pulumi.Input[str] resource_group_name: Name of an Azure resource group.
        :param pulumi.Input[str] collection_name: Cosmos DB collection name.
        """
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "database_name", database_name)
        pulumi.set(__self__, "options", options)
        pulumi.set(__self__, "resource", resource)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # collection_name is the only optional input; omit it entirely when unset
        if collection_name is not None:
            pulumi.set(__self__, "collection_name", collection_name)

    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        Cosmos DB database account name.
        """
        return pulumi.get(self, "account_name")

    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)

    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        Cosmos DB database name.
        """
        return pulumi.get(self, "database_name")

    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)

    @property
    @pulumi.getter
    def options(self) -> pulumi.Input[Mapping[str, pulumi.Input[str]]]:
        """
        A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        """
        return pulumi.get(self, "options")

    @options.setter
    def options(self, value: pulumi.Input[Mapping[str, pulumi.Input[str]]]):
        pulumi.set(self, "options", value)

    @property
    @pulumi.getter
    def resource(self) -> pulumi.Input['MongoDBCollectionResourceArgs']:
        """
        The standard JSON format of a MongoDB collection
        """
        return pulumi.get(self, "resource")

    @resource.setter
    def resource(self, value: pulumi.Input['MongoDBCollectionResourceArgs']):
        pulumi.set(self, "resource", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Name of an Azure resource group.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="collectionName")
    def collection_name(self) -> Optional[pulumi.Input[str]]:
        """
        Cosmos DB collection name.
        """
        return pulumi.get(self, "collection_name")

    @collection_name.setter
    def collection_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "collection_name", value)
class DatabaseAccountMongoDBCollection(pulumi.CustomResource):
    # NOTE: auto-generated by the Pulumi SDK Generator (see file header) —
    # regenerate rather than hand-editing. Represents an Azure Cosmos DB
    # MongoDB collection for API version 2015-11-06.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 collection_name: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 resource: Optional[pulumi.Input[pulumi.InputType['MongoDBCollectionResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        An Azure Cosmos DB MongoDB collection.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: Cosmos DB database account name.
        :param pulumi.Input[str] collection_name: Cosmos DB collection name.
        :param pulumi.Input[str] database_name: Cosmos DB database name.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        :param pulumi.Input[pulumi.InputType['MongoDBCollectionResourceArgs']] resource: The standard JSON format of a MongoDB collection
        :param pulumi.Input[str] resource_group_name: Name of an Azure resource group.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DatabaseAccountMongoDBCollectionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        An Azure Cosmos DB MongoDB collection.

        :param str resource_name: The name of the resource.
        :param DatabaseAccountMongoDBCollectionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either an Args object or
        # plain keyword arguments; both end up in _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(DatabaseAccountMongoDBCollectionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       account_name: Optional[pulumi.Input[str]] = None,
                       collection_name: Optional[pulumi.Input[str]] = None,
                       database_name: Optional[pulumi.Input[str]] = None,
                       options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       resource: Optional[pulumi.Input[pulumi.InputType['MongoDBCollectionResourceArgs']]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Validate options, build the property bag, then register with the
        # Pulumi engine. Required inputs are only enforced when creating a new
        # resource (opts.urn unset).
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DatabaseAccountMongoDBCollectionArgs.__new__(DatabaseAccountMongoDBCollectionArgs)
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            __props__.__dict__["collection_name"] = collection_name
            if database_name is None and not opts.urn:
                raise TypeError("Missing required property 'database_name'")
            __props__.__dict__["database_name"] = database_name
            if options is None and not opts.urn:
                raise TypeError("Missing required property 'options'")
            __props__.__dict__["options"] = options
            if resource is None and not opts.urn:
                raise TypeError("Missing required property 'resource'")
            __props__.__dict__["resource"] = resource
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            # Output-only properties, populated by the provider after creation
            __props__.__dict__["indexes"] = None
            __props__.__dict__["location"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["shard_key"] = None
            __props__.__dict__["tags"] = None
            __props__.__dict__["type"] = None
        # Aliases for every other API version of this resource type, so state
        # moves transparently between versions.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20151106:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20150401:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20150401:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20150408:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20150408:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20160319:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20160319:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20160331:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20160331:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20190801:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20191212:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20200301:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20200401:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20200601preview:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20200901:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20210115:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20210315:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20210415:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20210515:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-native:documentdb/v20210615:DatabaseAccountMongoDBCollection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:DatabaseAccountMongoDBCollection")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(DatabaseAccountMongoDBCollection, __self__).__init__(
            'azure-native:documentdb/v20151106:DatabaseAccountMongoDBCollection',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'DatabaseAccountMongoDBCollection':
        """
        Get an existing DatabaseAccountMongoDBCollection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them from live state.
        __props__ = DatabaseAccountMongoDBCollectionArgs.__new__(DatabaseAccountMongoDBCollectionArgs)
        __props__.__dict__["indexes"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["shard_key"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return DatabaseAccountMongoDBCollection(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def indexes(self) -> pulumi.Output[Optional[Sequence['outputs.MongoIndexResponse']]]:
        """
        List of index keys
        """
        return pulumi.get(self, "indexes")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        The location of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the database account.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="shardKey")
    def shard_key(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A key-value pair of shard keys to be applied for the request.
        """
        return pulumi.get(self, "shard_key")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of Azure resource.
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | morrell.noreply@github.com |
f384062a89b40c9eb2c4d98124b3c7b4e03c563a | 358dff36e38806cfd1dd83fdcb05339f282f0575 | /AVR_Miner.py | 116846b26dc04dababd92ff1cbc00dafd6ecaf11 | [
"MIT"
] | permissive | troll3838/duino-coin | b6899ef0536c5f1f288142041e7217e5e6299c94 | 15c1d46c5157786ae6569e4d26c2048bae8afe7d | refs/heads/master | 2023-04-04T19:19:50.577908 | 2021-04-07T08:52:33 | 2021-04-07T08:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,890 | py | #!/usr/bin/env python3
##########################################
# Duino-Coin Python AVR Miner (v2.4)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
from socket import socket
from threading import Thread as thrThread
from time import sleep, time, ctime, strptime
from os import execl, path, mkdir, _exit
from os import name as osname
from os import system as ossystem
from re import sub
from subprocess import Popen, check_call, DEVNULL
from configparser import ConfigParser
from datetime import datetime
from locale import getlocale, setlocale, getdefaultlocale, LC_ALL
from json import load as jsonload
from platform import system
from pathlib import Path
from signal import signal, SIGINT
import sys
def install(package):
    """Install *package* with pip, then restart the miner in-place.

    execl() replaces the current process image, so the freshly installed
    package is importable when the script starts over; this call never
    returns on success.
    """
    pip_invocation = [sys.executable, "-m", "pip", "install", package]
    check_call(pip_invocation)
    # Re-exec the same interpreter with the same argv to restart the miner.
    execl(sys.executable, sys.executable, *sys.argv)
def now():
    """Return the current local wall-clock time as a ``datetime`` instance."""
    current_moment = datetime.now()
    return current_moment
# Dependency bootstrap: each third-party package is imported inside try/except;
# on ModuleNotFoundError the miner prints a notice and calls install(), which
# pip-installs the package and re-execs this script from the top.
try:
    # Check if pyserial is installed
    import serial
    import serial.tools.list_ports
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + 'Pyserial is not installed. '
        + 'Miner will try to install it. '
        + 'If it fails, please manually install "pyserial" python3 package.'
        + '\nIf you can\'t install it, use the Minimal-PC_Miner.')
    install("pyserial")

try:
    # Check if colorama is installed
    from colorama import init, Fore, Back, Style
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + 'Colorama is not installed. '
        + 'Miner will try to install it. '
        + 'If it fails, please manually install "colorama" python3 package.'
        + '\nIf you can\'t install it, use the Minimal-PC_Miner.')
    install("colorama")

try:
    # Check if requests is installed
    import requests
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + 'Requests is not installed. '
        + 'Miner will try to install it. '
        + 'If it fails, please manually install "requests" python3 package.'
        + '\nIf you can\'t install it, use the Minimal-PC_Miner.')
    install("requests")

try:
    # Check if pypresence is installed (Discord rich presence)
    from pypresence import Presence
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + 'Pypresence is not installed. '
        + 'Miner will try to install it. '
        + 'If it fails, please manually install "pypresence" python3 package.'
        + '\nIf you can\'t install it, use the Minimal-PC_Miner.')
    install("pypresence")
# Global variables
minerVersion = "2.4"  # Version number
timeout = 15  # Socket timeout (seconds)
resourcesFolder = "AVRMiner_" + str(minerVersion) + "_resources"
shares = [0, 0]  # [accepted, rejected] share counters (updated while mining)
diff = 0
donatorrunning = False  # True once the donation subprocess has been launched
job = ""
debug = "n"  # "y" enables debugOutput() prints; overwritten by loadConfig()
rigIdentifier = "None"
# URL of the raw text file holding the current pool address and port
serveripfile = ("https://raw.githubusercontent.com/"
                + "revoxhere/"
                + "duino-coin/gh-pages/serverip.txt")
config = ConfigParser()
donationlevel = 0
hashrate = 0

# Create resources folder if it doesn't exist
if not path.exists(resourcesFolder):
    mkdir(resourcesFolder)

# Download the translations file on first run
if not Path(resourcesFolder + "/langs.json").is_file():
    url = ("https://raw.githubusercontent.com/"
           + "revoxhere/"
           + "duino-coin/master/Resources/"
           + "AVR_Miner_langs.json")
    r = requests.get(url)
    with open(resourcesFolder + "/langs.json", "wb") as f:
        f.write(r.content)

# Load language file (the file handle name is deliberately reused for the
# parsed dict afterwards)
with open(resourcesFolder + "/langs.json", "r", encoding="utf8") as lang_file:
    lang_file = jsonload(lang_file)

# OS X invalid locale hack: getlocale() can return (None, None) on Darwin
if system() == 'Darwin':
    if getlocale()[0] is None:
        setlocale(LC_ALL, 'en_US.UTF-8')

# If the miner is not configured yet, autodetect the UI language from the
# system locale; otherwise read the saved language from the config file.
if not Path(resourcesFolder + "/Miner_config.cfg").is_file():
    locale = getdefaultlocale()[0]
    if locale.startswith("es"):
        lang = "spanish"
    elif locale.startswith("sk"):
        lang = "slovak"
    elif locale.startswith("ru"):
        lang = "russian"
    elif locale.startswith("pl"):
        lang = "polish"
    elif locale.startswith("fr"):
        lang = "french"
    else:
        lang = "english"
else:
    try:
        # Read language from configfile
        config.read(resourcesFolder + "/Miner_config.cfg")
        lang = config["arduminer"]["language"]
    except Exception:
        # If it fails, fallback to english
        lang = "english"
def getString(string_name):
    """Look up a translated UI string by key.

    Tries the active language first, then falls back to the English
    table, and finally returns a diagnostic placeholder for unknown keys.
    """
    active_table = lang_file[lang]
    if string_name in active_table:
        return active_table[string_name]
    fallback_table = lang_file["english"]
    if string_name in fallback_table:
        return fallback_table[string_name]
    return "String not found: " + string_name
def debugOutput(text):
    """Print a timestamped DEBUG line, but only when debug mode is enabled.

    Controlled by the module-level ``debug`` flag ("y" enables output).
    """
    if debug != "y":
        return
    timestamp = now().strftime(Style.DIM + "%H:%M:%S.%f ")
    print(Style.RESET_ALL + timestamp + "DEBUG: " + str(text))
def title(title):
    """Set the terminal window title.

    Uses the ``title`` shell command on Windows; elsewhere emits the
    OSC escape sequence understood by most terminal emulators.
    """
    if osname == "nt":
        # Windows systems
        ossystem("title " + title)
        return
    # Most standard terminals: ESC]0;<title>BEL
    print("\33]0;" + title + "\a", end="")
    sys.stdout.flush()
def Connect():
    """Connect to the master server and return the connected socket.

    Retries forever on failure (10 s back-off, then restart_miner()).
    On success it also compares the server's protocol version with this
    miner's version and warns if the miner is outdated.
    Reads the module globals masterServer_address / masterServer_port,
    which must be set by the caller (AVRMine does this) beforehand.
    """
    global masterServer_address
    global masterServer_port
    while True:
        try:
            try:
                # NOTE(review): `socket` here is the *class* imported via
                # `from socket import socket`, not an instance, so this call
                # always raises and is swallowed — no previous connection is
                # actually closed here.
                socket.close()
            except Exception:
                pass
            debugOutput("Connecting to "
                        + str(masterServer_address)
                        + str(":")
                        + str(masterServer_port))
            socConn = socket()
            # Establish socket connection to the server
            socConn.connect(
                (str(masterServer_address), int(masterServer_port)))
            # Server greets with a 3-byte version string (e.g. "2.4")
            serverVersion = socConn.recv(3).decode().rstrip("\n")
            debugOutput("Server version: " + serverVersion)
            if (float(serverVersion) <= float(minerVersion)
                    and len(serverVersion) == 3):
                # If miner is up-to-date, display a message and continue
                prettyPrint(
                    "net0",
                    getString("connected")
                    + Style.NORMAL
                    + Fore.RESET
                    + getString("connected_server")
                    + str(serverVersion)
                    + ")",
                    "success")
                break
            else:
                # Miner older than server: warn but keep the connection
                prettyPrint(
                    "sys0",
                    " Miner is outdated (v"
                    + minerVersion
                    + ") -"
                    + getString("server_is_on_version")
                    + serverVersion
                    + Style.NORMAL
                    + Fore.RESET
                    + getString("update_warning"),
                    "warning")
                sleep(10)
                break
        except Exception as e:
            prettyPrint(
                "net0",
                getString("connecting_error")
                + Style.NORMAL
                + " ("
                + str(e)
                + ")",
                "error")
            debugOutput("Connection error: " + str(e))
            sleep(10)
            restart_miner()
    return socConn
def connectToAVR(com):
    """Open a 115200-baud serial connection to the AVR board on port *com*
    and return the pyserial ``Serial`` object.

    :param com: serial port name, e.g. "COM3" or "/dev/ttyUSB0"
    """
    try:
        # NOTE(review): comConn is a *local* name (assigned below), so on a
        # reconnect this reference raises UnboundLocalError, which is
        # swallowed — the previous serial connection is never closed here.
        comConn.close()
    except Exception:
        pass
    # Establish serial connection
    comConn = serial.Serial(
        com,
        baudrate=115200,
        timeout=5)
    # Log tag is "usb" + digits extracted from the port name
    prettyPrint(
        "usb"
        + str(''.join(filter(str.isdigit, com))),
        getString("board_on_port")
        + Fore.YELLOW
        + str(com)
        + Style.NORMAL
        + Fore.RESET
        + getString("board_is_connected"),
        "success")
    return comConn
def handler(signal_received, frame):
    """SIGINT (Ctrl+C) handler: print a goodbye message and exit immediately.

    Uses os._exit(0) so worker threads cannot block shutdown.
    """
    prettyPrint(
        "sys0",
        getString("sigint_detected")
        + Style.NORMAL
        + Fore.RESET
        + getString("goodbye"),
        "warning")
    try:
        # NOTE(review): `socket` is the class (from `from socket import
        # socket`), not a connection instance — this call always raises and
        # is swallowed, so no socket is actually closed here.
        socket.close()
    except Exception:
        pass
    _exit(0)


# Register the SIGINT handler so Ctrl+C exits cleanly
signal(SIGINT, handler)
def loadConfig():
    """Load miner settings, running the interactive first-time wizard if no
    config file exists yet.

    On first run, prompts for username, serial port(s), difficulty, rig
    identifier and donation level, then writes Miner_config.cfg. On later
    runs, reads all settings from that file. Results are published through
    the module globals username, avrport (list of port names),
    donationlevel, debug, requestedDiff and rigIdentifier.
    """
    # pool_address/pool_port are declared but never assigned in this
    # function (settings come from serveripfile at runtime instead)
    global pool_address
    global pool_port
    global username
    global donationlevel
    global avrport
    global debug
    global requestedDiff
    global rigIdentifier
    # Initial configuration section
    if not Path(str(resourcesFolder) + "/Miner_config.cfg").is_file():
        print(
            Style.BRIGHT
            + getString("basic_config_tool")
            + resourcesFolder
            + getString("edit_config_file_warning"))
        print(
            Style.RESET_ALL
            + getString("dont_have_account")
            + Fore.YELLOW
            + getString("wallet")
            + Fore.RESET
            + getString("register_warning"))
        username = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_username")
            + Fore.RESET
            + Style.BRIGHT)
        # Show the available serial ports to help the user pick
        print(Style.RESET_ALL
              + Fore.YELLOW
              + getString("ports_message"))
        portlist = serial.tools.list_ports.comports()
        for port in portlist:
            print(Style.RESET_ALL
                  + Style.BRIGHT
                  + Fore.RESET
                  + " "
                  + str(port))
        print(Style.RESET_ALL
              + Fore.YELLOW
              + getString("ports_notice"))
        # Collect one or more comma-separated serial ports
        avrport = ""
        while True:
            avrport += input(
                Style.RESET_ALL
                + Fore.YELLOW
                + getString("ask_avrport")
                + Fore.RESET
                + Style.BRIGHT)
            confirmation = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + getString("ask_anotherport")
                + Fore.RESET
                + Style.BRIGHT)
            if confirmation in ("y", "Y"):
                avrport += ","
            else:
                break
        requestedDiffSelection = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_higherdiff")
            + Fore.RESET
            + Style.BRIGHT)
        if requestedDiffSelection in ("y", "Y"):
            requestedDiff = "ESP32"
        else:
            requestedDiff = "AVR"
        rigIdentifier = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + getString("ask_rig_identifier")
            + Fore.RESET
            + Style.BRIGHT)
        if rigIdentifier in ("y", "Y"):
            rigIdentifier = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + getString("ask_rig_name")
                + Fore.RESET
                + Style.BRIGHT)
        else:
            rigIdentifier = "None"
        donationlevel = "0"
        if osname == "nt" or osname == "posix":
            donationlevel = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + getString("ask_donation_level")
                + Fore.RESET
                + Style.BRIGHT)
        # Sanitize and clamp donation level to 0..5
        # (fix: raw string — "\D" was an invalid escape sequence)
        donationlevel = sub(r"\D", "", donationlevel)
        if donationlevel == '':
            donationlevel = 1
        if float(donationlevel) > int(5):
            donationlevel = 5
        if float(donationlevel) < int(0):
            donationlevel = 0
        # Persist all settings (ConfigParser stringifies the values)
        config["arduminer"] = {
            "username": username,
            "avrport": avrport,
            "donate": donationlevel,
            "language": lang,
            "identifier": rigIdentifier,
            "difficulty": requestedDiff,
            "debug": "n"}
        with open(str(resourcesFolder)
                  + "/Miner_config.cfg", "w") as configfile:
            config.write(configfile)
        avrport = avrport.split(",")
        print(Style.RESET_ALL + getString("config_saved"))
    else:  # If config already exists, load from it
        config.read(str(resourcesFolder) + "/Miner_config.cfg")
        username = config["arduminer"]["username"]
        avrport = config["arduminer"]["avrport"]
        avrport = avrport.split(",")
        donationlevel = config["arduminer"]["donate"]
        debug = config["arduminer"]["debug"]
        rigIdentifier = config["arduminer"]["identifier"]
        requestedDiff = config["arduminer"]["difficulty"]
def Greeting():
    """Print the startup banner and a time-of-day greeting, and download the
    developer donation executable for this platform on first run.

    Sets the module global ``greeting`` to the localized greeting string.
    """
    global greeting
    print(Style.RESET_ALL)
    # Pick a greeting based on the local hour
    current_hour = strptime(ctime(time())).tm_hour
    if current_hour < 12:
        greeting = getString("greeting_morning")
    elif current_hour == 12:
        greeting = getString("greeting_noon")
    elif current_hour > 12 and current_hour < 18:
        greeting = getString("greeting_afternoon")
    elif current_hour >= 18:
        greeting = getString("greeting_evening")
    else:
        greeting = getString("greeting_back")
    # Startup banner
    print(
        Style.DIM + Fore.MAGENTA + " ‖ " + Fore.YELLOW + Style.BRIGHT
        + getString("banner")
        + Style.RESET_ALL + Fore.MAGENTA
        + " (v" + str(minerVersion) + ") "
        + Fore.RESET + "2019-2021")
    print(
        Style.DIM + Fore.MAGENTA + " ‖ " + Style.NORMAL + Fore.MAGENTA
        + "https://github.com/revoxhere/duino-coin")
    print(
        Style.DIM + Fore.MAGENTA + " ‖ " + Style.NORMAL + Fore.RESET
        + getString("avr_on_port")
        + Style.BRIGHT + Fore.YELLOW
        + " ".join(avrport))
    if osname == "nt" or osname == "posix":
        print(
            Style.DIM + Fore.MAGENTA + " ‖ " + Style.NORMAL + Fore.RESET
            + getString("donation_level")
            + Style.BRIGHT + Fore.YELLOW
            + str(donationlevel))
    print(
        Style.DIM + Fore.MAGENTA + " ‖ " + Style.NORMAL + Fore.RESET
        + getString("algorithm")
        + Style.BRIGHT + Fore.YELLOW
        + "DUCO-S1A @ " + str(requestedDiff) + " diff")
    print(
        Style.DIM + Fore.MAGENTA + " ‖ " + Style.NORMAL + Fore.RESET
        + getString("rig_identifier")
        + Style.BRIGHT + Fore.YELLOW
        + rigIdentifier)
    print(
        Style.DIM + Fore.MAGENTA + " ‖ " + Style.NORMAL + Fore.RESET
        + str(greeting) + ", "
        + Style.BRIGHT + Fore.YELLOW
        + str(username) + "!\n")
    # Download the platform-specific donation executable on first run
    if osname == "nt":
        if not Path(resourcesFolder + "/Donate_executable.exe").is_file():
            debugOutput(
                "OS is Windows, downloading developer donation executable")
            url = ("https://github.com/"
                   + "revoxhere/"
                   + "duino-coin/blob/useful-tools/"
                   + "DonateExecutableWindows.exe?raw=true")
            r = requests.get(url)
            with open(resourcesFolder + "/Donate_executable.exe", "wb") as f:
                f.write(r.content)
    elif osname == "posix":
        if not Path(resourcesFolder + "/Donate_executable").is_file():
            # Fix: this debug message previously said "OS is Windows"
            # (copy-paste from the nt branch)
            debugOutput(
                "OS is Linux/Unix, downloading developer donation executable")
            url = ("https://github.com/"
                   + "revoxhere/"
                   + "duino-coin/blob/useful-tools/"
                   + "DonateExecutableLinux?raw=true")
            r = requests.get(url)
            with open(resourcesFolder + "/Donate_executable", "wb") as f:
                f.write(r.content)
def restart_miner():
    """Stop the donation subprocess (if running) and re-exec the miner.

    execl() replaces the current process image, so this function does not
    return on success; errors in either step are reported but non-fatal.
    """
    try:
        if donatorrunning:
            donateExecutable.terminate()
    except Exception as err:
        failure_note = ("Error closing donate executable"
                        + Style.NORMAL
                        + Fore.RESET
                        + " ("
                        + str(err)
                        + ")")
        prettyPrint("sys0", failure_note, "error")
    try:
        # Relaunch the same interpreter with the same arguments
        execl(sys.executable, sys.executable, *sys.argv)
    except Exception as err:
        prettyPrint(
            "sys0",
            "Error restarting miner"
            + " ("
            + str(err)
            + ")",
            "error")
def Donate():
    """Launch the developer donation miner as a background subprocess.

    The donation executable mines Magi (XMG) for the developers for a
    fraction of the time determined by ``donationlevel`` (0-5). With level
    0 only a reminder message is shown.
    """
    global donationlevel
    global donatorrunning
    global donateExecutable
    # Build the platform-specific shell command (fixed, trusted strings;
    # shell=True is needed for the `cd ... && ...` chaining below).
    # NOTE(review): `cmd` is only assigned for "nt"/"posix" — on any other
    # OS with donationlevel > 0 the `cmd +=` below would raise NameError.
    if osname == "nt":
        cmd = (
            "cd "
            + resourcesFolder
            + "& Donate_executable.exe "
            + "-o stratum+tcp://xmg.minerclaim.net:7008 "
            + "-u revox.donate "
            + "-p x -s 4 -e ")
    elif osname == "posix":
        cmd = (
            "cd "
            + resourcesFolder
            + "&& chmod +x Donate_executable "
            + "&& ./Donate_executable "
            + "-o stratum+tcp://xmg.minerclaim.net:7008 "
            + "-u revox.donate "
            + "-p x -s 4 -e ")
    if int(donationlevel) <= 0:
        # Donations disabled: just show the support reminder
        prettyPrint(
            "sys0",
            Fore.YELLOW
            + getString("free_network_warning")
            + getString("donate_warning")
            + Fore.GREEN
            + "https://duinocoin.com/donate"
            + Fore.YELLOW
            + getString("learn_more_donate"),
            "warning")
        sleep(10)
    elif donatorrunning == False:
        # Map donation level to the executable's percentage argument
        if int(donationlevel) == 5:
            cmd += "95"
        elif int(donationlevel) == 4:
            cmd += "75"
        elif int(donationlevel) == 3:
            cmd += "50"
        elif int(donationlevel) == 2:
            cmd += "20"
        elif int(donationlevel) == 1:
            cmd += "10"
        if int(donationlevel) > 0:
            debugOutput(getString("starting_donation"))
            donatorrunning = True
            # Launch CMD as subprocess, discarding its stderr
            donateExecutable = Popen(
                cmd, shell=True, stderr=DEVNULL)
            prettyPrint(
                "sys0",
                getString("thanks_donation"),
                "warning")
def initRichPresence():
    """Initialize the Discord rich presence client (module global ``RPC``).

    Fails silently when Discord is not running.
    """
    global RPC
    try:
        # 808056068113563701 is the Duino-Coin Discord application ID
        RPC = Presence(808056068113563701)
        RPC.connect()
        debugOutput("Discord rich presence initialized")
    except Exception:
        # Discord not launched
        pass
def updateRichPresence():
    """Periodically push hashrate/share stats to Discord rich presence.

    Runs forever (intended to be started on a background thread); updates
    every 15 seconds to respect Discord's rate limit and ignores all
    errors when Discord is not running.
    """
    startTime = int(time())
    while True:
        try:
            RPC.update(
                details="Hashrate: " + str(hashrate) + " H/s",
                start=startTime,
                state="Acc. shares: "
                + str(shares[0])
                + "/"
                + str(shares[0] + shares[1]),
                large_image="ducol",
                large_text="Duino-Coin, "
                + "a coin that can be mined with almost everything, "
                + "including AVR boards",
                buttons=[
                    {"label": "Learn more",
                     "url": "https://duinocoin.com"},
                    {"label": "Discord Server",
                     "url": "https://discord.gg/k48Ht5y"}])
        except Exception:
            # Discord not launched
            pass
        # 15 seconds to respect Discord's rate limit
        sleep(15)
def prettyPrint(messageType, message, state):
    """Print a timestamped, colorized status line in the DUCO style.

    :param messageType: log tag; prefixes "net"/"usb"/"sys" select the
        badge background color (blue/magenta/green)
    :param message: already-assembled message text (may embed color codes)
    :param state: "success" (green), "warning" (yellow), anything else red
    """
    # Badge background by tag prefix. Fix: previously an unknown prefix left
    # `background` unassigned and raised NameError on the print below.
    if messageType.startswith("net"):
        background = Back.BLUE
    elif messageType.startswith("usb"):
        background = Back.MAGENTA
    elif messageType.startswith("sys"):
        background = Back.GREEN
    else:
        background = Back.RESET
    # Text color by severity
    if state == "success":
        color = Fore.GREEN
    elif state == "warning":
        color = Fore.YELLOW
    else:
        color = Fore.RED
    print(Style.RESET_ALL
          + Fore.WHITE
          + now().strftime(Style.DIM + "%H:%M:%S ")
          + Style.BRIGHT
          + background
          + " "
          + messageType
          + " "
          + Back.RESET
          + color
          + Style.BRIGHT
          + message
          + Style.NORMAL
          + Fore.RESET)
def AVRMine(com):
# Mining section
errorCounter = 0
global hashrate
global masterServer_address
global masterServer_port
while True:
# Grab server IP and port
while True:
try:
# Use request to grab data from raw github file
res = requests.get(serveripfile, data=None)
if res.status_code == 200:
# Read content and split into lines
content = (res.content.decode().splitlines())
masterServer_address = content[0] # Line 1 = pool address
masterServer_port = content[1] # Line 2 = pool port
debugOutput(
"Retrieved pool IP: "
+ masterServer_address
+ ":"
+ str(masterServer_port))
# Connect to the server
socConn = Connect()
break
except Exception as e:
# If there was an error with grabbing data from GitHub
prettyPrint(
"net"
+ str(''.join(filter(str.isdigit, com))),
getString("data_error")
+ Style.NORMAL
+ Fore.RESET
+ " (git err: "
+ str(e)
+ ")",
"error")
debugOutput("GitHub error: " + str(e))
sleep(10)
while True:
try:
# Connect to the serial port
comConn = connectToAVR(com)
prettyPrint(
"sys"
+ str(''.join(filter(str.isdigit, com))),
getString("mining_start")
+ Style.NORMAL
+ Fore.RESET
+ getString("mining_algorithm")
+ str(com)
+ ")",
"success")
break
except Exception as e:
prettyPrint(
"usb"
+ str(''.join(filter(str.isdigit, com))),
getString("mining_avr_connection_error")
+ Style.NORMAL
+ Fore.RESET
+ " (avr connection err: "
+ str(e)
+ ")",
"error")
sleep(10)
while True:
while True:
try:
# Send job request
debugOutput("Requested job from the server")
socConn.send(
bytes(
"JOB,"
+ str(username)
+ ","
+ str(requestedDiff),
encoding="utf8"))
# Retrieve work
job = socConn.recv(85).decode()
# Split received data
job = job.rstrip("\n").split(",")
# Check if username is correct
if job[1] == "This user doesn't exist":
prettyPrint(
"net"
+ str(''.join(filter(str.isdigit, com))),
getString("mining_user")
+ str(username)
+ getString("mining_not_exist")
+ Style.NORMAL
+ Fore.RESET
+ getString("mining_not_exist_warning"),
"error")
sleep(10)
# If job was received, continue
elif job[0] and job[1] and job[2]:
diff = int(job[2])
debugOutput("Job received: " + " ".join(job))
break
except Exception as e:
prettyPrint(
"net"
+ str(''.join(filter(str.isdigit, com))),
getString("connecting_error")
+ Style.NORMAL
+ Fore.RESET
+ " (net err: "
+ str(e)
+ ")",
"error")
debugOutput("Connection error: " + str(e))
sleep(10)
restart_miner()
while True:
while True:
while True:
try:
# Write data to AVR board
comConn.write(bytes(
str(job[0]
+ ","
+ job[1]
+ ","
+ job[2]
+ ","),
encoding="utf8"))
debugOutput("Sent job to AVR")
# Read the result
result = comConn.readline().decode()
# print(repr(result))
result = result.rstrip("\n").split(",")
if result != "" and result[0] and result[1]:
debugOutput("Received from AVR: "
+ " ".join(result))
break
else:
raise Exception("Empty data")
except Exception as e:
errorCounter += 1
if errorCounter >= 5:
debugOutput(
"Reconnecting to AVR - too many errors")
prettyPrint(
"usb"
+ str(''.join(filter(str.isdigit, com))),
getString("mining_avr_not_responding")
+ Style.NORMAL
+ Fore.RESET
+ " (errorCounter > 5: "
+ str(e)
+ ")",
"error")
comConn = connectToAVR(com)
errorCounter = 0
debugOutput(
"Exception with to serial: " + str(e))
sleep(1)
try:
debugOutput("Received result (" + str(result[0]) + ")")
debugOutput("Received time (" + str(result[1]) + ")")
ducos1result = result[0]
# Convert AVR time to seconds
computetime = round(int(result[1]) / 1000000, 3)
# Calculate hashrate
hashrate = round(
int(result[0]) / int(result[1]) * 1000000, 2)
debugOutput(
"Calculated hashrate (" + str(hashrate) + ")")
if int(hashrate) > 30000:
raise Exception(
"Response too fast - possible AVR error")
try:
chipID = result[2]
debugOutput(
"Received chip ID (" + str(result[2]) + ")")
# Check if user is using the latest Arduino code
# This is not used yet anywhere, but will soon be
# added as yet another a security measure in the
# Kolka security system for identifying AVR boards
if (not chipID.startswith("DUCOID")
or len(chipID) < 21):
raise Exception("Wrong chipID string")
except Exception:
prettyPrint(
"usb"
+ str(''.join(filter(str.isdigit, com))),
" Possible incorrect chipID!"
+ Style.NORMAL
+ Fore.RESET
+ " This will cause problems with the future"
+ " release of Kolka security system",
"warning")
chipID = "None"
break
except Exception as e:
prettyPrint(
"usb"
+ str(''.join(filter(str.isdigit, com))),
getString("mining_avr_connection_error")
+ Style.NORMAL
+ Fore.RESET
+ " (err splitting avr data: "
+ str(e)
+ ")",
"error")
debugOutput("Error splitting data: " + str(e))
sleep(1)
try:
# Send result to the server
socConn.send(
bytes(
str(ducos1result)
+ ","
+ str(hashrate)
+ ",Official AVR Miner (DUCO-S1A) v"
+ str(minerVersion)
+ ","
+ str(rigIdentifier)
+ ","
+ str(chipID),
encoding="utf8"))
except Exception as e:
prettyPrint(
"net"
+ str(''.join(filter(str.isdigit, com))),
getString("connecting_error")
+ Style.NORMAL
+ Fore.RESET
+ " ("
+ str(e)
+ ")",
"error")
debugOutput("Connection error: " + str(e))
sleep(10)
restart_miner()
while True:
try:
responsetimetart = now()
# Get feedback
feedback = socConn.recv(48).decode().rstrip("\n")
responsetimestop = now()
# Measure server ping
timeDelta = (responsetimestop -
responsetimetart).microseconds
ping = round(timeDelta / 1000)
debugOutput("Successfully retrieved feedback: " +
str(feedback) + " with ping: " + str(ping))
break
except Exception as e:
prettyPrint(
"net"
+ str(''.join(filter(str.isdigit, com))),
getString("connecting_error")
+ Style.NORMAL
+ Fore.RESET
+ " (err parsing response: "
+ str(e)
+ ")",
"error")
debugOutput("Error parsing response: " +
str(e) + ", restarting miner")
sleep(1)
restart_miner()
if feedback == "GOOD":
# If result was correct
shares[0] += 1
title(
getString("duco_avr_miner")
+ str(minerVersion)
+ ") - "
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1])
+ getString("accepted_shares"))
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.RESET
+ " usb"
+ str(''.join(filter(str.isdigit, com)))
+ " "
+ Back.RESET
+ Fore.GREEN
+ " ✓"
+ getString("accepted")
+ Fore.RESET
+ str(int(shares[0]))
+ "/"
+ str(int(shares[0] + shares[1]))
+ Fore.YELLOW
+ " ("
+ str(int((shares[0]
/ (shares[0] + shares[1]) * 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str(f"%01.3f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(round(hashrate))
+ " H/s"
+ Style.NORMAL
+ Fore.RESET
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str(f"%02.0f" % int(ping))
+ "ms")
break # Repeat
elif feedback == "BLOCK":
# If block was found
shares[0] += 1
title(
getString("duco_avr_miner")
+ str(minerVersion)
+ ") - "
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1])
+ getString("accepted_shares"))
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.RESET
+ " usb"
+ str(''.join(filter(str.isdigit, com)))
+ " "
+ Back.RESET
+ Fore.CYAN
+ " ✓"
+ getString("block_found")
+ Fore.RESET
+ str(int(shares[0]))
+ "/"
+ str(int(shares[0] + shares[1]))
+ Fore.YELLOW
+ " ("
+ str(int((shares[0]
/ (shares[0] + shares[1]) * 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str(f"%01.3f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(int(hashrate))
+ " H/s"
+ Style.NORMAL
+ Fore.RESET
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str(f"%02.0f" % int(ping))
+ "ms")
break
else:
# If result was incorrect
shares[1] += 1
title(
getString("duco_avr_miner")
+ str(minerVersion)
+ ") - "
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1])
+ getString("accepted_shares"))
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.RESET
+ " usb"
+ str(''.join(filter(str.isdigit, com)))
+ " "
+ Back.RESET
+ Fore.RED
+ " ✗"
+ getString("rejected")
+ Fore.RESET
+ str(int(shares[0]))
+ "/"
+ str(int(shares[0] + shares[1]))
+ Fore.YELLOW
+ " ("
+ str(int((shares[0]
/ (shares[0] + shares[1]) * 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str(f"%01.3f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(int(hashrate))
+ " H/s"
+ Style.NORMAL
+ Fore.RESET
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str(f"%02.0f" % int(ping))
+ "ms")
break # Repeat
break
if __name__ == "__main__":
    # Script entry point for the AVR miner: initialise the console, load the
    # user configuration, then start one mining thread per configured serial
    # port plus the optional Discord rich-presence updater.  All helpers used
    # here (title, getString, loadConfig, prettyPrint, debugOutput, Greeting,
    # Donate, AVRMine, avrport, thrThread, ...) are defined earlier in this file.
    # Colorama: enable ANSI colour handling; autoreset restores style per print.
    init(autoreset=True)
    # Window title
    title(getString("duco_avr_miner") + str(minerVersion) + ")")
    try:
        # Load config file or create new one
        loadConfig()
        debugOutput("Config file loaded")
    except Exception as e:
        # A broken config is fatal: report, give the user time to read, exit.
        prettyPrint(
            "sys0",
            getString("load_config_error")
            + resourcesFolder
            + getString("load_config_error_warning")
            + Style.NORMAL
            + Fore.RESET
            + " ("
            + str(e)
            + ")",
            "error")
        debugOutput("Error reading configfile: " + str(e))
        sleep(10)
        _exit(1)
    try:
        # Display greeting message (best-effort: failures only logged).
        Greeting()
        debugOutput("Greeting displayed")
    except Exception as e:
        debugOutput("Error displaying greeting message: " + str(e))
    try:
        # Start donation thread (best-effort).
        Donate()
    except Exception as e:
        debugOutput("Error launching donation thread: " + str(e))
    try:
        # Launch avr duco mining threads — one worker thread per serial port.
        for port in avrport:
            thrThread(
                target=AVRMine,
                args=(port,)).start()
    except Exception as e:
        debugOutput("Error launching AVR thead(s): " + str(e))
    try:
        # Discord rich presence threads (best-effort).
        initRichPresence()
        thrThread(
            target=updateRichPresence).start()
    except Exception as e:
        debugOutput("Error launching Discord RPC thead: " + str(e))
| [
"noreply@github.com"
] | troll3838.noreply@github.com |
64396ac27d4f4762dab12e0c99a9d0f1b3aa2239 | 415e11fbf113512c3b2081a55796bcddc652b34e | /candycollector_project/wsgi.py | aa2e8d43b87b57a2056b8f45c261ea130351d8aa | [] | no_license | cooperama/candy-collector-v2.0 | e51cdbcd67071e0423c22038843f96939fb04cc8 | 26cb58d875313693a5ec477a5bbe4efb204d9017 | refs/heads/master | 2023-02-03T13:46:39.190537 | 2020-10-31T21:33:13 | 2020-10-31T21:33:13 | 322,905,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | """
WSGI config for candycollector_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app;
# setdefault lets an externally-set DJANGO_SETTINGS_MODULE win.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'candycollector_project.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"coope133@gmail.com"
] | coope133@gmail.com |
d3b7ac13522a9b349c38c2edc442946aecf70bdc | d4ffea27de224e2620f8c0232317436b91410acf | /state.py | 4014522f1833259bfc9c1749aff16fdb583af4ac | [] | no_license | dlievre/python | 94aa734516c54b5cef5af2c2ce489b6c5e05b02b | 5450384f07ba51484e084dbb28d02f1a3fa17fe6 | refs/heads/master | 2021-01-18T16:02:34.857144 | 2017-04-05T16:54:54 | 2017-04-05T16:54:54 | 86,707,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # -*- coding: utf8 -*-
def traitement(param):
    """Print the U.S. state whose capital city is *param*.

    Looks *param* up among the known capital cities and prints the matching
    state name; prints 'Unknown capital city' when no capital matches.
    Only the small, hard-coded subset of states below is known.
    """
    # State name -> postal abbreviation.
    states = {
        "Oregon" : "OR",
        "Alabama" : "AL",
        "New Jersey": "NJ",
        "Colorado" : "CO"
    }
    # Postal abbreviation -> capital city.
    capital_cities = {
        "OR": "Salem",
        "AL": "Montgomery",
        "NJ": "Trenton",
        "CO": "Denver"
    }
    # Invert both mappings once so the lookup is two O(1) dict accesses
    # instead of the original nested linear scans over both dicts.
    city_to_abbrev = {city: abbrev for abbrev, city in capital_cities.items()}
    abbrev_to_state = {abbrev: state for state, abbrev in states.items()}
    abbrev = city_to_abbrev.get(param)
    if abbrev is not None:
        print(abbrev_to_state[abbrev])
    else:
        print('Unknown capital city')
if __name__ == '__main__':
import sys
if len(sys.argv) == 2:
traitement(sys.argv[1]) | [
"noreply@github.com"
] | dlievre.noreply@github.com |
dc88ac21b5dd97250519560636bdfcf95202af7d | 91228acbc6fa43de75b529ce5a96f9af9c8ab2ab | /ATTfold/common/config.py | 4de6dcbf236c70105ea44af5446b385d4f402d0d | [] | no_license | YL-wang/ATTfold | 37199b8c7ff268ab87b3e235b3e18bdd4d9eb7b0 | 098e36f503e580fb951c4f40465d26fd1633816f | refs/heads/master | 2022-12-25T22:40:29.212729 | 2020-10-02T16:31:34 | 2020-10-02T16:31:34 | 299,803,506 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | import json
import os
import munch
import random
import numpy as np
def get_config_from_json(json_file):
    """Load a JSON configuration file.

    :param json_file: path to the JSON config file to read
    :return: the parsed configuration as a dictionary
    :raises OSError: if the file cannot be opened
    :raises json.JSONDecodeError: if the file is not valid JSON
    """
    # JSON files are UTF-8 by specification; pin the encoding instead of
    # relying on the platform default (which breaks on e.g. Windows cp1252).
    with open(json_file, 'r', encoding='utf-8') as config_file:
        config_dict = json.load(config_file)
    return config_dict
def process_config(jsonfile):
    """Parse the JSON config at *jsonfile* and wrap it for attribute access.

    :param jsonfile: path to the JSON configuration file
    :return: a munch.Munch over the parsed configuration dictionary
    """
    parsed = get_config_from_json(jsonfile)
    return munch.Munch(parsed)
| [
"905327686@qq.com"
] | 905327686@qq.com |
366c65216159efadf320ddb2acbd139b0267c839 | 75aadf158d9de0fe89f5cedf8896599dd8d34769 | /setup.py | 43781fbf2a469ef69386067f1df645a31b5cfe42 | [] | no_license | evrenesat/django-dia | b69344beae8e0f9b1a5ff99974f81ba8d8efed59 | 23f48e37b7a77375f956ec6052baa7ac352d2ca5 | refs/heads/master | 2020-05-25T13:27:22.989529 | 2019-05-21T11:26:38 | 2019-05-21T11:26:38 | 187,822,761 | 0 | 0 | null | 2019-05-21T11:21:36 | 2019-05-21T11:21:36 | null | UTF-8 | Python | false | false | 1,205 | py | from setuptools import setup
from sys import argv
def is_register_command(a):
    """Return True when the first non-option argument names an upload command.

    Option-style tokens ("-v", "--quiet", ...) are skipped; the first
    remaining token is treated as the setup.py command name and checked
    against the commands that publish metadata to PyPI.  Returns False when
    *a* contains no non-option tokens at all.
    """
    non_options = (item for item in a if not item.startswith('-'))
    first = next(non_options, None)
    return first in ('register', 'bdist_wheel')
longdesc = None
# The long description (the PyPI project page text) is only needed when
# uploading metadata, so README.rst is read solely for register /
# bdist_wheel invocations; plain builds skip the file read entirely.
if is_register_command(argv[1:]):
    with open('README.rst') as f:
        longdesc = f.read()
# Package metadata for the django-dia distribution.
# NOTE(review): the entries in `packages` use a hyphen ('django-dia'), which
# is not an importable Python identifier — presumably these match the on-disk
# directory names of this project; TODO confirm against the repository layout.
setup(
    name='django-dia',
    version='0.4.1',
    description='Generate .dia diagram of your django project\'s models',
    # None unless this is a register/bdist_wheel run (see above).
    long_description=longdesc,
    url='https://github.com/neumond/django-dia',
    author='Vitalik Verhovodov',
    author_email='knifeslaughter@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
    keywords='django dia model diagram',
    packages=['django-dia', 'django-dia.management', 'django-dia.management.commands'],
    # Ship the empty .dia template alongside the code.
    package_data={'django-dia': ['empty.xml']},
    install_requires=['Django', 'six'],
    extras_require={
        'tests': ['pytest', 'pytest-django', 'pytest-pythonpath']
    }
)
| [
"knifeslaughter@gmail.com"
] | knifeslaughter@gmail.com |
bec3c4d503f96444c356b39ba80bba98e6cff20c | 9df8ca72108d95f703d6839b67f71e3c185860a9 | /python/feature_selection/lib/_class/DFROCAUCThreshold.py | 4332cfa424a49c5fbe2feab9f903a4ba81890a3c | [] | no_license | Hann-THL/DATA_SCIENCE | d0010c9c77326f21a03d989faab61ccf1b53f3f1 | 7cda0a36d81e0d710704a1261521a3bb73e5c690 | refs/heads/master | 2022-11-21T11:21:41.568958 | 2020-07-27T11:09:10 | 2020-07-27T11:09:10 | 204,294,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RepeatedStratifiedKFold
import pandas as pd
import numpy as np
from tqdm import tqdm
class DFROCAUCThreshold(BaseEstimator, TransformerMixin):
    """Univariate feature selector: keeps DataFrame columns whose
    single-feature cross-validated ROC-AUC exceeds *threshold*.

    NOTE(review): the `estimator` and `cv` defaults are instantiated once at
    class-definition time and are therefore shared by every instance created
    with the defaults — consider `None` defaults resolved inside fit().
    TODO confirm whether sharing is intended.
    """
    def __init__(self, columns=None, threshold=.5, estimator=RandomForestClassifier(), cv=RepeatedStratifiedKFold(), multi_class='raise'):
        # columns: subset of column labels to score (None -> all of X's columns)
        self.columns = columns
        # threshold: minimum mean ROC-AUC for a feature to be kept
        self.threshold = threshold
        self.estimator = estimator
        self.cv = cv
        # multi_class: forwarded to roc_auc_score ('raise' / 'ovo' / 'ovr')
        self.multi_class = multi_class
        self.transform_cols = None
        self.stat_df = None
    def fit(self, X, y):
        """Score each candidate column with per-fold univariate ROC-AUC and
        record per-feature scores and the pass/fail flag in self.stat_df."""
        self.columns = X.columns if self.columns is None else self.columns
        self.transform_cols = [x for x in X.columns if x in self.columns]
        # Univariate ROC-AUC: one model per (column, CV fold).
        cv_scores = []
        for column in self.transform_cols:
            scores = []
            splits = tqdm(self.cv.split(X, y))
            for train_index, test_index in splits:
                # NOTE(review): cv.split returns positional indices, but .loc
                # treats them as labels — this only works when X and y carry a
                # default RangeIndex; .iloc would be safer. TODO confirm the
                # expected inputs before changing behavior.
                X_train = X.loc[train_index][[column]]
                y_train = y.loc[train_index]
                X_test = X.loc[test_index][[column]]
                y_test = y.loc[test_index]
                if self.multi_class in ['ovo', 'ovr']:
                    # One-hot targets for the multi-class AUC variants.
                    y_train = pd.get_dummies(y_train)
                    y_test = pd.get_dummies(y_test)
                self.estimator.fit(X_train, y_train)
                y_pred = self.estimator.predict(X_test)
                scores.append(round(roc_auc_score(y_test, y_pred, multi_class=self.multi_class), 5))
                splits.set_description(f'Cross-Validation[{column}]')
            cv_scores.append(scores)
        self.stat_df = pd.DataFrame({
            'feature': self.transform_cols,
            'cv_score': cv_scores
        })
        self.stat_df['average_score'] = self.stat_df['cv_score'].apply(lambda x: np.mean(x))
        self.stat_df['support'] = np.where(np.array(self.stat_df['average_score']) > self.threshold, True, False)
        return self
    def transform(self, X):
        """Return a copy of X restricted to the supported features, ordered by
        descending average score.  Raises NotFittedError before fit()."""
        if self.transform_cols is None:
            raise NotFittedError(f"This {self.__class__.__name__} instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
        features = self.stat_df[self.stat_df['support']].sort_values(by='average_score', ascending=False)['feature'].values
        new_X = X[features].copy()
        return new_X
def fit_transform(self, X, y):
return self.fit(X, y).transform(X) | [
"hann_lim@hotmail.com"
] | hann_lim@hotmail.com |
1249c9dbad05a3aa1411daa845c5393e54e738a6 | e5744a54767e4270221fa9baf9c52e5d39a8a9da | /Lab Tutorial/Lab 5/lab5_1_Ahmad Dzaki Naufal_1606889093_A_Adrianus Saga Ekakristi.py | 77e0ac5f3cab706c81eaf141dfa905820154edb2 | [] | no_license | AhmadDzakiN/tugas-DDP1 | 396350dead612c340325bf4cea97aa6667e420a4 | a506fff5779a0bee5adc38b28b353d7fed70e123 | refs/heads/master | 2022-10-29T09:35:29.308562 | 2020-06-15T02:11:31 | 2020-06-15T02:11:31 | 272,250,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | jumlah=int(input("Masukan Jumlah: "))
# Collect the domain part of each entered e-mail address.
email=[]
domain={}
# jumlah (read just above) is the number of addresses to prompt for;
# each address is split on '@' and only the domain half is kept.
for z in range(jumlah):
    mail=input("Masukan Email: ")
    mail = mail.split("@")
    email.append(mail[1])
# Count how many addresses were seen per domain.
for z in email:
    domain[z] = domain.get(z,0) + 1
# Report each domain with its count (prompt/label strings are Indonesian).
for z in domain:
    print(domain[z], "address dengan domain", z)
| [
"lord.dzaki66@gmail.com"
] | lord.dzaki66@gmail.com |
1a9a0fb6b2994bf1013e415e829d428c2fc5f781 | e3eda7495b2e03cb87fa20422069a1219f3aa1b7 | /partie2.py | 3eed7676c420967c642bd986b98e2660f3501d8c | [] | no_license | fberrabah/Exercice-d-introduction-Python | 82c9db80ca22d3e50c9b240a3d266b7e720e6582 | 5f82e0c2dcd636ad268315380e7079676259ba45 | refs/heads/master | 2020-08-14T23:28:42.487612 | 2019-10-20T19:50:50 | 2019-10-20T19:50:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | #Condition True False
# Exercise 1 — truthiness: an empty string has length 0, so the first print
# shows True and the second False.  (User-facing strings are intentionally
# French and are left untouched.)
print("Exercice 1\n")
Paris = ""
Marseille = "NUL"
print(len(Paris)==0)
print(len(Marseille)==0)
# Exercise 2 — compute an age from the current year and the year of birth,
# then add the neighbour's age to it.
print("Exercice 2\n")
date = input('Veuillez entrer l année en cours : ')
naissance = input ('Ainsi que votre année de naissance : ')
date = int(date)
naissance = int(naissance)
s=date - naissance
print(s)
voisin = input ("Quel âge à votre voisin :")
voisin = int(voisin)
v=s + voisin
v=str(v)
print("Vous avez " + v +"ans avec votre voisin")
# Exercise 3 — compute a shopping total with a 20% discount applied.
print("Exercice 3\n")
chaussure = 70
jean = 59
tshirt = 20
total = chaussure+jean+tshirt
print("Le cout total de votre shooping est de {}".format(total *0.80))
# Exercise 4 — a tiny calculator: add two user-supplied numbers.
print("Exercice 4\n")
calcul = float(input ("Entrer votre donnée :"))
calcul1 = float(input ("Entrer votre nouvelle donnée :"))
print("Voici le résultat de vos 2 données : {}".format(calcul+calcul1))
# Exercise 5 — ask for first/last name (uppercased) and print the
# first+last letter combinations of each.
print("Exercice 5\n")
prénom = input ("Entrer votre prénom :").upper()
nom = input ("Entrer votre nom :").upper()
print(prénom[0]+prénom[-1])
print(nom[0]+nom[-1])
print(prénom[0]+prénom[-1]+nom[0]+nom[-1])
age = int(input ("Entrer votre age:"))
total=round(age/33)
print("votre age divisé par 33 est :{}".format(total)) | [
"farid.berrabah@gmail.com"
] | farid.berrabah@gmail.com |
9db3b7c486e2381b8f3434d7cf577129eb388411 | a1efd481121f9af645dd201c98dade4226e514fb | /nu_ners/extract_brands_ners_adhoc.py | 50bab0223a8aa618fcada30a59b9b4ad4f4ca1be | [] | no_license | stcybrdgs/wxMatchingEngine | 50ebdf6aee91cd91532a8c385c164e679016e71b | a32a6f984c084bab3ab4d74671ceb02af55bfe8b | refs/heads/master | 2020-05-31T11:39:50.934850 | 2019-11-15T21:47:23 | 2019-11-15T21:47:23 | 190,264,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,285 | py | #!/usr/bin/env python
# coding: utf8
# Compatible with: spaCy v2.0.0+
# extract_brands_ners_adhoc.py
"""
Wed, Oct 23, 2019
Stacy Bridges
"""
# EXTRACT BRANDS
# get data file input
# get brand input
# choose which data column to use for extraction
# - program presents user with menu of options
# get fresh model
# - use entity ruler and brands input to map brands
# - chunk as needed
# get data column
# - turn into nlp
# - chunk as needed
# extract brands into single column
# - extract as list of distinct brands
# - if brands are already in the column, preserve them
# after extracting, run script to identify primary brand
# IMPORTS =====================================
import os, sys, csv, json
import spacy
from spacy import displacy
from spacy.pipeline import EntityRuler
from spacy.pipeline import Tagger
from spacy.language import Language
from pathlib import Path
from spacy.lemmatizer import Lemmatizer
from spacy.lang.en import LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES
from spacy.lang.en.stop_words import STOP_WORDS
import unicodedata # use to normalize international characters
import pandas as pd
from pandas import ExcelWriter
import numpy as np
# PATHS =======================================
#sys.path.append('../../../preprocessor')
# IMPORT PY FILES =============================
import py_string_cleaner
import menu
# GLOBALS =====================================
# NOTE(review): 'global' at module scope is a no-op — names assigned at the
# top level of a module are already global.  Kept as-is.
global row_heads
row_heads = []  # first cell of each CSV row; filled by import_csv()
df_tender = []  # replaced by a pandas DataFrame in get_column_choice()
tender_col_choices = []  # module-level defaults; shadowed by locals in get_column_choice()
tender_col_nums = []
# FUNCTIONS ===================================
def sentence_segmenter(doc):
    """Mark every 'wrwx' anchor token as the start of a new sentence.

    The CSV importer prefixes each record with the literal token 'wrwx',
    so flagging those tokens makes the pipeline treat each record as its
    own sentence.  Returns the same doc object, mutated in place.
    """
    for tok in doc:
        if tok.text != 'wrwx':
            continue
        doc[tok.i].is_sent_start = True
    return doc
# end function //
def get_column_choice(tender_file):
    """Interactively ask the user which column of the tender workbook to use.

    Reads *tender_file* (Excel) into the module-global df_tender, prints a
    numbered menu of its column headers, and loops until the user enters a
    valid number (returning that column's name) or 'm' (opens the main menu,
    then terminates the interpreter via sys.exit()).
    """
    print('\nBuilding column menu...')
    global df_tender
    # get the columns from the file
    # df.columns.tolist()
    tender_col_choices = []
    tender_col_nums = []
    df_tender = pd.read_excel(tender_file, sheet_name=0) # read tender file into dataframe
    for head in df_tender:
        tender_col_choices.append(head) # iterating a DataFrame yields its column labels
    # print user menu
    print('\n-----------------------------------------')
    print('            Tender Columns')
    print('-----------------------------------------')
    spacer ='  '
    print('{}{}{}'.format('m', spacer, 'Show Main Menu'))
    tender_col_nums.append('m')
    col_num = ''  # NOTE(review): unused local — safe to delete
    i = 0
    for tc in tender_col_choices:
        i += 1
        print('{}  {}'.format(i, tc))
        tender_col_nums.append(str(i))
    # get user input
    print('\nSelect the column for extracting brands (or \'m\' for Main Menu)')
    col_choice = input()
    # validate user input: loop until the entry matches a menu key
    while col_choice not in tender_col_nums:
        print('Invalid choice! Select a column (or \'m\' for Main Menu)')
        col_choice = input()
    if col_choice == 'm':
        menu.main()
        # if the user chooses 'm', then program control goes back to menu.main(),
        # which means that when menu.main() terminates, the program control will
        # return to this program; therefore, it's important to invoke sys.exit()
        # upon the callback to terminate all py execution in the terminal
        sys.exit()
    else:
        # Map the 1-based menu number back to the column label.
        col_choice = tender_col_choices[int(col_choice)-1]
        print('\nYou chose: {}'.format(col_choice))
    #print(jsonl_files)
    return col_choice
# end function //
def create_tender_csv(tender_file):
    """Dump the user-chosen tender column to <script dir>/tender_brand.csv.

    Asks the user (via get_column_choice) which column of *tender_file* to
    extract, writes that column one value per line under a 'description'
    header — ASCII-folding international characters — and returns the full
    path of the CSV written.  Pauses for Enter before returning.
    """
    global df_tender
    # get name of column that user wants to use to extract brands
    # and turn that column into a csv file
    # return the full path of the csv to the calling function
    column_choice = get_column_choice(tender_file) # get user's choice of column to extract brand from
    # get column_choice from tender_file and turn the col into a csv
    print('\nCreating csv file of selected tender column...\n')
    data = df_tender[column_choice]
    folder_path = os.path.dirname(os.path.abspath(__file__))
    # NOTE(review): hard-coded '\\' separator makes this path Windows-only.
    csv_filename = folder_path + '\\' + 'tender_brand.csv'
    with open(csv_filename, 'w', encoding='utf-8') as outfile: # encoding handles charmap errors
        outfile.write('description\n')
        for line in data:
            line = str(line)
            # NFKD-decompose, then drop the non-ASCII marks (int'l chars).
            line = unicodedata.normalize('NFKD', line).encode('ASCII', 'ignore') # convert int'l chars
            line = line.decode('utf-8') # convert bytes to strings
            outfile.write(str(line) + '\n')
            #outfile.write('\n')
            print(line)
    print('\n\nThe selected data column was written to the csv file at:\n{}'.format(csv_filename))
    print('\nPress \'Enter\' to continue...')
    input()
    return csv_filename
# end function //
def import_csv(d):
    """Read the pipe-delimited CSV at path *d* into one newline-joined string.

    Every record is prefixed with an anchor token: 'wrwxstart ' for the first
    row, 'wrwx ' for the rest, plus a trailing 'wrwxend ' line repeating the
    last row.  sentence_segmenter() later turns these anchors into sentence
    boundaries.  Side effect: appends each row's first cell to the module
    global row_heads.
    """
    global row_heads
    doc = ''
    with open(d) as data:
        csv_reader = csv.reader(data, delimiter='|')
        i = 0
        for row in csv_reader:
            # populate row_heads[]
            #if i > 0: # skip header row  (disabled: the header row IS collected too)
            row_head = row[0]
            row_heads.append(row_head)
            # populate txt obj
            if i == 0:
                # add top anchor to keep displacy from collapsing
                doc = doc + 'wrwxstart ' + ('|'.join(row) + '\n')
            else:
                doc = doc + 'wrwx ' + ('|'.join(row) + '\n')
            i += 1
        # add bottom anchor to keep displacy from collapsing
        # NOTE(review): 'row' is the loop variable reused after the loop — this
        # duplicates the last record, and raises NameError on an empty file.
        doc = doc + 'wrwxend ' + ('|'.join(row) + '\n')
    return doc
# end function //
# MAIN ========================================
def main(patterns_file, tender_file):
    '''
    NERS Demo w/ Sample Data.

    Build a spaCy pipeline with an EntityRuler loaded from *patterns_file*,
    extract BRND entities from a user-selected column of *tender_file*
    (an Excel workbook), merge the brands back into that workbook's
    wBrand_all column, save the model to ./ners_adhoc_model, and render a
    displaCy HTML report under ./displacy.
    '''
    print('module: extract_brands_ners_adhoc.py')
    print('\n')
    #print(patterns_file)
    #print(tender_file)
    #sys.exit()
    # CONFIG -------------------------------------------------- \\
    # ------------------------------------------------------------ \\
    # brnd, mpn, spplr
    # Feature toggles — only 'ruler' and 'cleaner' are read below;
    # model/brnd/number_tagger/write_type are currently unused here.
    model = 'pre' # pre -> use non-trained model / post -> use trained model
    brnd = 'on' # on/off
    ruler = 'on'
    cleaner = 'on'
    number_tagger = 'off'
    # rem if stemmer is turned on after model does P2 training, then
    # you will need to use POS tag to detect nouns in products
    # then create new generator patterns for all.json
    # then run entity ruler again
    # stemmer = 'off'
    #outFile = r'C:\Users\stacy\Desktop\IESA Project - Europe\IESA Phase 2\ners\ners_brand_patterns.jsonl'
    # declare outputs
    # brnd_pandas_file = r'C:\Users\stacy\Desktop\IESA Project - Europe\IESA Phase 2\ners\ners_extracted_brands.xlsx' # output
    # wx_1_file = r'C:\Users\stacy\Desktop\IESA Project - Europe\IESA Phase 2\ners\test_data_cln_org_iesa_PPE_wx_v1.xlsx' # output
    # declare inputs
    #brnd_file = r'C:\Users\stacy\Desktop\IESA Project - Europe\IESA Phase 2\ners\ners_brand_patterns.jsonl' # input
    #patterns_file = brnd_file
    # rem tender_file = user-selected column from wx_1_file dataframe TENDER
    #tender_file = r'C:\Users\stacy\Desktop\IESA Project - Europe\IESA Phase 2\ners\test_brands_old_input.csv'
    #tender_file = r'C:\Users\stacy\Desktop\NERS Demo\descriptions_nonstock.csv'
    write_type = 'w'
    # ------------------------------------------------------------ //
    # ---------------------------------------------------------- //
    # SETUP PD DATAFRAMES -----------------------------------------------------
    # read Brands infile into pd dataframe
    # read Tender infile into pd dataframe
    # load model
    nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner']) #('en_core_web_sm', disable=['parser'])
    #elif model == 'post':nlp = spacy.load('demo_model')
    # Custom segmenter splits the text on the 'wrwx' anchors added by import_csv.
    nlp.add_pipe(sentence_segmenter, after='tagger')
    # add pipes
    if ruler == 'on':
        # load patterns from external file only if model is not already trained
        nu_ruler = EntityRuler(nlp).from_disk(patterns_file)
        # putting the ruler before ner will override ner decisions in favor of ruler patterns
        nlp.add_pipe(nu_ruler)#, before='ner')
    '''
    # remember to swap precedence between ruler and ner after model training
    if model == 'post':
        # load patterns from external file only if model is not already trained
        if "entity_ruler" not in nlp.pipe_names:
            nu_ruler = EntityRuler(nlp).from_disk(patterns_file)
            # putting the ner before ruler will override favor ner decisions
            nlp.add_pipe(nu_ruler)#, before='ner')
    '''
    # ask user to select a column from the user-selected data file
    # and turn it into a csv file that can be imported by NERS
    tender_col_csv = create_tender_csv(tender_file) # create the csv and return csv filename
    tender = import_csv(tender_col_csv) # import the csv
    print('\nCleaning the tender input...')
    if cleaner == 'on':
        tender = py_string_cleaner.clean_doc(tender) # clean
    doc = nlp(tender)
    print('\nExtracting brands...')
    # show pipeline components:
    print(nlp.pipe_names)
    # COUNT ENTITIES ----------------------------------------------------------
    labels = []
    alt_labels = []
    print('\n')
    labels = ['BRND', 'WRWXSTART', 'WRWXEND'] # , 'PRODUCT', 'MPN', 'SKU']
    alt_labels = ['Brnd', 'WrWxStart', 'WrWxEnd'] # , 'Product', 'MfrPartNo', 'SkuID']
    total_found = []
    total_unique_found = []
    # NOTE(review): only 'BRND' is actually counted inside this loop, so the
    # same totals are appended once per label in `labels`.
    for label in labels:
        tot_num = 0
        unique_num = 0
        unique = []
        for ent in doc.ents:
            # print([ent.text, ent.label_], end='')
            if ent.label_ == 'BRND':
                if ent.text not in unique:
                    unique.append(ent.text)
                    unique_num += 1
                tot_num += 1
        #print('\nFound {} total, {} unique.\n'.format(tot_num, unique_num))
        total_found.append(tot_num)
        total_unique_found.append(unique_num)
    # pandas output for brnds ------------------------------------------------
    # This technique allows you to isolate entities on
    # a sentence-by-sentence basis, which will allow
    # for matching entities on a record-by-record basis
    wBrand_ext = []
    unique = []
    unique_str = ''
    existing_str = ''
    j = 0
    # Each sentence corresponds to one record thanks to the wrwx anchors.
    for sent in doc.sents:
        if j > 0: # no need to process the header
            existing_str = str(df_tender['wBrand_all'][j-1]).lower() # !!! ------------------------ !!!
            if existing_str == 'nan': # !!! ------------------------ !!
                existing_str = '' # !!! ------------------------ !!
            for ent in sent.ents:
                if ent.label_ == 'BRND':
                    # add condition 'and (existing_str.find(ent.text) < 0)'
                    # to account for any brands already extracted by prior runs
                    if ent.text not in unique and (existing_str.find(ent.text) < 0): # !!! -------- !!!
                        unique.append(ent.text)
            brnd_count = 0
            # Join this record's distinct brands with ', ' (no trailing comma).
            for brnd in unique:
                delimiter = ''
                brnd_count += 1
                if brnd_count == len(unique):
                    brnd_delimiter = ''
                else:
                    brnd_delimiter = ', '
                unique_str = unique_str + brnd + brnd_delimiter
            if existing_str != '' and unique_str != '':
                unique_str = existing_str + ', ' + unique_str # add new brands to those from prior runs # !!! -------- !!!
            elif existing_str != '' and unique_str == '':
                unique_str = existing_str
            unique_str = unique_str.upper()
            # trim trailing commas
            #if unique_str[len(unique_str)-1:len(unique_str)] == ',': # !!! -------- !!!
            #    unique_str = unique_str[0:len(unique_str)-1] # !!! -------- !!!
            wBrand_ext.append(unique_str)
            print(j) # print record account to console
            unique.clear() # reset var for next record
            unique_str = '' # reset var for next record
        j += 1
    # FOR THE CHUNKER
    # It basically creates a new dataframe object with the new data row
    # at the end of the dataframe. The old dataframe will be unchanged.
    # data = [{'Region':'East','Company':'Shop Rite','Product':'Fruits','Month':'December','Sales': 1265}]
    # df.append(data,ignore_index=True,sort=False)
    # DataFrame.insert(self, loc, column, value, allow_duplicates=False)
    # loc : int # insertion index, must verify0 <= loc <= len(cols)
    # column: string, number, or hashable object -- this is label of inserted col
    # value: int, Series, or array-like
    # allow_duplicates: bool, optional
    # SETUP DATAFRAME ---------------------------------------------------------
    # first, combine newly extracted brands with any brands that already exist
    # in the wBrand_all column of the wx_v1 file
    '''
    nu_wBrand_all = [] # use this [] to combine wBrand_ext with wBrand_all
    nu_unique = []
    for row in df_tender[wBrand_all]:
        for str in row
            if
    '''
    #df_ofile = r'C:\Users\stacy\Desktop\IESA Project - Europe\IESA Phase 2\ners\db_data_cln_org_iesa_PPE_wx_v1.xlsx'
    # Write back over the input workbook, swapping in the extracted brands.
    df_ofile = tender_file
    df_del_dict = {}
    for head in df_tender:
        if head == 'wBrand_all':
            df_del_dict.update({head:wBrand_ext})
        else:
            df_del_dict.update({head:df_tender[head]})
    df_del = pd.DataFrame(df_del_dict)
    writer = pd.ExcelWriter(df_ofile)
    df_del.to_excel(writer, 'TestData', index=False)
    writer.save()
    # save the model ----------------------------------------------------------
    # save model with entity pattern updates made by the entity ruler
    output_dir = Path('ners_adhoc_model')
    if not output_dir.exists():
        output_dir.mkdir()
    nlp.to_disk(output_dir)
    print("\nNERS Model was saved to ", output_dir)
    print('Extracted Brands saved to:\n', df_ofile)
    # TEST -----------------------------
    #mpns = []
    # DISPLACY VISUALIZER -----------------------------------------------------
    # get results for html doc
    results = ''
    i = 0
    for item in alt_labels:
        if item == 'Brnd':
            results = results + '{}: {} tot {} unq\n'.format(item, total_found[i], total_unique_found[i])
        i += 1
    # store nlp object as string in html var
    spacer = '---------------------------------------------------------\n'
    header = 'Named Entities Found in Target File:\n'
    doc = nlp(header + spacer + results + spacer + tender)
    doc.user_data["title"] = "Named Entity Resolution System (NERS)"
    colors = {"BRND": "#FFDDA1", "WRWXSTART":"#ADC9E8", "WRWXEND":"#ADC9E8"} # blue: #0075C9 | lt blue: #ADC9E8
    options = {"ents": ["MPN", "BRND", "WRWXSTART", "WRWXEND"], "colors": colors}
    html = displacy.render(doc, style="ent", page=True, options=options) # use the entity visualizer
    # write the html string displacy folder
    folder_path = os.path.dirname(os.path.abspath(__file__))
    ofile = folder_path + '\\' + 'displacy\\index.html'
    with open(ofile, 'w') as data:
        data.write(html)
    print('\n' + results)
    # end program
    print('Done.')
# NOTE(review): main() takes two required arguments (patterns_file,
# tender_file), so running this module directly raises TypeError here.
# Presumably the module is normally driven from menu.py — TODO confirm.
if __name__ == '__main__' : main()
| [
"stcybrdgs@gmail.com"
] | stcybrdgs@gmail.com |
065a54d19c7a5ac9c2a38d3758fe745d16eff0fd | f2286fe652d709b2a98f41336da4e329f6098c4f | /UVA/637.py | 292f33f682bc11867eb3d88053ed4651dda5f4e0 | [] | no_license | michaelgy/PROBLEMS_PROGRAMMING | df984d2e33ad4c89f5b4c98c4429673b1c5ed16f | 3a942d356badff15fbc417d1d18ecf373843fa28 | refs/heads/master | 2021-06-04T08:43:12.111200 | 2021-01-21T06:41:23 | 2021-01-21T06:41:23 | 106,240,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | import math
n = int(input())
# UVA 637 "Booklet Printing": for each page count n (input 0 terminates the
# loop), print the duplex printing order.  Each sheet holds 4 logical pages;
# the front shows (highest-remaining, lowest-remaining) and the back shows
# the next pair, with slots past n printed as "Blank".
while n:
    print("Printing order for {} pages:".format(n))
    if n == 1:
        # Single-page booklet: one sheet, blank left slot on the front.
        print("Sheet 1, front: Blank, 1")
    else:
        sheets = math.ceil(n/4)  # number of physical sheets needed
        pages = sheets*4         # total page slots including blanks
        fw,bw = 1,pages          # forward / backward page cursors
        for e in range(sheets):
            for i in range(2):   # i == 0 -> front side, i == 1 -> back side
                if i:
                    text = "back "
                    v1 = fw
                    v2 = bw if bw <= n else "Blank"
                else:
                    text = "front"
                    v1 = bw if bw <= n else "Blank"
                    v2 = fw
                # Advance both cursors after every printed side.
                fw +=1
                bw -=1
                print("Sheet {}, {}: {}, {}".format(e+1,text,v1,v2))
    n = int(input())
| [
"noreply@github.com"
] | michaelgy.noreply@github.com |
451ba13fc5151a0a0206ee7c72d130f6de6efa53 | 821a4dc5c7c5dd49e77079b850b6ab1e18a7c114 | /digitalImageFiltering.py | a00fab16bc9f37bff07e2d17794a87c272fd7744 | [] | no_license | abrahamleyva/digitalImageFiltering | 0c065e38a7935b2e3614a3c6aa6a0645a5990aab | 13adcd7ee7e4159e4fd32ec02fc41eb1dcd2e76d | refs/heads/master | 2021-01-09T05:22:29.759614 | 2017-02-09T05:22:18 | 2017-02-09T05:22:18 | 80,756,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,515 | py | #Author: Abraham Medina
#Link to github https://github.com/abrahamleyva/digitalImageFiltering
from PIL import Image #Import Pillow Library
import os.path #Library for finiding number of fliles in a directory
def median(arr):
    """Return the median of *arr*, or None for an empty sequence.

    For even-length input the two middle values are averaged with integer
    division, keeping the result an int (required for RGB channel values).
    The input is not modified; a sorted copy is used.
    """
    arr = sorted(arr)
    if len(arr) == 0:
        return None
    # Bug fix: the original used '/', whose float result is not a valid list
    # index on Python 3; '//' restores the intended integer arithmetic.
    mid = len(arr) // 2
    if len(arr) % 2 == 0:
        return (arr[mid - 1] + arr[mid]) // 2
    return arr[mid]
# Temporal median filter: combine N aligned frames (images/1.png..N.png) by
# taking the per-pixel, per-channel median across all frames — this removes
# transient objects and salt-and-pepper noise from a static scene.
path = 'images' # directory that holds the input frames 1.png, 2.png, ...
# NOTE(review): the '- 1' assumes exactly one non-input file lives in the
# folder (e.g. a previous final.png) — on a clean directory this undercounts
# the frames by one. TODO confirm.
num_files = len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]) - 1 #num_files is set to the number of images in a directory
imgArr = [None] * num_files # raw PIL images
for i in range(0, num_files):
    imgArr[i] = Image.open("images/" + str(i + 1) + ".png") # frames are named 1.png..N.png
width, height = imgArr[0].size # all frames are assumed to share this size
# Per-channel scratch buffers holding one pixel's value from every frame.
redArr = [None] * num_files
greenArr = [None] * num_files
blueArr = [None] * num_files
# Final RGB values for the pixel currently being filtered.
finalR = 0;
finalG = 0;
finalB = 0;
# NOTE(review): seeds the output with the SECOND image (index 1) although the
# original comment said "first" — imgArr[0] was likely intended.
imgArr[1].save("images/final.png")
newImg = Image.open("images/final.png") # reopen the copy as the output image
finalPix = newImg.load() # pixel-access object for writing results
pixArr = [None] * num_files # pixel-access objects for the input frames
print("Processing...")
for i in range(0, num_files):
    pixArr[i] = imgArr[i].load() # direct pixel access per frame
for y in range(0, height): # vertical pixels
    for x in range(0, width): # horizontal pixels
        for a in range(0, num_files): # gather this pixel from every frame
            pix = pixArr[a]
            # assumes RGB frames (3-tuples per pixel) — TODO confirm image mode
            redArr[a], greenArr[a], blueArr[a] = pix[x, y]
        finalR = median(redArr) # per-channel median across all frames
        finalG = median(greenArr)
        finalB = median(blueArr)
        finalPix[x, y] = (finalR, finalG, finalB) # write the filtered pixel
newImg.save("images/final.png") # persist the filtered result
print("Complete!") | [
"abrahamedina24@gmail.com"
] | abrahamedina24@gmail.com |
8167f530c8d621c842f6bfa73f8385e93930cb17 | 8d19a238bab9bb4464df698d12ec1e7ab1b5fe31 | /server/petslife/wsgi.py | 32c6ce61283e3a07a0480eb0319040829ebbd622 | [] | no_license | Angel-Chang/PLA | 24ef0820938ec57c613fa3dd33eef21d99918f4e | 53f16788256eead2f18e454cc3b079426753c2bb | refs/heads/master | 2023-07-13T01:24:57.696994 | 2021-08-24T14:57:57 | 2021-08-24T14:57:57 | 399,505,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for petslife project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'petslife.settings')
# Module-level WSGI callable that application servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"huichi.chang@gmail.com"
] | huichi.chang@gmail.com |
2e7c6c32a7ebb0da9fef253485580ef2ec69796b | beb68350b8f4067ded6d0e51c8276f69a42cd6cb | /APPOINTMENT_APP/api/user.py | 77f8835067eb963f9dcc69155dbc7499a84f16f6 | [] | no_license | karthikravinatha/appointment-backend | 15fe773d4f62b01f12225a269d8d5a7f34cf8e8d | fb491d05936a262c8402d858afb2d902fd16925b | refs/heads/master | 2022-05-27T19:09:29.575555 | 2020-05-03T16:45:50 | 2020-05-03T16:45:50 | 260,975,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,824 | py | """
Created By : <Auto generated code>
Created On :
Reviewed By :
Reviewed On :
Version :
"""
import json
from django.http import HttpRequest
from APPOINTMENT_APP.api.base_controller import BaseController
from APPOINTMENT_APP.models.user import UserModel
from APPOINTMENT_APP.services.user_service import userService
from APPOINTMENT_APP.utils.constants.constants import DataTypes, HttpMethodType, AppConstants
from APPOINTMENT_APP.utils.helpers.general_helper import IntHelper, FloatHelper
from APPOINTMENT_APP.utils.helpers.request_helper import RequestConfig, ParamsObject
class user(BaseController):
    """REST-style controller exposing CRUD and listing endpoints for users.

    Each handler receives the Django ``HttpRequest``, converts the payload
    into model/parameter objects and delegates to ``userService``.
    """

    def __init__(self, request):
        BaseController.__init__(self, request)

    @staticmethod
    def _user_from_json(user_json):
        """Map a full request JSON dict onto a populated UserModel.

        Shared by update() and delete(), which previously duplicated this
        field-by-field copy line for line.
        """
        user_object: UserModel = UserModel()
        user_object.created_on = user_json.get("created_on")
        user_object.mobile_number = user_json.get("mobile_number")
        user_object.designation = user_json.get("designation")
        user_object.parent_id = user_json.get("parent_id")
        user_object.email = user_json.get("email")
        user_object.photo = user_json.get("photo")
        user_object.is_delete = user_json.get("is_delete")
        user_object.status = user_json.get("status")
        user_object.is_primary = user_json.get("is_primary")
        user_object.user_id = user_json.get("user_id")
        user_object.landline_number = user_json.get("landline_number")
        user_object.user_name = user_json.get("user_name")
        user_object.last_updated_by = user_json.get("last_updated_by")
        user_object.user_password = user_json.get("user_password")
        user_object.created_by = user_json.get("created_by")
        user_object.last_updated_on = user_json.get("last_updated_on")
        return user_object

    def add(self, request: HttpRequest):
        """Create a new user from the ``user_json`` POST field.

        Only the creation-time subset of fields is read here (no audit or
        status columns), and the service receives the session ext params.
        """
        user_json = json.loads(request.POST.get("user_json"))
        ext_params = self.get_entity_user_ext_params()
        user_object: UserModel = UserModel()
        user_object.user_name = user_json.get("user_name")
        user_object.mobile_number = user_json.get("mobile_number")
        user_object.landline_number = user_json.get("landline_number")
        user_object.email = user_json.get("email")
        user_object.user_password = user_json.get("user_password")
        user_object.parent_id = user_json.get("parent_id")
        user_object.designation = user_json.get("designation")
        user_object.files = user_json.get("files")
        user_service: userService = userService(ext_params)
        user_object = user_service.add(user_object)
        return self.send_response(user_object)

    def update(self, request: HttpRequest):
        """Update an existing user; expects the full record in ``user_json``."""
        user_json = json.loads(request.POST.get("user_json"))
        user_object = self._user_from_json(user_json)
        user_service: userService = userService()
        user_object = user_service.update(user_object)
        return self.send_response(user_object)

    def delete(self, request: HttpRequest):
        """Delete the user described by the full record in ``user_json``."""
        user_json = json.loads(request.POST.get("user_json"))
        user_object = self._user_from_json(user_json)
        user_service: userService = userService()
        user_object = user_service.delete(user_object)
        return self.send_response(user_object)

    def get(self, request: HttpRequest):
        """Fetch a single user by the mandatory integer ``id`` parameter."""
        params = [
            {"id": RequestConfig(from_session=False, nullable=False, datatype=DataTypes.INT)}
        ]
        params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
        user_service: userService = userService()
        data = user_service.get(params)
        return self.send_response(data)

    def get_list(self, request: HttpRequest):
        """Fetch several users by a (comma-separated) ``ids`` string."""
        params = [
            {"ids": RequestConfig(from_session=False, nullable=False, datatype=DataTypes.STRING, default='')}
        ]
        params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
        user_service: userService = userService()
        data = user_service.get_list(params)
        return self.send_response(data)

    def get_object(self, request: HttpRequest):
        """Fetch one user as a rich object (no query parameters)."""
        params = []
        params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
        user_service: userService = userService()
        data = user_service.get_object(params)
        return self.send_response(data)

    def get_list_object(self, request: HttpRequest):
        """Fetch all users as rich objects."""
        params = []
        params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
        user_service: userService = userService()
        data = user_service.get_list_object(params)
        return self.send_response(data)

    def get_list_object_page(self, request: HttpRequest):
        """Fetch users as rich objects, one page at a time."""
        params = []
        params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
        user_service: userService = userService()
        data = user_service.get_list_object_paginated(params)
        return self.send_response(data)
| [
"karthikravinatha@gmail.com"
] | karthikravinatha@gmail.com |
5a216fa5de0a4c69886fdbfc02633f755a409b30 | bd8122aa20f4318f690f5d7686bc44b3c739fb59 | /src/saml2test/jsonconfig.py | f7613b91833494b04649a8a411e6deb2f81d5996 | [
"BSD-2-Clause"
] | permissive | identinetics/saml2test2 | 5cda487ffa4facb38b1036cdb6db7c178b3e4b2e | 0e439096c7a217a56e7c203f9001c0f9e2dedaa0 | refs/heads/master | 2021-01-21T04:03:23.133739 | 2016-11-09T22:43:15 | 2016-11-09T22:43:15 | 46,069,892 | 3 | 1 | null | 2016-05-31T14:20:40 | 2015-11-12T17:47:08 | Python | UTF-8 | Python | false | false | 427 | py | from saml2test.baseconfig import BaseConfig
class JsonConfig(BaseConfig):
    """BaseConfig variant whose settings come from a parsed JSON document.

    After ``config()`` runs, every top-level key of the JSON document is
    available as an attribute of the instance.
    """

    def __init__(self, json_data, configdir):
        self.json_data = json_data        # parsed JSON document (dict)
        self.CONFIG_SRC_DIR = configdir   # directory the config was read from
        super(JsonConfig, self).__init__()

    def config(self):
        """Apply the base configuration, then mirror JSON keys as attributes."""
        super(JsonConfig, self).config()
        # iterate items() directly instead of keys() plus a second lookup
        for key, val in self.json_data.items():
            setattr(self, key, val)
"thomas@warwaris.at"
] | thomas@warwaris.at |
3bd19a7d4ef17a4ca4db31969790ca90fb78b0ec | c28b2897c899d254b99a0db0127a62ce94331c61 | /train_rc_slo.py | ad15f2440db53bc31daa0cbb6cd2ed59dcbb092a | [] | no_license | sindhura234/Dense_Unet_Keras | 00940314de4f1dfee9d611956e3cf0cb4440aec0 | a62ae052a7da554fa0655fef05b65b123fe36c83 | refs/heads/master | 2023-04-13T18:08:56.555227 | 2019-11-18T19:56:26 | 2019-11-18T19:56:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,669 | py | import os
import numpy as np
import cv2
from keras.callbacks import TensorBoard, ModelCheckpoint
np.random.seed(42)
import scipy.misc as mc
# ---- dataset locations (RC-SLO retinal vessel dataset) ----------------------
data_location = ''
training_images_loc = data_location + 'RC_SLO/train/image/'
training_label_loc = data_location + 'RC_SLO/train/label/'
testing_images_loc = data_location + 'RC_SLO/test/image/'
testing_label_loc = data_location + 'RC_SLO/test/label/'
train_files = os.listdir(training_images_loc)
train_data = []
train_label = []
# Every image/label pair is zero-padded to a square of this size before any
# resizing, so the aspect ratio is preserved.
desired_size=368
for i in train_files:
    # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2 -- this
    # script needs an old SciPy (or a migration to imageio.imread).
    im = mc.imread(training_images_loc + i)
    # ground-truth masks share the image's basename with a "_GT.tif" suffix
    label = mc.imread(training_label_loc + i.split(".")[0] + "_GT.tif")
    old_size = im.shape[:2]  # (height, width)
    delta_w = desired_size - old_size[1]
    delta_h = desired_size - old_size[0]
    # split the padding as evenly as possible between the two sides
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)
    color = [0, 0, 0] # black padding for the RGB image
    color2 = [0] # black padding for the single-channel mask
    new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                value=color)
    new_label = cv2.copyMakeBorder(label, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                   value=color2)
    train_data.append(cv2.resize(new_im, (desired_size, desired_size)))
    temp = cv2.resize(new_label,(desired_size, desired_size))
    # re-binarise the mask: interpolation during resize introduces grey values
    _, temp = cv2.threshold(temp, 127, 255, cv2.THRESH_BINARY)
    train_label.append(temp)
train_data = np.array(train_data)
train_label = np.array(train_label)
# NOTE(review): the test split is listed here but never loaded or used below.
test_files = os.listdir(testing_images_loc)
test_data = []
test_label = []
# scale pixel values to [0, 1] and give Keras explicit channel-last shapes
x_train = train_data.astype('float32') / 255.
y_train = train_label.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), desired_size, desired_size, 3)) # adapt this if using `channels_first` image data format
y_train = np.reshape(y_train, (len(y_train), desired_size, desired_size, 1)) # adapt this if using `channels_first` image data format
# NOTE(review): this TensorBoard instance is created and discarded; only the
# one passed to model.fit's callbacks below has any effect.
TensorBoard(log_dir='./autoencoder', histogram_freq=0,
            write_graph=True, write_images=True)
from DenseUNet import *
model=DenseUNet(input_size=(desired_size,desired_size,3),start_neurons=16,keep_prob=0.9,block_size=7)
weight="Model/RC_SLO/DenseUNet.h5"
restore=False # set True to resume training from the saved checkpoint
if restore and os.path.isfile(weight):
    model.load_weights(weight)
# keep only the weights achieving the best validation accuracy
model_checkpoint = ModelCheckpoint(weight, monitor='val_acc', verbose=1, save_best_only=True)
model.fit(x_train, y_train,
                epochs=300,
                batch_size=1,
                validation_split=0.1,
                # validation_data=(x_test, y_test),
                shuffle=True,
                callbacks= [TensorBoard(log_dir='./autoencoder'), model_checkpoint])
| [
"clguo.ai@gmail.com"
] | clguo.ai@gmail.com |
4268c45f6d8f1b9d2b86c3c61dbe28db25668d13 | 37f7581ad37725b027d1815145d167f84bcd247e | /union/admin.py | 19dbb910eb748cfbdc645aab85ce5c534a0617f4 | [
"LicenseRef-scancode-sata"
] | permissive | HASSAN1A/Student-Union | 4f0c61db1bcbb7823b0b7e7472c60ee9c7e9973a | c1ed6941479216e2cf9aaa801dd9ab0288c2c526 | refs/heads/master | 2023-03-20T14:53:10.525231 | 2020-12-10T11:04:43 | 2020-12-10T11:04:43 | 319,734,757 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from django.contrib import admin
from .models import StudentUnion,Business,Post,EmergencyService
# Register your models here.
# Make each model editable through the Django admin site.  Registration
# order matches the original one-call-per-model version.
for union_model in (StudentUnion, Business, Post, EmergencyService):
    admin.site.register(union_model)
| [
"okothhassanjuma@gmail.com"
] | okothhassanjuma@gmail.com |
f70edf2762baabf819ddd95626d014e08e3511df | 34905a806594f9b34f25a1d8c2271bd52b9e25f1 | /src/main.py | 94fffd9c859a283c2e342d035dab2fda4a442206 | [] | no_license | christianwbsn/richeese-transformator | b64c00e56f735bf8236b04ad6bf3adab2eb118d1 | 3d09422c6bac47a9fae235bb1eec6fc30129c681 | refs/heads/master | 2021-09-17T02:02:23.743290 | 2018-06-26T15:18:00 | 2018-06-26T15:18:00 | 110,575,707 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,344 | py | from __future__ import print_function
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from math import pi,sin,cos
import thread
from time import sleep
import copy
import transformation
# --- state shared between the GLUT render loop and the command thread --------
quited = False # set True by the 'exit' command; draw() then terminates the app
NbVertex = 0 # vertex count (NOTE(review): shape_input stores raw_input's *string* here; consumers wrap it in int())
Vertices = [] # current [x, y, z] per vertex, mutated in place by the transformations
initVertices=[] # snapshot taken by command_input, used by the 'reset' animation
def init_draw2d():
    """Switch OpenGL into a 2D orthographic projection for scene drawing.

    Saves the current modelview and projection matrices so that
    uninit_draw2d() can restore the previous state afterwards.
    """
    glPushMatrix() # save the modelview matrix
    glLoadIdentity()
    glMatrixMode(GL_PROJECTION)
    glPushMatrix() # save the projection matrix
    glLoadIdentity()
    gluOrtho2D(-600,600,-500,500) # world window: x in [-600, 600], y in [-500, 500]
    glDepthFunc(GL_ALWAYS) # 2D drawing always passes the depth test
def drawPolygon(list_point):
    """Draw the user polygon: translucent blue fill plus a black outline.

    list_point: vertex array usable by glVertexPointer (3 floats per
    vertex); the vertex count comes from the module-level NbVertex.
    """
    global NbVertex
    glEnable (GL_BLEND) # blending produces the semi-transparent fill
    glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    glColor4f(0, 0.482 ,0.655,0.7) # fill colour (translucent blue)
    glEnableClientState(GL_VERTEX_ARRAY)
    glVertexPointer(3, GL_FLOAT, 0,list_point)
    glDrawArrays(GL_POLYGON, 0, int(NbVertex)) # filled interior
    glColor4f(0, 0.0 ,0.0, 0.8) # outline colour (near-opaque black)
    glDrawArrays(GL_LINE_LOOP, 0, int(NbVertex)) # border
    glDisableClientState(GL_VERTEX_ARRAY)
def uninit_draw2d():
    """Restore the 3D projection/modelview state saved by init_draw2d()."""
    glDepthFunc(GL_LESS) # back to normal depth testing
    glPopMatrix() # restore the projection matrix
    glMatrixMode(GL_MODELVIEW)
    glPopMatrix() # restore the modelview matrix
def print_text(str, x, y, r, g, b):
    """Render the string as bitmap text at (x, y) in colour (r, g, b).

    Note: the first parameter shadows the builtin str(); the name is kept
    unchanged for interface compatibility with existing callers.
    """
    glColor3f(r, g, b)
    glRasterPos2f(x, y)
    for character in str:
        glutBitmapCharacter(GLUT_BITMAP_8_BY_13, ord(character))
def draw_axis():
    """Draw the three coordinate axes as black lines spanning [-600, 600]."""
    glLineWidth(1.5);
    glColor3f(0, 0, 0)
    # one GL_LINES segment per axis (x, y, z), issued in the original order
    axis_segments = (((-600, 0, 0), (600, 0, 0)),
                     ((0, -600, 0), (0, 600, 0)),
                     ((0, 0, -600), (0, 0, 600)))
    for start_point, end_point in axis_segments:
        glBegin(GL_LINES)
        glVertex3f(*start_point)
        glVertex3f(*end_point)
        glEnd()
def init_gl():
    """Create the GLUT window and register the rendering callbacks."""
    glutInit()
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
    glutInitWindowSize(1000, 800) # window size in pixels
    glutInitWindowPosition(200,0) # initial window position on screen
    glutCreateWindow("Richeese Transformator") # window title
    glutDisplayFunc(draw) # render callback
    glutIdleFunc(draw) # redraw continuously while idle (animates 'reset')
    glEnable(GL_DEPTH_TEST)
    glDepthFunc(GL_LEQUAL)
    glShadeModel(GL_SMOOTH)
def draw_grid():
    """Draw the light minor grid, the darker major grid and the scale labels."""
    glLineWidth(0.25); # thin lines for the minor grid
    glColor3f(0.90, 0.90, 0.90)
    glBegin(GL_LINES)
    for i in range(-600,600,20): # minor grid, 20-unit spacing
        glVertex3f(float(i), -600.0, 0.0)
        glVertex3f(float(i), 600.0, 0.0)
        glVertex3f(-600.0, float(i), 0.0)
        glVertex3f(600.0, float(i), 0.0)
    glEnd()
    glLineWidth(0.5); # slightly thicker lines for the major grid
    glColor3f(0.65, 0.65, 0.65)
    glBegin(GL_LINES)
    for i in range(-600,600,100): # major grid, 100-unit spacing
        glVertex3f(float(i), -600.0, 0.0)
        glVertex3f(float(i), 600.0, 0.0)
        glVertex3f(-600.0, float(i), 0.0)
        glVertex3f(600.0, float(i), 0.0)
    glEnd()
    for i in range(-500,600,100): # numeric labels every 100 units
        if(i==0):
            print_text(str(i),float(i)+5,-20, 0, 0, 0) # origin gets a single offset label
        elif(i==500):
            print_text(str(i), float(i)-10,5, 0, 0, 0) # x axis only (y label would sit on the top edge)
        else:
            print_text(str(i), float(i)-10,5, 0, 0, 0) # along the x axis
            print_text(str(i), 5,float(i)-10, 0, 0, 0) # along the y axis
def draw():
    """GLUT display callback: clear, draw grid/axes/polygon, swap buffers."""
    # NOTE(review): triangleVertices is declared global but never defined or
    # used anywhere visible -- probably a leftover.
    global quited, triangleVertices, Vertices
    if(quited):
        sys.exit(0) # 'exit' was entered on the command thread
    glClearColor(1.0,1.0,1.0,0.0) # white background
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    glViewport(0, 0, 1000, 800) # match the window size
    init_draw2d()
    draw_grid()
    draw_axis()
    drawPolygon(Vertices)
    print_text('X', 580,10, 0, 0, 0) # axis labels
    print_text('Y', 10,480, 0, 0, 0)
    uninit_draw2d()
    glutSwapBuffers() # double-buffered: present the finished frame
def terminal_display():
    """Print the ASCII-art banner and author credits to the terminal."""
    print (" _______  _____   ______  ____  ____  ________  ________  ______   ________ ")
    print ("|_   ___ \ |_   _|.' ___  ||_   ||   _||_   __  ||_   __  |.' ____ \|_   __  |")
    print ("  | |__) |  | | / .'   \_|  | |__| |    | |_ \_|  | |_ \_|| (___ \_|  | |_ \_|")
    print ("  |  __ /   | | | |         |  __  |    |  _| _   |  _| _  '.____`.   |  _| _ ")
    print (" _| |  \ \_ _| |_\ `.___.'\ _| |  | |_ _| |__/ | _| |__/ || \____) | _| |__/ |")
    print ("|____| |___||_____|`.____ .'|____||____||________||________| \______.'|________|")
    print (" _+_+_+_+_+_+_+_+_+_+_+_+_+_ LINEAR TRANSFORMATION _+_+_+_+_+_+_+_+_+_+_+_+_+_+_")
    print ("============================ERIC JONATHAN-13516117==============================")
    print ("==========================CHRISTIAN WIBISONO-13516147===========================")
def shape_input():
    """Read the polygon from stdin: a vertex count, then one <x, y> per vertex.

    Populates the module-level Vertices (working copy) and initVertices
    (reference snapshot for the 'reset' command).  Runs before the GL
    window is created; uses Python 2's raw_input.
    """
    global NbVertex
    NbVertex = raw_input('> Insert number of points : ')
    global Vertices, initVertices
    for i in range(int(NbVertex)):
        # prompt for the two planar coordinates; z is always zero
        x = float(raw_input("> X" + str(i + 1) + " : "))
        y = float(raw_input("> Y" + str(i + 1) + " : "))
        point = [x, y, float(0)]
        print("> Point-{0} : < {1} , {2} >".format(int(i + 1), point[0], point[1]))
        # append independent copies so later mutation cannot alias them
        Vertices.append(list(point))
        initVertices.append(list(point))
def _apply_transform(cmd):
    """Dispatch one parsed transformation command onto the global polygon.

    cmd : list of strings, e.g. ['rotate', '30', '0', '0'].
    Returns True when cmd named a known transformation, False otherwise.
    The transformation.* routines mutate the global Vertices in place.
    This helper replaces the command table that was previously duplicated
    between the single-command path and the 'multiple' path.
    """
    if cmd[0] == 'translate':
        transformation.translate(Vertices, NbVertex, cmd[1], cmd[2])
    elif cmd[0] == 'rotate':
        transformation.rotate(Vertices, NbVertex, cmd[1], cmd[2], cmd[3])
    elif cmd[0] == 'reflect':
        transformation.reflect(Vertices, NbVertex, cmd[1])
    elif cmd[0] == 'dilate':
        transformation.dilate(Vertices, NbVertex, cmd[1])
    elif cmd[0] == 'stretch':
        transformation.stretch(Vertices, NbVertex, cmd[1], cmd[2])
    elif cmd[0] == 'shear':
        transformation.shear(Vertices, NbVertex, cmd[1], cmd[2])
    elif cmd[0] == 'custom':
        # 2x2 user-supplied transformation matrix, row-major
        trans_matrix = [[float(cmd[1]), float(cmd[2])],
                        [float(cmd[3]), float(cmd[4])]]
        transformation.custom(Vertices, NbVertex, trans_matrix)
    else:
        return False
    return True

def command_input():
    """Interactive command loop (runs on a worker thread).

    Reads transformation commands from stdin and applies them to the global
    polygon until 'exit' is entered.  'reset' animates the polygon back to
    its initial shape; 'multiple <n>' collects n commands and applies them
    in order; 'help' lists the available commands.
    """
    global NbVertex, Vertices, initVertices, quited
    initVertices = copy.deepcopy(Vertices)
    while True:
        temp = raw_input('> Insert command : ')
        command = temp.split(' ')
        if _apply_transform(command):
            pass  # single transformation handled by the shared dispatcher
        elif command[0] == 'multiple':
            # collect <n> command lines first, then apply them in order
            multi = []
            for i in range(0, int(command[1])):
                text = raw_input('> ')
                multi.append(text)
            for line in multi:
                # unknown commands inside a batch are silently ignored, as before
                _apply_transform(line.split(' '))
        elif command[0] == 'reset':
            # per-node offset back to the initial shape ...
            dx = []
            dy = []
            for i in range(0, int(NbVertex)):
                xx = initVertices[i][0] - Vertices[i][0]
                yy = initVertices[i][1] - Vertices[i][1]
                dx.append(xx)
                dy.append(yy)
            # ... replayed over 400 small frames so the reset is animated
            for frame in range(400):
                for i in range(0, int(NbVertex)):
                    Vertices[i][0] += float(dx[i]) / 400
                    Vertices[i][1] += float(dy[i]) / 400
                sleep(0.001)
        elif command[0] == 'help':
            print("> Here's some command that can be used")
            print("> translate <dx> <dy> : translate x by dx and y by dy")
            print("> dilate <k> : dilate by k")
            print("> rotate <deg> <a> <b>: rotate by deg")
            print("> reflect <param>: reflect with param")
            print("> shear <param> <k>: shear with param")
            print("> stretch <param> <k>: stretch with param")
            print("> custom <a> <b> <c> <d>: custom transformation")
            print("> multiple <n>: multiple transformation")
            print("> reset: reset everything")
            print("> exit: exit from program")
        elif command[0] == 'exit':
            quited = True  # also signals the GL draw() callback to terminate
            print("> Goodbye !")
            sys.exit(0)
        else:
            print("> Type 'help' to see valid command")
terminal_display() # show the banner and credits
shape_input() # read the polygon vertices from stdin
# run the interactive command parser on a worker thread so the GLUT loop
# below can keep rendering (the 'thread' module implies Python 2)
thread.start_new_thread(command_input, () )
init_gl() # create the window and register callbacks
glutMainLoop() # hand control to GLUT; never returns
| [
"christian.wibisono7@gmail.com"
] | christian.wibisono7@gmail.com |
37002e1ba284405e298ce91fda17c6f5fe233fc6 | 7a398f8dbcf465dc182d63dfe11a71d94a68c235 | /SyntaxEx17(aiohttp_sqlalchemy_testing)/venv/bin/pip3 | 2e1f00c6a3a4264c364e2f58765a35e9509c9839 | [] | no_license | apolonis/PythonExamples | 48b0bd6c0e86388cc2772b27fcdeffacb7d0191f | 018c3ccf0f1f57f807e8e9059afa3db408094a5c | refs/heads/master | 2020-09-20T07:48:22.233460 | 2019-12-16T14:39:34 | 2019-12-16T14:39:34 | 224,413,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | #!/home/marko/PycharmProjects/SyntaxEx17(aiohttp_sqlalchemy_testing)/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # normalise argv[0]: strip the "-script.py(w)"/".exe" wrapper suffix that
    # setuptools adds so pip sees a clean program name
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"riddickschronicles@gmail.com"
] | riddickschronicles@gmail.com | |
afb945e073cccc744f20c091fdbe317e96e32d71 | 94ad39715bdc5590079dec4f10f36ed70a1685cb | /project-mid-client2.py | fcc069d419c3507fd4d43bd4f0c254f9696f0a79 | [] | no_license | karamck/ECE-387-Midterm-Project | 8b47024ad78dcc3e27f3ab87291e5dcb6862ba57 | 06980bf9c9389325cae432a6f2fcb554837fa48a | refs/heads/master | 2020-04-25T07:31:13.532350 | 2019-03-02T00:38:37 | 2019-03-02T00:38:37 | 172,616,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,600 | py | # project-mid-client2.py
# Author: Christopher Karam
import socket # Required to create a network
import sys # Required to read command lines
from gpiozero import LED, Button
# Recommended for controling loops and methods
connected = False # main-loop flag; True only while the socket is connected
# GPIO wiring (BCM numbering) -------------------------------------------------
# Four state buttons whose presses are reported to the server, plus a
# dedicated button that shuts this client down.
#button setup
btn1 = Button(4)
btn2 = Button(17)
btn3 = Button(27)
btn4 = Button(22)
btn_close = Button(26) # pressing this closes the client's socket
#led setup -- four LEDs driven by the state string received from the server
led1 = LED(23)
led2 = LED(24)
led3 = LED(25)
led4 = LED(13)
# Method for reading button inputs.
# Creates a string of 1's and 0's of each button combined.
# 0 for pressed and 1 otherwise (compare to project-mid-client1.py).
# Also checks if the close button was pressed to close the socket.
#
# Input: the socket object
# Output: string containing each button's state
def checking_buttons(sock):
    """Poll the buttons and build the 4-character state string for the server.

    Each character is "0" when the matching button is pressed and "1"
    otherwise (the opposite convention of project-mid-client1.py).
    If the shutdown button is pressed, the socket is closed and None is
    returned; the caller breaks out of its loop on that None.

    Input:  sock -- the connected socket object
    Output: "b1b2b3b4" state string, or None after closing the socket
    """
    if btn_close.is_pressed:
        print("CLOSING CONNECTION!")
        sock.close()
        # The original also set a *local* `connected = False` here, which
        # never touched the module-level flag; the caller already relies on
        # the None return, so the dead assignment was removed.
        return None
    # "0" = pressed, "1" = released, in button order 1..4
    return "".join("0" if button.is_pressed else "1"
                   for button in (btn1, btn2, btn3, btn4))
# Method that matches each character in data to the respective LED.
# (Compare to project-mid-client1.py)
#
# Input: data received by the server that was concverted to a string
def write_led(data):
    """Drive the four LEDs from a 4-character state string ("0" = on).

    data: string received from the server; character k controls LED k+1.
    Indexing is kept explicit so short strings fail loudly, as before.
    """
    leds = (led1, led2, led3, led4)
    for position in range(4):
        # same active-low convention as the buttons: "0" lights the LED
        if data[position] == "0":
            leds[position].on()
        else:
            leds[position].off()
# Recommended #################################################################
#
# Should be included to check that the command line input is correct. The port
# entered should be the same as the server program and the host should be the
# IPv4 of the server pi.
# make sure the command line input is correct
# Validate the command line: exactly <host> <port> must be supplied, where
# <port> matches the server program and <host> is the server Pi's IPv4.
if len(sys.argv) != 3:
    print("invalid command line: <host> <port>")
    sys.exit(1)
# Main body -------------------------------------------------------------------
# The client only needs to connect (no bind/listen/accept), then loop:
# send the button state, receive the LED state, until either side closes.
host, port = sys.argv[1], int(sys.argv[2])
# Create the client socket (closed automatically when the with-block exits)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    # Connect to the server
    s.connect((host,port))
    connected = True
    print('Connected to', (host,port))
    while connected:
        # Send our button state; a None return means the close button was
        # pressed and the socket is already closed
        outMessage = checking_buttons(s)
        if not outMessage:
            break
        outMessage = outMessage.encode('utf-8')
        s.sendall(outMessage)
        # An empty or oversized reply means the server closed (or told us
        # to close); otherwise mirror the received state onto the LEDs
        data = s.recv(1024)
        data = data.decode('utf-8')
        if len(data) == 0 or len(data) > 4 :
            print("Server closed")
            break
        write_led(data)
| [
"noreply@github.com"
] | karamck.noreply@github.com |
c430193f48d5d418d4945c7fcc8aa2fe3f0325dd | 5c98cb9c500eacc14616072a0691c57b762af7a0 | /src/PyBeam/Solver/test/test_PyBeam_Sol112.py | 88ad604cdb556171206a796371bae36b29271076 | [] | no_license | SalvatoreMaraniello/SHARPy | c029900dc1b3e9a6b2f0938cbf872f73ab505d8e | f1570b257035bbdf2549f97766f0eea5f24800b2 | refs/heads/master | 2021-01-17T09:07:50.427566 | 2017-05-04T14:21:21 | 2017-05-04T14:21:21 | 21,275,945 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 43,995 | py | '''
@author: salvatore maraniello
@contact: salvatore.maraniello10@imperial.ac.uk
@date: 16 Feb 2017
@brief: unittest class to test the gravity loads implementation in the PyBeam
solution methods.
@warning: - Very low fidelity model is used for sol 312
- Sol112 and Sol112F90 produce different results (within a 0.1%
relative error)
@quote:
'''
import os
import sys
import numpy as np
import scipy.optimize as scopt
import scipy.integrate as scint
import ctypes as ct
import matplotlib.pyplot as plt
import unittest
from IPython import embed
sys.path.append( os.environ["SHARPYDIR"]+'/src' )
sys.path.append( os.environ["SHARPYDIR"]+'/src/Main' )
import SharPySettings as Settings
import DerivedTypes
from PyBeam.Solver.NonlinearStatic import Solve_Py as Sol112
from PyBeam.Solver.NonlinearStatic import Solve_F90 as Sol112F90
#from PyBeam.Solver.NonlinearDynamic import Solve_Py as Sol312
import PyLibs.numerics.integr
import PyLibs.numerics.diff
import PyLibs.plot.dyn
from PyLibs.plot.shared import fontlabel, params
import PyBeam.Utils.PostPr
import XbeamLib
import PyLibs.CVP.fourier
import lib_fem
TOL=1e-8
# ------------------------------------------------------------------------------
class Test112F90(unittest.TestCase):
'''
Each method defined in this class contains a test case.
@warning: by default, only functions whose name starts with 'test' will be
run during testing.
This test looks predominantly at boundary conditions. The beam deflections
are computed against reference analytical solutions (self.linbeamunif) from
linear theory for small displacements.
A higher tolerance is used in this test as, especially in the hinged case,
the beam extensional stiffness affects the results also for low
displacements
A higher tolerance is used in these tests because the nonlinear solution
strongly depends on the extensional stiffness EA when the beam is supported
on both sides while the linear solution neglects this effect.
A further test with low tolerance is used to check whether the solution
changes from one implementation to another.
'''
    def setUp(self):
        '''
        Common set-up run before each test: builds the static solver options
        (sol 112), a 10-element / 3-noded-element beam of unit length and
        the output settings shared by all cases.
        '''
        self.TOL112=1e-2 # tolerance vs. the analytical linear solution
        self.PLOT=True # save a deformed-shape plot for each test
        # Beam solution options
        self.xbopts = DerivedTypes.Xbopts(FollowerForce = ct.c_bool(False),
                                 FollowerForceRig=ct.c_bool(False),
                                 MaxIterations = ct.c_int(20),
                                 PrintInfo = ct.c_bool(True),
                                 OutInaframe = ct.c_bool(True),
                                 NumLoadSteps = ct.c_int(15),
                                 Solution = ct.c_int(112),
                                 MinDelta = ct.c_double(1e-6),
                                 NewmarkDamp = ct.c_double(1e-2))
        # Beam input
        # NOTE(review): g=0.980 (a tenth of standard gravity) -- presumably
        # chosen to keep deflections within the linear range; confirm.
        self.xbinput = DerivedTypes.Xbinput(NumNodesElem=3,NumElems=10,g=0.980)
        self.xbinput.BeamLength = 1.0
        self.xbinput.PsiA_G=0.0*np.array([1,0,0]) # FoR A aligned with FoR G
        # Mass/Stiffness (diagonal sectional matrices)
        mvec=np.array([.15,.15,.15,.1,.001,.001])
        for ii in range(6): self.xbinput.BeamMass[ii,ii]=mvec[ii]
        # stiff axially and in shear, compliant in torsion/bending
        kvec=np.array([1e5,1e5,1e5,.15,15,15])
        for ii in range(6): self.xbinput.BeamStiffness[ii,ii]=kvec[ii]
        # saving options
        self.savedict=Settings.SaveDict
        self.savedict['OutputDir'] = Settings.SharPyProjectDir + \
                                   'output/tests/PyBeam/NonlinearStatic/'
        return self
def linbeamunif(self,xv,EI,q,BCs):
'''
Linear solution under uniform load
'''
L=xv[-1]-xv[0]
if BCs=='CF':
zv = q/(24.*EI) * xv**2 * ( 6.*L**2 - 4.*L*xv + xv**2 )
elif BCs=='ST' or BCs=='TS':
zv = q/(24.*EI) * xv * ( L**3 - 2.*L*xv**2 + xv**3 )
elif BCs=='CC':
zv = q/(24.*EI) * xv**2 * (L-xv)**2
else:
print('Analytical solution not available')
zv=xv
return zv
    def test112_F90_CFlinunif(self):
        '''
        Clamped-free (cantilever) beam under gravity: the Sol112 F90 tip
        deflection is checked against the linear analytical solution and
        against a frozen numerical reference value.
        '''
        self.xbinput.BConds='CF'
        self.savedict['OutputFileRoot'] = 'F90beam%s_NE%0.2d' \
                                 %( self.xbinput.BConds, self.xbinput.NumElems)
        xbout=Sol112F90(self.xbinput, self.xbopts, SaveDict=self.savedict)
        # Reference solution (linear theory, uniform load q = -m*g)
        PosDef=xbout.PosDeforStatic
        PosIni=xbout.PosIni
        EI=self.xbinput.BeamStiffness[-1,-1]
        L=self.xbinput.BeamLength
        q=-self.xbinput.g*self.xbinput.BeamMass[0,0]
        # shift the abscissa so it starts at zero when the root is offset
        if PosDef[0,0]>-1e-15: xvref=PosIni[:,0]
        else: xvref=PosIni[:,0]-PosIni[0,0]
        zvref = self.linbeamunif(xvref,EI,q,self.xbinput.BConds)
        del xvref
        TipGz_ref = zvref[-1]
        # Extract SHARPy solution: rotate nodal positions from FoR A to FoR G
        Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
        PosDefG = 0.0*PosDef
        for ii in range(self.xbinput.NumNodesTot):
            PosDefG[ii,:]=np.dot(Cao,PosDef[ii,:])
        #midnode=int((self.xbinput.NumNodesTot-1)/2)
        # Extract tip position
        TipGz = PosDefG[-1,2]
        # check accuracy against analytical (loose tolerance, see class doc)
        Error = np.abs((TipGz-TipGz_ref)/TipGz_ref)
        self.assertTrue(Error<self.TOL112, msg='Tip position error of %.3e '
                        'above tolerance %.2e!' %(Error,self.TOL112))
        # check accuracy against previous numerical solution (regression guard)
        TipGz_num = -0.0012267543687054731
        Error_num = np.abs((TipGz-TipGz_num)/TipGz_num)
        self.assertTrue(Error_num<TOL, msg='Tip position error of %.3e '
                        'above tolerance %.2e!' %(Error_num,TOL))
        ## Plot deformed beam against the analytical reference
        if self.PLOT:
            fig = plt.figure('Deformed Beam')
            ax = fig.add_subplot(111)
            hsharpy, = ax.plot( PosDefG[:,0], PosDefG[:,2] ,'r+')
            href, = ax.plot(PosIni[:,0], zvref,'k')
            ax.set_xlabel(r'$x$ [m]')
            ax.set_ylabel(r'$z$ [m]')
            plt.legend((hsharpy,href,),(r'SHARPy',r'Analytical',),loc=2)
            plt.savefig( self.savedict['OutputDir'] +
                                      self.savedict['OutputFileRoot']+'.png')
            #plt.show()
            plt.close()
        return xbout
    def test112_F90_CClinunif(self):
        '''
        Clamped-clamped beam under gravity: the Sol112 F90 mid-span
        deflection is checked against the linear analytical solution and
        against a frozen numerical reference value.
        '''
        self.xbinput.BConds='CC'
        self.savedict['OutputFileRoot'] = 'F90beam%s_NE%0.2d' \
                                 %( self.xbinput.BConds, self.xbinput.NumElems)
        xbout=Sol112F90(self.xbinput, self.xbopts, SaveDict=self.savedict)
        # Reference solution (linear theory, uniform load q = -m*g)
        midnode=int((self.xbinput.NumNodesTot-1)/2)
        PosDef=xbout.PosDeforStatic
        PosIni=xbout.PosIni
        EI=self.xbinput.BeamStiffness[-1,-1]
        L=self.xbinput.BeamLength
        q=-self.xbinput.g*self.xbinput.BeamMass[0,0]
        # shift the abscissa so it starts at zero when the root is offset
        if PosDef[0,0]>-1e-15: xvref=PosIni[:,0]
        else: xvref=PosIni[:,0]-PosIni[0,0]
        zvref = self.linbeamunif(xvref,EI,q,self.xbinput.BConds)
        del xvref
        MidGz_ref = zvref[midnode]
        # Extract SHARPy solution: rotate nodal positions from FoR A to FoR G
        Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
        PosDefG = 0.0*PosDef
        for ii in range(self.xbinput.NumNodesTot):
            PosDefG[ii,:]=np.dot(Cao,PosDef[ii,:])
        # Extract mid-span position
        MidGz = PosDefG[midnode,2]
        # check accuracy against analytical (loose tolerance, see class doc)
        Error = np.abs((MidGz-MidGz_ref)/MidGz_ref)
        self.assertTrue(Error<self.TOL112, msg='Mid position error of %.3e '
                        'above tolerance %.2e!' %(Error,self.TOL112))
        # check accuracy against previous numerical solution (regression guard)
        MidGz_num = -2.5704576527357301e-05
        Error_num = np.abs((MidGz-MidGz_num)/MidGz_num)
        self.assertTrue(Error_num<TOL, msg='Mid position error of %.3e '
                        'above tolerance %.2e!' %(Error_num,TOL))
        ## Plot deformed beam against the analytical reference
        if self.PLOT:
            fig = plt.figure('Deformed Beam')
            ax = fig.add_subplot(111)
            hsharpy, = ax.plot( PosDefG[:,0], PosDefG[:,2] ,'r+')
            href, = ax.plot(PosIni[:,0], zvref,'k')
            ax.set_xlabel(r'$x$ [m]')
            ax.set_ylabel(r'$z$ [m]')
            plt.legend((hsharpy,href,),(r'SHARPy',r'Analytical',),loc=2)
            plt.savefig( self.savedict['OutputDir'] +
                                      self.savedict['OutputFileRoot']+'.png')
            plt.close()
        return xbout
    def test112_F90_TSlinunif(self):
        '''
        Simply supported beam under gravity: the Sol112 F90 mid-span
        deflection is checked against the linear analytical solution and
        against a frozen numerical reference value.
        '''
        self.xbinput.BConds='TS'
        self.savedict['OutputFileRoot'] = 'F90beam%s_NE%0.2d' \
                                 %( self.xbinput.BConds, self.xbinput.NumElems)
        xbout=Sol112F90(self.xbinput, self.xbopts, SaveDict=self.savedict)
        # Reference solution (linear theory, uniform load q = -m*g)
        midnode=int((self.xbinput.NumNodesTot-1)/2)
        PosDef=xbout.PosDeforStatic
        PosIni=xbout.PosIni
        EI=self.xbinput.BeamStiffness[-1,-1]
        L=self.xbinput.BeamLength
        q=-self.xbinput.g*self.xbinput.BeamMass[0,0]
        # shift the abscissa so it starts at zero when the root is offset
        if PosDef[0,0]>-1e-15: xvref=PosIni[:,0]
        else: xvref=PosIni[:,0]-PosIni[0,0]
        zvref = self.linbeamunif(xvref,EI,q,'TS')#self.xbinput.BConds)
        del xvref
        MidGz_ref = zvref[midnode]
        # Extract SHARPy solution: rotate nodal positions from FoR A to FoR G
        Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
        PosDefG = 0.0*PosDef
        for ii in range(self.xbinput.NumNodesTot):
            PosDefG[ii,:]=np.dot(Cao,PosDef[ii,:])
        # Extract mid-span position
        MidGz = PosDefG[midnode,2]
        ## Plot deformed beam against the analytical reference
        if self.PLOT:
            fig = plt.figure('Deformed Beam')
            ax = fig.add_subplot(111)
            hsharpy, = ax.plot( PosDefG[:,0], PosDefG[:,2] ,'r+')
            href, = ax.plot(PosIni[:,0], zvref,'k')
            ax.set_xlabel(r'$x$ [m]')
            ax.set_ylabel(r'$z$ [m]')
            plt.legend((hsharpy,href,),(r'SHARPy',r'Analytical',),loc=2)
            plt.savefig( self.savedict['OutputDir'] +
                                      self.savedict['OutputFileRoot']+'.png')
            #plt.show()
            plt.close()
        # check accuracy against analytical (loose tolerance, see class doc)
        Error = np.abs((MidGz-MidGz_ref)/MidGz_ref)
        self.assertTrue(Error<self.TOL112, msg='Mid position error of %.3e '
                        'above tolerance %.2e!' %(Error,self.TOL112))
        # check accuracy against previous numerical solution (regression guard)
        MidGz_num = -0.00012752922029878939
        Error_num = np.abs((MidGz-MidGz_num)/MidGz_num)
        self.assertTrue(Error_num<TOL, msg='Mid position error of %.3e '
                        'above tolerance %.2e!' %(Error_num,TOL))
        return xbout
    def test112_F90_TS_forced_displacements(self):
        '''
        Simply supported beam on both sides under gravity and forced
        displacements. The beam properties are modified to obtain better
        condition numbers while ensuring that shear and extentional deflections
        are contained.  The deformed shape is compared against the analytical
        catenary solution (F90 solver variant of the test below).
        '''
        self.xbopts.NumLoadSteps=ct.c_int(20)
        # softer stiffness so the beam behaves like a cable
        kvec=np.array([1e3,5e2,5e2,.3,.015,.015])
        for ii in range(6): self.xbinput.BeamStiffness[ii,ii]=kvec[ii]
        # NOTE: the 0.0 factor makes PsiA_G zero; the rotation is disabled here
        self.xbinput.PsiA_G=0.0*np.pi*30.0/180.*np.array([0,1,0])
        self.xbinput.BConds='TS'
        self.xbinput.g=9.81

        # equivalent load to gravity (kept for reference, currently disabled)
        #ds=self.xbinput.BeamLength/(self.xbinput.NumNodesTot-1)
        #dFg=-9.81*self.xbinput.BeamMass[0,0]*ds
        #self.xbinput.ForceStatic[:,2]=dFg
        #self.xbinput.ForceStatic[[0,-1],2]=0.5*dFg

        # add forced displacement at the tip: the end node is pulled inboard by
        # compr_fact of the beam length, producing cable sag
        compr_fact=0.2
        self.xbinput.addForcedDisp(
                node=-1,
                pos=np.array([(1.-compr_fact)*self.xbinput.BeamLength,0.,0.]),
                FoR='A')

        self.savedict['OutputFileRoot']='catenary_beam%s_NE%0.2d_compr%.0fperc'\
                 %(self.xbinput.BConds,self.xbinput.NumElems,100.*compr_fact)
        #xbout=Sol112(self.xbinput, self.xbopts, SaveDict=self.savedict)
        xbout=Sol112F90(self.xbinput, self.xbopts, SaveDict=self.savedict)

        # Extract SHARPy solution (rotate node positions from FoR A to FoR G)
        Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
        PosDef=xbout.PosDeforStatic
        PosDefG = 0.0*PosDef
        for ii in range(self.xbinput.NumNodesTot):
            PosDefG[ii,:]=np.dot(Cao,PosDef[ii,:])
        # Extract mid-node vertical position
        midnode=int((self.xbinput.NumNodesTot-1)/2)
        MidGz = PosDefG[midnode,2]

        # Estimate length of deformed cable (check extensional deform are low)
        #SegLength=np.diff(PosDef.T).T
        Lcable=0.0
        for nn in range(1,self.xbinput.NumNodesTot):
            Lcable+=np.linalg.norm(PosDef[nn,:]-PosDef[nn-1,:])

        # check numerics: shear parameter of the chosen section properties
        shear_param = self.xbinput.BeamStiffness[1,1]/\
                      self.xbinput.BeamStiffness[4,4]* self.xbinput.BeamLength**2

        # ------------------------- derive reference solution (catenary problem)
        # utility functions
        def funycable(xv,C,C1,C2):
            '''Compute cable coordinates'''
            return (C*np.cosh( (xv+C1)/C ) + C2)
        def fundycable(xv,C,C1,C2):
            '''Derivative of cable shape'''
            return (np.sinh( (xv+C1)/C ))
        def funLcable(xv,C,C1,C2):
            '''Integrates for the cable length'''
            dyv=fundycable(xv,C,C1,C2)
            Iv = np.sqrt(1.+dyv**2)
            return scint.trapz(Iv,x=xv)

        # end-point positions in FoR G
        posB=xbout.PosIni[0,:]
        posT = self.xbinput.ForcedDisp[-1]['pos']
        if self.xbinput.ForcedDisp[-1]['FoR']=='A':
            posT=np.dot(Cao,posT)
        zB,zT=posB[2],posT[2]
        # cable length: the catenary must be longer than the chord
        Ltarget=self.xbinput.BeamLength
        assert Ltarget>np.linalg.norm(posT-posB), 'Cable too short!'

        # set-up nonlinear system of equations for the catenary coefficients
        Xv=np.linspace(posB[0],posT[0],600)
        def ResEval(cv):
            ''' Evaluate residual associated to the constraints that the
            coefficient of the catenary solution need to satisfy '''
            Res=[ funycable(Xv[0],*cv)-zB,      # left BC
                  funycable(Xv[-1],*cv)-zT,     # right BC
                  funLcable(Xv,*cv)-Ltarget,]   # Length constraint
            return Res
        cv0=np.array([0.3382, -0.4, -0.6036])
        Sol=scopt.root(fun=ResEval,x0=cv0,jac=False,tol=1e-6)#method='SLSQP',
        cvsol=Sol['x']
        Yvsol=funycable(Xv,*cvsol)

        ## Plot deformed shape against the catenary
        if self.PLOT:
            fig = plt.figure('Deformed Beam - F90_TS_forced_displacements')
            ax = fig.add_subplot(111)
            hsharpy, = ax.plot( PosDefG[:,0], PosDefG[:,2] ,'r',marker='o',lw=2,
                                                            markevery=(0.05,0.1))
            href, = ax.plot( Xv, Yvsol ,'b',marker='s',markevery=(0.0,0.1))
            #href,=
            ax.set_xlabel(r'$x$ [m]')
            ax.set_ylabel(r'$z$ [m]')
            plt.legend((hsharpy,href,),(r'SHARPy',r'Analytical',),loc=2,)
            plt.savefig( self.savedict['OutputDir'] +
                                           self.savedict['OutputFileRoot']+'.png')
            plt.close()

        # Comparisons
        # Admissible stretch of cable (estimate from mean axial tension)
        dLtension_max=0.5*self.xbinput.BeamLength*self.xbinput.g*\
                    self.xbinput.BeamMass[0,0]/self.xbinput.BeamStiffness[0,0]
        self.assertTrue(Lcable<self.xbinput.BeamLength+dLtension_max,
            msg='The numerically estimated length of the cable should be shorter'
                ' of the original length is extensional deflections are small')
        self.assertTrue( np.abs(Lcable/self.xbinput.BeamLength-1.)<\
                                         dLtension_max/self.xbinput.BeamLength,
            msg='The length of the cable changed too much')
        # numerical vs analytical (only if the catenary root-find converged)
        if Sol['success']:
            self.assertTrue(
                np.abs(np.min(Yvsol)-MidGz)/self.xbinput.BeamLength<self.TOL112,
                msg='Mid displacements not matching analytical solution')

        return xbout
# ------------------------------------------------------------------------------
class Test112(unittest.TestCase):
'''
Each method defined in this class contains a test case.
@warning: by default, only functions whose name starts with 'test' will be
run during testing.
This test looks predominantly at boundary conditions. The beam deflections
are computed against reference analytical solutions (self.linbeamunif) from
linear theory for small displacements.
A higher tolerance is used in these tests because the nonlinear solution
strongly depends on the extensional stiffness EA when the beam is supported
on both sides while the linear solution neglects this effect.
A further test with low tolerance is used to check whether the solution
changes from one implementation to another.
'''
    def setUp(self):
        '''
        Common piece of code run by each test: tolerances, solver options,
        a baseline 10-element / 3-noded beam model and output settings.
        '''
        # tolerance vs analytical solutions (loose, see class docstring)
        self.TOL112=1e-2
        self.TOL112_num=1e-3 # numerical comparisons against Sol112F90 solution
        self.PLOT=True

        # Beam solution options
        self.xbopts = DerivedTypes.Xbopts(FollowerForce = ct.c_bool(False),
                                          FollowerForceRig=ct.c_bool(False),
                                          MaxIterations = ct.c_int(20),
                                          PrintInfo = ct.c_bool(True),
                                          OutInaframe = ct.c_bool(True),
                                          NumLoadSteps = ct.c_int(10),
                                          Solution = ct.c_int(112),
                                          MinDelta = ct.c_double(1e-6),
                                          NewmarkDamp = ct.c_double(1e-2))

        # Beam input (quadratic elements, reduced gravity)
        self.xbinput = DerivedTypes.Xbinput(NumNodesElem=3,NumElems=10,g=0.980)
        self.xbinput.BeamLength = 1.0
        # NOTE(review): PsiA_G is set to 30.0*[1,0,0]; presumably intended in
        # radians elsewhere in the code path -- confirm units against XbeamLib
        self.xbinput.PsiA_G=30.0*np.array([1,0,0])

        # Mass/Stiffness (diagonal sectional matrices)
        mvec=np.array([.15,.15,.15,.1,.001,.001])
        for ii in range(6): self.xbinput.BeamMass[ii,ii]=mvec[ii]
        kvec=np.array([1e6,1e6,1e6,15,15,15])
        for ii in range(6): self.xbinput.BeamStiffness[ii,ii]=kvec[ii]

        # saving options
        self.savedict=Settings.SaveDict
        self.savedict['OutputDir'] = Settings.SharPyProjectDir + \
                                       'output/tests/PyBeam/NonlinearStatic/'

        # return value is ignored by unittest; kept for ad-hoc driver use
        return self
def linbeamunif(self,xv,EI,q,BCs):
'''
Linear solution under uniform load
'''
L=xv[-1]-xv[0]
if BCs=='CF':
zv = q/(24.*EI) * xv**2 * ( 6.*L**2 - 4.*L*xv + xv**2 )
elif BCs=='TS' or BCs=='ST':
zv = q/(24.*EI) * xv * ( L**3 - 2.*L*xv**2 + xv**3 )
elif BCs=='CC':
zv = q/(24.*EI) * xv**2 * (L-xv)**2
else:
print('Analytical solution not available')
zv=xv
return zv
    def test112_CFlinunif(self):
        '''
        Cantilever ('CF') beam under uniform gravity load; the tip vertical
        deflection is checked against the linear analytical solution and a
        stored numerical reference.
        '''
        self.xbinput.BConds='CF'
        self.savedict['OutputFileRoot'] = 'beam%s_NE%0.2d' \
                                   %( self.xbinput.BConds, self.xbinput.NumElems)
        xbout=Sol112(self.xbinput, self.xbopts, SaveDict=self.savedict)

        # Reference solution: linear beam theory
        PosDef=xbout.PosDeforStatic
        PosIni=xbout.PosIni
        EI=self.xbinput.BeamStiffness[-1,-1]
        L=self.xbinput.BeamLength
        # uniform distributed load (weight per unit length)
        q=-self.xbinput.g*self.xbinput.BeamMass[0,0]
        # shift abscissa so the analytical solution starts at x=0
        if PosDef[0,0]>-1e-15: xvref=PosIni[:,0]
        else: xvref=PosIni[:,0]-PosIni[0,0]
        zvref = self.linbeamunif(xvref,EI,q,self.xbinput.BConds)
        del xvref
        TipGz_ref = zvref[-1]

        # Extract SHARPy solution (rotate from FoR A to FoR G)
        Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
        PosDefG = 0.0*PosDef
        for ii in range(self.xbinput.NumNodesTot):
            PosDefG[ii,:]=np.dot(Cao,PosDef[ii,:])
        #midnode=int((self.xbinput.NumNodesTot-1)/2)
        # Extract tip vertical position
        TipGz = PosDefG[-1,2]

        # check accuracy against analytical
        Error = np.abs((TipGz-TipGz_ref)/TipGz_ref)
        self.assertTrue(Error<self.TOL112, msg='Tip position error of %.3e '
                        'above tolerance %.2e!' %(Error,self.TOL112))
        # check accuracy against previous numerical solution (regression check)
        TipGz_num = -0.0012260928687054726
        Error_num = np.abs((TipGz-TipGz_num)/TipGz_num)
        self.assertTrue(Error_num<TOL, msg='Tip position error of %.3e '
                        'above tolerance %.2e!' %(Error_num,TOL))

        ## Plot deformed shape against the analytical deflection
        if self.PLOT:
            fig = plt.figure('Deformed Beam')
            ax = fig.add_subplot(111)
            hsharpy, = ax.plot( PosDefG[:,0], PosDefG[:,2] ,'r+')
            href, = ax.plot(PosIni[:,0], zvref,'k')
            ax.set_xlabel(r'$x$ [m]')
            ax.set_ylabel(r'$z$ [m]')
            plt.legend((hsharpy,href,),(r'SHARPy',r'Analytical',),loc=2)
            plt.savefig( self.savedict['OutputDir'] +
                                           self.savedict['OutputFileRoot']+'.png')
            #plt.show()
            plt.close()

        return xbout
    def test112_CClinunif(self):
        '''
        Clamped-clamped ('CC') beam under uniform gravity load; the mid-span
        vertical deflection is checked against the linear analytical solution
        and a stored numerical reference.
        '''
        self.xbinput.BConds='CC'
        self.savedict['OutputFileRoot'] = 'beam%s_NE%0.2d' \
                                   %( self.xbinput.BConds, self.xbinput.NumElems)
        xbout=Sol112(self.xbinput, self.xbopts, SaveDict=self.savedict)

        # Reference solution: linear beam theory at the mid node
        midnode=int((self.xbinput.NumNodesTot-1)/2)
        PosDef=xbout.PosDeforStatic
        PosIni=xbout.PosIni
        EI=self.xbinput.BeamStiffness[-1,-1]
        L=self.xbinput.BeamLength
        q=-self.xbinput.g*self.xbinput.BeamMass[0,0]
        # shift abscissa so the analytical solution starts at x=0
        if PosDef[0,0]>-1e-15: xvref=PosIni[:,0]
        else: xvref=PosIni[:,0]-PosIni[0,0]
        zvref = self.linbeamunif(xvref,EI,q,self.xbinput.BConds)
        del xvref
        MidGz_ref = zvref[midnode]

        # Extract SHARPy solution (rotate from FoR A to FoR G)
        Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
        PosDefG = 0.0*PosDef
        for ii in range(self.xbinput.NumNodesTot):
            PosDefG[ii,:]=np.dot(Cao,PosDef[ii,:])
        # Extract mid-node vertical position
        MidGz = PosDefG[midnode,2]

        # check accuracy against analytical
        Error = np.abs((MidGz-MidGz_ref)/MidGz_ref)
        self.assertTrue(Error<self.TOL112, msg='Mid position error of %.3e '
                        'above tolerance %.2e!' %(Error,self.TOL112))
        # check accuracy against previous numerical solution (regression check)
        MidGz_num = -2.5539140776011136e-05
        Error_num = np.abs((MidGz-MidGz_num)/MidGz_num)
        self.assertTrue(Error_num<TOL, msg='Mid position error of %.3e '
                        'above tolerance %.2e!' %(Error_num,TOL))

        ## Plot deformed shape against the analytical deflection
        if self.PLOT:
            fig = plt.figure('Deformed Beam')
            ax = fig.add_subplot(111)
            hsharpy, = ax.plot( PosDefG[:,0], PosDefG[:,2] ,'r+')
            href, = ax.plot(PosIni[:,0], zvref,'k')
            ax.set_xlabel(r'$x$ [m]')
            ax.set_ylabel(r'$z$ [m]')
            plt.legend((hsharpy,href,),(r'SHARPy',r'Analytical',),loc=2)
            plt.savefig( self.savedict['OutputDir'] +
                                           self.savedict['OutputFileRoot']+'.png')
            plt.close()

        return xbout
    def test112_TSlinunif(self):
        '''
        Pinned-pinned ('ST') beam under uniform gravity load, Python Sol112
        solver; mid-span deflection checked against linear theory and a
        stored numerical reference (with a relaxed 10x regression tolerance).
        '''
        self.xbinput.BConds='ST'
        self.savedict['OutputFileRoot'] = 'beam%s_NE%0.2d' \
                                   %( self.xbinput.BConds, self.xbinput.NumElems)
        xbout=Sol112(self.xbinput, self.xbopts, SaveDict=self.savedict)

        # Reference solution: linear beam theory at the mid node
        midnode=int((self.xbinput.NumNodesTot-1)/2)
        PosDef=xbout.PosDeforStatic
        PosIni=xbout.PosIni
        EI=self.xbinput.BeamStiffness[-1,-1]
        L=self.xbinput.BeamLength
        q=-self.xbinput.g*self.xbinput.BeamMass[0,0]
        # shift abscissa so the analytical solution starts at x=0
        if PosDef[0,0]>-1e-15: xvref=PosIni[:,0]
        else: xvref=PosIni[:,0]-PosIni[0,0]
        # 'TS' passed explicitly: linbeamunif treats 'TS' and 'ST' identically
        zvref = self.linbeamunif(xvref,EI,q,'TS')#self.xbinput.BConds)
        del xvref
        MidGz_ref = zvref[midnode]

        # Extract SHARPy solution (rotate from FoR A to FoR G)
        Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
        PosDefG = 0.0*PosDef
        for ii in range(self.xbinput.NumNodesTot):
            PosDefG[ii,:]=np.dot(Cao,PosDef[ii,:])
        # Extract mid-node vertical position
        MidGz = PosDefG[midnode,2]

        ## Plot deformed shape against the analytical deflection
        # NOTE(review): redundant -- setUp already sets self.PLOT=True
        self.PLOT=True
        if self.PLOT:
            fig = plt.figure('Deformed Beam')
            ax = fig.add_subplot(111)
            hsharpy, = ax.plot( PosDefG[:,0], PosDefG[:,2] ,'r+')
            href, = ax.plot(PosIni[:,0], zvref,'k')
            ax.set_xlabel(r'$x$ [m]')
            ax.set_ylabel(r'$z$ [m]')
            plt.legend((hsharpy,href,),(r'SHARPy',r'Analytical',),loc=2)
            plt.savefig( self.savedict['OutputDir'] +
                                           self.savedict['OutputFileRoot']+'.png')
            plt.close()

        # check accuracy against analytical
        Error = np.abs((MidGz-MidGz_ref)/MidGz_ref)
        self.assertTrue(Error<self.TOL112, msg='Mid position error of %.3e '
                        'above tolerance %.2e!' %(Error,self.TOL112))
        # check accuracy against previous numerical solution (10x tolerance)
        MidGz_num = -0.00012733255#4801028591
        Error_num = np.abs((MidGz-MidGz_num)/MidGz_num)
        self.assertTrue(Error_num<1e1*TOL, msg='Mid position error of %.3e '
                        'above tolerance %.2e!' %(Error_num,1e1*TOL))

        return xbout
    def test112_TSlinunif_linearity(self):
        ''' Increase load of factor 10 w.r.t. test112_SSlinunif and displacements
        scale with loads: for small deflections the response must stay linear,
        i.e. the mid deflection grows proportionally to the load factor. '''
        self.xbinput.PsiA_G=np.zeros((3,))

        # baseline run at nominal load
        LoadFactor=1.
        self.xbinput.BConds='ST'
        self.xbinput.g=LoadFactor*self.xbinput.g
        self.savedict['OutputFileRoot'] = 'beam%s_NE%0.2d_Loadfactor%.3d' \
                       %( self.xbinput.BConds, self.xbinput.NumElems, LoadFactor)
        xbout=Sol112(self.xbinput, self.xbopts, SaveDict=self.savedict)
        # Extract SHARPy solution (rotate from FoR A to FoR G)
        Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
        PosDef=xbout.PosDeforStatic
        PosDefG = 0.0*PosDef
        for ii in range(self.xbinput.NumNodesTot):
            PosDefG[ii,:]=np.dot(Cao,PosDef[ii,:])
        # Extract mid-node vertical position at nominal load
        midnode=int((self.xbinput.NumNodesTot-1)/2)
        MidGz = PosDefG[midnode,2]

        # increase load factor and re-run
        LoadFactor=10.
        self.xbinput.BConds='ST'
        self.xbinput.g=LoadFactor*self.xbinput.g
        self.savedict['OutputFileRoot'] = 'beam%s_NE%0.2d_Loadfactor%.3d' \
                       %( self.xbinput.BConds, self.xbinput.NumElems, LoadFactor)
        xbout=Sol112(self.xbinput, self.xbopts, SaveDict=self.savedict)
        # Extract SHARPy solution
        Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
        PosDef=xbout.PosDeforStatic
        PosDefG_fact = 0.0*PosDef
        for ii in range(self.xbinput.NumNodesTot):
            PosDefG_fact[ii,:]=np.dot(Cao,PosDef[ii,:])
        # Extract mid position at the increased load
        MidGz_fact = PosDefG_fact[midnode,2]

        # relative deviation from perfect linear scaling
        ErrorRel = np.abs(MidGz_fact/MidGz/LoadFactor-1.0)
        TolFact=0.03
        self.assertTrue(ErrorRel<TolFact,
            msg='Factor of displacements increase %.3f is not within %.2f perc '
                'of the prescribed load factor %.1f!' %(ErrorRel,TolFact,LoadFactor))

        return xbout
    def test112_TS_forced_displacements(self):
        '''
        Simply supported beam on both sides under gravity and forced
        displacements. The beam properties are modified to obtain better
        condition numbers while ensuring that shear and extentional deflections
        are contained.  The deformed shape is compared against the analytical
        catenary solution.
        '''
        self.xbopts.NumLoadSteps=ct.c_int(20)
        # softer stiffness so the beam behaves like a cable
        kvec=np.array([1e3,5e2,5e2,.3,.015,.015])
        for ii in range(6): self.xbinput.BeamStiffness[ii,ii]=kvec[ii]
        self.xbinput.PsiA_G=np.zeros((3,))
        self.xbinput.BConds='TS'
        self.xbinput.g=9.81

        # add forced displacement at the tip: the end node is pulled inboard by
        # compr_fact of the beam length, producing cable sag
        compr_fact=0.2
        self.xbinput.addForcedDisp(
                node=-1,
                pos=np.array([(1.-compr_fact)*self.xbinput.BeamLength,0.,0.]),
                FoR='A')

        self.savedict['OutputFileRoot']='catenary_beam%s_NE%0.2d_compr%.0fperc'\
                 %(self.xbinput.BConds,self.xbinput.NumElems,100.*compr_fact)
        xbout=Sol112(self.xbinput, self.xbopts, SaveDict=self.savedict)

        # Extract SHARPy solution (rotate from FoR A to FoR G)
        Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
        PosDef=xbout.PosDeforStatic
        PosDefG = 0.0*PosDef
        for ii in range(self.xbinput.NumNodesTot):
            PosDefG[ii,:]=np.dot(Cao,PosDef[ii,:])
        # Extract mid-node vertical position
        midnode=int((self.xbinput.NumNodesTot-1)/2)
        MidGz = PosDefG[midnode,2]

        # Estimate length of deformed cable (check extensional deform are low)
        #SegLength=np.diff(PosDef.T).T
        Lcable=0.0
        for nn in range(1,self.xbinput.NumNodesTot):
            Lcable+=np.linalg.norm(PosDef[nn,:]-PosDef[nn-1,:])

        # check numerics: shear parameter and stiffness conditioning
        shear_param = self.xbinput.BeamStiffness[1,1]/\
                      self.xbinput.BeamStiffness[4,4]* self.xbinput.BeamLength**2
        codK = np.linalg.cond(xbout.K)

        # ------------------------- derive reference solution (catenary problem)
        # utility functions
        def funycable(xv,C,C1,C2):
            '''Compute cable coordinates'''
            return (C*np.cosh( (xv+C1)/C ) + C2)
        def fundycable(xv,C,C1,C2):
            '''Derivative of cable shape'''
            return (np.sinh( (xv+C1)/C ))
        def funLcable(xv,C,C1,C2):
            '''Integrates for the cable length'''
            dyv=fundycable(xv,C,C1,C2)
            Iv = np.sqrt(1.+dyv**2)
            return scint.trapz(Iv,x=xv)

        # end-point positions in FoR G
        posB=xbout.PosIni[0,:]
        posT = self.xbinput.ForcedDisp[-1]['pos']
        if self.xbinput.ForcedDisp[-1]['FoR']=='A':
            posT=np.dot(Cao,posT)
        zB,zT=posB[2],posT[2]
        # cable length: the catenary must be longer than the chord
        Ltarget=self.xbinput.BeamLength
        assert Ltarget>np.linalg.norm(posT-posB), 'Cable too short!'

        # set-up nonlinear system of equations for the catenary coefficients
        Xv=np.linspace(posB[0],posT[0],600)
        def ResEval(cv):
            ''' Evaluate residual associated to the constraints that the
            coefficient of the catenary solution need to satisfy '''
            Res=[ funycable(Xv[0],*cv)-zB,      # left BC
                  funycable(Xv[-1],*cv)-zT,     # right BC
                  funLcable(Xv,*cv)-Ltarget,]   # Length constraint
            return Res
        cv0=np.array([0.3382, -0.4, -0.6036])
        Sol=scopt.root(fun=ResEval,x0=cv0,jac=False,tol=1e-6)#method='SLSQP',
        cvsol=Sol['x']
        Yvsol=funycable(Xv,*cvsol)

        ## Plot deformed shape against the catenary
        if self.PLOT:
            fig = plt.figure('Deformed Beam - TS_forced_displacements')
            ax = fig.add_subplot(111)
            hsharpy, = ax.plot( PosDefG[:,0], PosDefG[:,2] ,'r',marker='o',lw=2,
                                                            markevery=(0.05,0.1))
            href, = ax.plot( Xv, Yvsol ,'b',marker='s',markevery=(0.0,0.1))
            #href,=
            ax.set_xlabel(r'$x$ [m]')
            ax.set_ylabel(r'$z$ [m]')
            plt.legend((hsharpy,href,),(r'SHARPy',r'Analytical',),loc=2,)
            plt.savefig( self.savedict['OutputDir'] +
                                           self.savedict['OutputFileRoot']+'.png')
            #plt.show()
            plt.close()

        # Comparisons
        # Admissible stretch of cable (estimate from mean axial tension)
        dLtension_max=0.5*self.xbinput.BeamLength*self.xbinput.g*\
                    self.xbinput.BeamMass[0,0]/self.xbinput.BeamStiffness[0,0]
        self.assertTrue(Lcable<self.xbinput.BeamLength+dLtension_max,
            msg='The numerically estimated length of the cable should be shorter'
                ' of the original length is extensional deflections are small')
        self.assertTrue( np.abs(Lcable/self.xbinput.BeamLength-1.)<\
                                         dLtension_max/self.xbinput.BeamLength,
            msg='The length of the cable changed too much')
        # numerical vs analytical (only if the catenary root-find converged)
        if Sol['success']:
            self.assertTrue(
                np.abs(np.min(Yvsol)-MidGz)/self.xbinput.BeamLength<self.TOL112,
                msg='Mid displacements not matching analytical solution')

        return xbout
    def test112_TS_forced_displacements_Gdef(self):
        '''
        Simply supported beam on both sides under gravity and forced
        displacements. The beam properties are modified to obtain better
        condition numbers while ensuring that shear and extentional deflections
        are contained
        Checks for general problem is position of boundaries defined in FoR G
        '''
        self.xbopts.NumLoadSteps=ct.c_int(40)
        # softer stiffness so the beam behaves like a cable
        kvec=np.array([1e3,5e2,5e2,.3,.0015,.0015])
        for ii in range(6): self.xbinput.BeamStiffness[ii,ii]=kvec[ii]
        # NOTE(review): mvec is defined but never applied to BeamMass here --
        # the setUp values remain in effect; confirm whether this was intended
        mvec=np.array([0.15,0.15,0.15,.1,1e-3,1e-3])
        self.xbinput.PsiA_G=np.zeros((3,))
        self.xbinput.BConds='TS'
        self.xbinput.g=9.81

        # add forced displacement at the tip (off-axis end point)
        self.xbinput.addForcedDisp(
                node=-1,
                pos=np.array([0.8*self.xbinput.BeamLength,
                              0.0,
                              0.2*self.xbinput.BeamLength]),
                FoR='A')

        self.savedict['OutputFileRoot']='catenary_beam%s_NE%0.2d_random_pos'\
                                  %(self.xbinput.BConds,self.xbinput.NumElems)
        xbout=Sol112(self.xbinput, self.xbopts, SaveDict=self.savedict)

        # Extract SHARPy solution (rotate from FoR A to FoR G)
        Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
        PosDef=xbout.PosDeforStatic
        PosDefG = 0.0*PosDef
        for ii in range(self.xbinput.NumNodesTot):
            PosDefG[ii,:]=np.dot(Cao,PosDef[ii,:])
        # Extract mid-node vertical position
        midnode=int((self.xbinput.NumNodesTot-1)/2)
        MidGz = PosDefG[midnode,2]

        # Estimate length of deformed cable (check extensional deform are low)
        Lcable=0.0
        for nn in range(1,self.xbinput.NumNodesTot):
            Lcable+=np.linalg.norm(PosDef[nn,:]-PosDef[nn-1,:])

        # check numerics: shear parameter and stiffness conditioning
        shear_param = self.xbinput.BeamStiffness[1,1]/\
                      self.xbinput.BeamStiffness[4,4]* self.xbinput.BeamLength**2
        codK = np.linalg.cond(xbout.K)
        print('Shear parameter: %.2e'%shear_param)
        print('Stiffness condition number: %.2e'%codK)
        print('MidGz=%f'%MidGz)
        print('Estimated length of cable: %f'%Lcable)

        # ------------------------- derive reference solution (catenary problem)
        # utility functions
        def funycable(xv,C,C1,C2):
            '''Compute cable coordinates'''
            return (C*np.cosh( (xv+C1)/C ) + C2)
        def fundycable(xv,C,C1,C2):
            '''Derivative of cable shape'''
            return (np.sinh( (xv+C1)/C ))
        def funLcable(xv,C,C1,C2):
            '''Integrates for the cable length'''
            dyv=fundycable(xv,C,C1,C2)
            Iv = np.sqrt(1.+dyv**2)
            return scint.trapz(Iv,x=xv)

        # end-point positions in FoR G
        posB=xbout.PosIni[0,:]
        posT = self.xbinput.ForcedDisp[-1]['pos']
        if self.xbinput.ForcedDisp[-1]['FoR']=='A':
            posT=np.dot(Cao,posT)
        zB,zT=posB[2],posT[2]
        # cable length: the catenary must be longer than the chord
        Ltarget=self.xbinput.BeamLength
        assert Ltarget>np.linalg.norm(posT-posB), 'Cable too short!'

        # set-up nonlinear system of equations for the catenary coefficients
        Xv=np.linspace(posB[0],posT[0],600)
        def ResEval(cv):
            ''' Evaluate residual associated to the constraints that the
            coefficient of the catenary solution need to satisfy '''
            Res=[ funycable(Xv[0],*cv)-zB,      # left BC
                  funycable(Xv[-1],*cv)-zT,     # right BC
                  funLcable(Xv,*cv)-Ltarget,]   # Length constraint
            return Res
        cv0=np.array([0.3382, -0.4, -0.6036])
        Sol=scopt.root(fun=ResEval,x0=cv0,jac=False,tol=1e-6)#method='SLSQP',
        cvsol=Sol['x']
        Yvsol=funycable(Xv,*cvsol)

        ## Plot deformed shape against the catenary
        if self.PLOT:
            fig = plt.figure('Deformed Beam - TS_forced_displacements_Gdef')
            ax = fig.add_subplot(111)
            hsharpy, = ax.plot( PosDefG[:,0], PosDefG[:,2] ,'r',marker='o',lw=2,
                                                            markevery=(0.05,0.1))
            href, = ax.plot( Xv, Yvsol ,'b',marker='s',markevery=(0.0,0.1))
            #href,=
            ax.set_xlabel(r'$x$ [m]')
            ax.set_ylabel(r'$z$ [m]')
            ax.set_xlim((-.1,.9))
            ax.set_ylim((-.5,.5))
            plt.legend((hsharpy,href,),(r'SHARPy',r'Analytical',),loc=2,)
            plt.savefig( self.savedict['OutputDir'] +
                                           self.savedict['OutputFileRoot']+'.png')
            #plt.show()
            plt.close()

        # ----------------------------------
        # reshape CRV matrix
        PsiDef=PyBeam.Utils.PostPr.reshape_PsiMat(xbout.PsiDeforStatic)
        Fval=np.zeros((3,))
        Dval=np.zeros((3,))
        #embed()
        # weigths to be multiplied by nodal values
        Fval,Dval=lib_fem.fem_1d_shapefun(numnodeelem=3, z=0.7)

        ####################################
        # Comparisons
        # Admissible stretch of cable (estimate from mean axial tension)
        dLtension_max=0.5*self.xbinput.BeamLength*self.xbinput.g*\
                    self.xbinput.BeamMass[0,0]/self.xbinput.BeamStiffness[0,0]
        self.assertTrue(Lcable<self.xbinput.BeamLength+dLtension_max,
            msg='The numerically estimated length of the cable should be shorter'
                ' of the original length is extensional deflections are small')
        self.assertTrue( np.abs(Lcable/self.xbinput.BeamLength-1.)<\
                                         dLtension_max/self.xbinput.BeamLength,
            msg='The length of the cable changed too much')
        # numerical vs analytical (only if the catenary root-find converged)
        #embed()
        if Sol['success']:
            self.assertTrue(
                np.abs(np.min(Yvsol)-np.min(PosDefG[:,2]))/\
                                     self.xbinput.BeamLength<self.TOL112,
                msg='Mid displacements not matching analytical solution')

        return xbout
class Test112_Cardona(unittest.TestCase):
'''
This test looks reproduces the example is sec 6.9 of Cardona and looks at
shear locking. The beam tip deflections and rotations are computed
'''
    def setUp(self):
        '''
        Common piece of code run by each test: solver options, tolerance and
        output settings for the Cardona shear-locking cases.  The beam model
        itself is built per-case inside the test (it varies with Nel).
        '''
        self.PLOT=True
        # tolerance on tip displacement/rotation relative errors
        self.tol=5e-3

        # Beam solution options
        self.xbopts = DerivedTypes.Xbopts(
                                          FollowerForce = ct.c_bool(False),
                                          FollowerForceRig=ct.c_bool(False),
                                          MaxIterations = ct.c_int(20),
                                          PrintInfo = ct.c_bool(True),
                                          OutInaframe = ct.c_bool(True),
                                          NumLoadSteps = ct.c_int(10),
                                          Solution = ct.c_int(112),
                                          MinDelta = ct.c_double(1e-6),
                                          NewmarkDamp = ct.c_double(1e-2))

        # saving options
        self.savedict=Settings.SaveDict
        self.savedict['OutputDir'] = Settings.SharPyProjectDir + \
                                       'output/tests/PyBeam/NonlinearStatic/'

        # return value is ignored by unittest; kept for ad-hoc driver use
        return self
def test112_Cardona_shear_locking(self):
'''
For physical beams (in which the shear factor is not too high), using
quadratic elements avoids shear locking even with very low
discretisations. Residual bending flexibility corrections are also not
required.
With reference to Gerardin and Cardona, Tab.1 to 4, results computed in
this test should therefore show better convergence properties then those
from Gerardin and Cardona.
'''
Nelvec=[1,2,4,8,16,]
Ntot=len(Nelvec)
zvec=np.zeros((Ntot,2))
fivec=np.zeros((Ntot,2))
Cvec = np.zeros((Ntot,2))
Ez, Efi=np.zeros((Ntot,2)),np.zeros((Ntot,2))
# Mass/Stiffness (Cardona)
mvec=np.array([1.,1.,1.,1.,1.,1.])
kvec=np.array([5e8,3.231e8,3.231e8,1e7,9.345e6,9.345e6])
Ftip=[600.,600000.0]
tip_ref = [2.681900e-3,2.159081]
fi_ref = [-8.025680e-4,-6.720002e-1]
for ff in range(2):
for nn in range(Ntot):
Nel=Nelvec[nn]
# Beam input
self.xbinput = DerivedTypes.Xbinput(NumNodesElem=3,NumElems=Nel,
g=0.0, BConds='CF')
self.xbinput.BeamLength = 5.0
self.xbinput.PsiA_G=0.0*np.array([1,0,0]) # rotations will be wrong
# if this is non-zero
for ii in range(6): self.xbinput.BeamMass[ii,ii]=mvec[ii]
for ii in range(6): self.xbinput.BeamStiffness[ii,ii]=kvec[ii]
self.xbinput.ForceStatic[-1,2]=Ftip[ff]
self.savedict['OutputFileRoot'] = 'Cardona_beam%s_NE%0.2d' \
%( self.xbinput.BConds, self.xbinput.NumElems)
xbout=Sol112(self.xbinput, self.xbopts, SaveDict=self.savedict)
# Extract SHARPy solution
Cao=XbeamLib.RotCRV(self.xbinput.PsiA_G)
PosDefG = 0.0*xbout.PosDeforStatic
for ii in range(self.xbinput.NumNodesTot):
PosDefG[ii,:]=np.dot(Cao,xbout.PosDeforStatic[ii,:])
# Extract tip position
zvec[nn,ff] = PosDefG[-1,2]
fivec[nn,ff] = xbout.PsiDeforStatic[-1,1,1]
Cvec[nn,ff] = np.linalg.cond(xbout.K)
Ez[:,ff]=np.abs(zvec[:,ff]/tip_ref[ff]-1.0)
Efi[:,ff]=np.abs(fivec[:,ff]/fi_ref[ff]-1.0)
shear_param = kvec[1]/kvec[4]*self.xbinput.BeamLength**2
#embed()
self.assertTrue(np.any(Ez>TOL),msg='The maximum tip displacement relative '
'error, %.3e, is above the %.3e tolerance'%(np.max(Ez),self.tol))
self.assertTrue(np.max(Efi)>TOL,msg='The maximum tip rotation relative '
'error, %.3e, is above the %.3e tolerance'%(np.max(Efi),self.tol))
return xbout
if __name__=='__main__':
    # Ad-hoc driver: uncomment the case(s) to run individually; the trailing
    # unittest.main() discovers and runs every test method regardless.
    #T=Test112()
    #T.setUp()
    #xbout=T.test112_CFlinunif()
    #xbout=T.test112_CClinunif()
    #xbout=T.test112_TSlinunif()
    #xbout=T.test112_TSlinunif_linearity()
    #T.test112_TS_forced_displacements()
    #T.test112_TS_forced_displacements_Gdef()

    T=Test112F90()
    T.setUp()
    #xbout=T.test112_F90_CFlinunif()
    #xbout=T.test112_F90_CClinunif()
    #xbout=T.test112_F90_TSlinunif()
    T.test112_F90_TS_forced_displacements()

    #T=Test112_Cardona()
    #T.setUp()
    #xbout=T.test112_Cardona_shear_locking()

    # still in progress
    #T=Test312()
    #T.setUp()
    #xbout=T.test312_TS_ImpTrue_FollRigTrue()

    unittest.main()
| [
"salvatore.maraniello10@imperial.ac.uk"
] | salvatore.maraniello10@imperial.ac.uk |
555944a65f31514e412fa766e9e27d9cd39d8748 | a8cf6677f22f149e1282fc990d22f2687133b748 | /project_name/Scripts/pip3-script.py | 8e9273dc301c71c43e4076ad70f2b0350efc6eb7 | [] | no_license | Aaron-Xyl/second_project | f3661ed67e3d069be8a7c3271bd552a626280c46 | 072ff3ce3605afa000d4e8ddad2df289eeacaaaf | refs/heads/master | 2020-07-11T04:40:46.973346 | 2019-08-26T10:53:39 | 2019-08-26T10:53:39 | 204,447,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | #!G:\python\Project\placeholder\project_name\Scripts\python3.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper for the 'pip3' entry point.
# Do not edit by hand: it is regenerated on (re)installation of pip.
__requires__ = 'pip==19.0.3'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # strip the '-script.py(w)' / '.exe' suffix so pip sees a clean argv[0]
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # resolve and invoke pip's console entry point; propagate its exit code
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"18898532715@163.com"
] | 18898532715@163.com |
c288b826ea0a0ad8b0d56dcfa02d79146f876519 | ffe7c8ccdd7343b52c4873629d581c250643b8f2 | /src/main/resources/ExampleResources/CraftTaskExample/NaoRest.py | 4a71e06cd60051dfaba8cfd8f9c15a57f335c4bc | [
"MIT"
] | permissive | magkai/MultimodalFissionFramework | 032f48554843f78d692788ca97183e28291c5d7e | e991b1ef008dee2d33671390e1a9cf4820f10fc5 | refs/heads/master | 2022-10-13T14:42:42.146941 | 2022-09-08T14:57:55 | 2022-09-08T14:57:55 | 91,969,895 | 0 | 0 | MIT | 2022-09-08T14:57:56 | 2017-05-21T16:04:40 | Java | UTF-8 | Python | false | false | 583 | py | # -*- encoding: UTF-8 -*-
import time
import motion
import argparse
from naoqi import ALProxy
def main(robotIP, PORT=9559):
    """Connect to the NAO robot's ALMotion service and put it in rest position.

    robotIP -- IP address of the robot
    PORT    -- NAOqi service port (default 9559)
    """
    motionProxy = ALProxy("ALMotion", robotIP, PORT)

    # Go to rest position (stiffness off, safe crouch)
    motionProxy.rest()

if __name__ == "__main__":
    # command-line interface: --ip / --port select the target robot
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="192.168.0.2",
                        help="Robot ip address")
    parser.add_argument("--port", type=int, default=9559,
                        help="Robot port number")

    args = parser.parse_args()
    main(args.ip, args.port)
| [
"s8makais@stud.uni-saarland.de"
] | s8makais@stud.uni-saarland.de |
1979e35639f3056a5948ce7a000da852b4811a53 | 5853c848509ce63a314d617f5a206231501acd86 | /contact_form/urls.py | ed413e55dea3fd4baeadf79c38229aa5d5fafa89 | [] | no_license | meto5578/ContactForm | da9fa6e171e5e125fde11234e63d929131f85b43 | 3d176d893b4d2bdc100b9e0478448ca11f26c84b | refs/heads/master | 2022-04-14T15:51:08.923609 | 2020-04-13T00:05:06 | 2020-04-13T00:05:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | """contact_form URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Project-level URL routing: admin site plus delegation to the Home app.
urlpatterns = [
    path('admin/', admin.site.urls),
    # all remaining routes are handled by the Home app's URLconf
    path("", include("Home.urls")),
]
| [
"47386523+MehmetBulut94@users.noreply.github.com"
] | 47386523+MehmetBulut94@users.noreply.github.com |
94f389ae6c93122142d74d64201db742d6feb15b | 286af5ae0e60bfd00dc6d3525bfde9779d18ed82 | /django_movie_project/asgi.py | 081daff73a8895202d842890032be5f5460f44b8 | [
"MIT"
] | permissive | Mazev/django_movie_project | 39e2d041499014f5f8a9ef859f44e015f3178abd | fa44a5b8ee804bb6315d270d62b9552560bd2b6f | refs/heads/main | 2023-07-09T10:25:14.952184 | 2021-08-14T10:43:54 | 2021-08-14T10:43:54 | 393,784,908 | 0 | 0 | MIT | 2021-08-14T10:42:54 | 2021-08-07T20:24:13 | HTML | UTF-8 | Python | false | false | 417 | py | """
ASGI config for django_movie_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# point Django at the project settings before building the application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_movie_project.settings')

# ASGI callable used by async-capable servers (e.g. daphne, uvicorn)
application = get_asgi_application()
| [
"77510376+Mazev@users.noreply.github.com"
] | 77510376+Mazev@users.noreply.github.com |
eae98955e6977895c270a381ad037dc674d3a82e | ad1771f15876b0c1caff78525f4af3632928a679 | /diarioweb/diarioweb/wsgi.py | 1ae6cf22d6c349c0c80cdd1ae4cd45296943e92f | [] | no_license | rennerocha/querido-diario-web | 096764329367a66f6b6ede1a2d8ce12e17ccbd9d | 97909c8370f02aed561c25a8fa681fb419037446 | refs/heads/main | 2023-02-07T07:43:25.331421 | 2020-12-28T20:18:16 | 2020-12-28T23:35:49 | 325,133,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for diarioweb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# point Django at the project settings before building the application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'diarioweb.settings')

# WSGI callable used by synchronous servers (e.g. gunicorn, mod_wsgi)
application = get_wsgi_application()
| [
"renne@rennerocha.com"
] | renne@rennerocha.com |
8fe8e2b9ddbc59b0638ef73a3b8fd69500039160 | 6127f45e641a2fca33f205a21585d3191600b565 | /src/mediaforeman/metadata_parsers/base_parser.py | 457d570db5dc36afb9a6c465be119019084e4fee | [] | no_license | KieranEMiller/media-foreman | 9e6d6f8a4816641a405f899b965e9c6ae68ca9d2 | 4084a72f65b49d0a69b6df8a01bbc37dc4dfdb2e | refs/heads/master | 2022-07-06T14:06:25.953514 | 2022-07-02T01:50:28 | 2022-07-02T01:50:28 | 230,265,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | from abc import abstractmethod
from PIL import Image
from io import BytesIO
class BaseParser(object):
def __init__(self, path):
self.Path = path
@abstractmethod
def ExtractProperties(self):
pass
@abstractmethod
def ExtractImageProperties(self):
pass
@abstractmethod
def SaveMetadata(self, mediaFle):
pass
def GetImageSizeFromByteArray(self, rawBytes):
img = Image.open(BytesIO(rawBytes))
width, height = img.size
img.close()
return width, height | [
"kieranemiller@gmail.com"
] | kieranemiller@gmail.com |
73b5b1f81a6dd6871e259d59ce4bde00db6baac5 | a1bf01f7319157bc539ee134be45a5c32399a7f0 | /article/migrations/0008_auto_20190331_1638.py | cf809b8c0440b5bffbbced5b5fe3e7b8585296f4 | [] | no_license | JusticeGu/mysite | 6129431ad68db9a6c76e6c4aa6da9d8e60613928 | fd967454e696f271b82d9c4512561e7b88832e7c | refs/heads/master | 2020-05-22T07:09:33.472790 | 2019-09-10T14:26:13 | 2019-09-10T14:26:13 | 186,221,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | # Generated by Django 2.1.7 on 2019-03-31 08:38
from django.db import migrations
class Migration(migrations.Migration):
    """Renames ArticlePost.author to "autror".

    NOTE(review): "autror" looks like a typo for "author".  Since this
    migration is already recorded in history, correct it with a follow-up
    RenameField migration rather than by editing this file.
    """
    dependencies = [
        ('article', '0007_auto_20190331_1630'),
    ]
    operations = [
        migrations.RenameField(
            model_name='articlepost',
            old_name='author',
            new_name='autror',
        ),
    ]
| [
"940331234@qq.com"
] | 940331234@qq.com |
030b4608bde34b24022a276aea7b20f89754712a | 5848e70fbb1f33785c625876002212d0d0fad197 | /test/celery_test.py | 5516f1d78b7ae264318c513fa794c0d4edf051e2 | [] | no_license | LineCrew/legacy-realtime-api | dd27d31a6681c7cf708fb9c86b1ce48132a022cb | 7f052231dc46046cd673a2d42c21737d1b73198b | refs/heads/master | 2022-12-11T02:42:29.202953 | 2018-07-11T03:41:21 | 2018-07-11T03:41:21 | 140,517,913 | 0 | 0 | null | 2022-01-06T22:25:33 | 2018-07-11T03:40:58 | Python | UTF-8 | Python | false | false | 772 | py | import tasks.matching_task
import redis
from domain.queue_match_model import QueueMatchModel
# Local Redis instance backing the match queue; decode_responses makes the
# client return str instead of bytes.
r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True)
# for i in range(100):
#     result = tasks.matching_task.add.delay(1, i)
#     print(result.get())
# NOTE(review): ``data`` and ``data_`` are built but never used below —
# presumably leftovers from an earlier version of this test.
data = {
    'socket_id': 1,
    'user_id': 1,
    'questionaire_id': 1
}
data_ = {
    'socket_id': 2,
    'user_id': 2,
    'questionaire_id': 1
}
# Seed four fake players onto the queue, then fire the Celery task that
# pairs them up.
r.lpush('game_match_queue', QueueMatchModel(1, 1, 1).get_session_model())
r.lpush('game_match_queue', QueueMatchModel(2, 2, 1).get_session_model())
r.lpush('game_match_queue', QueueMatchModel(3, 3, 1).get_session_model())
r.lpush('game_match_queue', QueueMatchModel(4, 4, 1).get_session_model())
tasks.matching_task.game_matching_task.delay()
| [
"topbladep@gmail.com"
] | topbladep@gmail.com |
df49b758b26b77aff034bf727799efd3ac11f336 | 7996a67b8037b88adf4af6b3551ca5d4e586b7d0 | /GettingStarted/war_cardgame.py | 76384228e38e21a9fccc4d39162cbf18d493c772 | [] | no_license | Azimovv/Learn | cc39d8df95c055a14b49236830318d2a9bf41b90 | 03d0e7fe33c2c84e6f21ad3b3eeb2f527b7ab198 | refs/heads/master | 2022-12-10T06:46:49.303302 | 2019-12-02T20:35:28 | 2019-12-02T20:35:28 | 215,112,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,682 | py | from random import shuffle
class Card:
    """A single playing card; ordering compares value first, then suit."""

    suits = ["Spades", "Hearts", "Diamonds", "Clubs"]
    values = [None, None, "2", "3", "4", "5", "6", "7", "8", "9", "10",
              "Jack", "Queen", "King", "Ace"]

    def __init__(self, value, suit):
        """suit and values should be integers"""
        self.value = value
        self.suit = suit

    def __lt__(self, other):
        # Lexicographic tuple comparison reproduces the original
        # value-first, suit-second ordering exactly.
        return (self.value, self.suit) < (other.value, other.suit)

    def __gt__(self, other):
        return (self.value, self.suit) > (other.value, other.suit)

    def __repr__(self):
        return self.values[self.value] + " of " + self.suits[self.suit]
class Deck:
    """A full 52-card deck, shuffled on creation."""

    def __init__(self):
        # One Card per (value, suit) combination, then randomised in place.
        self.cards = [Card(value, suit)
                      for value in range(2, 15)
                      for suit in range(4)]
        shuffle(self.cards)

    def remove_card(self):
        """Deal the top card, or None when the deck is exhausted."""
        if not self.cards:
            return None
        return self.cards.pop()
class Player:
    """A participant in the game: a name plus a running win count."""

    def __init__(self, name):
        self.name = name
        self.wins = 0
class Game:
    """Interactive two-player game of War driven by console input."""
    def __init__(self):
        # Prompt for both player names before dealing.
        name1 = input("Enter player 1's name: ")
        name2 = input("Enter player 2's name: ")
        self.deck = Deck()
        self.player1 = Player(name1)
        self.player2 = Player(name2)
    def winner(self, player1, player2):
        """Return the winning player's name, or a tie message."""
        if player1.wins > player2.wins:
            return player1.name
        if player1.wins < player2.wins:
            return player2.name
        return "It was a tie!"
    def play_game(self):
        """Draw card pairs until the deck runs out or the user enters 'q'.

        NOTE(review): the round is resolved *after* reading the quit key,
        so entering 'q' still plays one final round; and on a tie the
        closing message reads "It was a tie! wins".  Both quirks are kept
        as-is here.
        """
        cards = self.deck.cards
        print("Let's begin")
        response = None
        while len(cards) >= 2 and response != "q":
            response = input("Press 'q' to quit, any other key to play: ")
            player1_card = self.deck.remove_card()
            player2_card = self.deck.remove_card()
            print("{} drew {} and {} drew {}".format(self.player1.name, player1_card, self.player2.name, player2_card))
            # Equal (value, suit) pairs cannot occur in a single deck, so
            # the else branch covers exactly the "player 2 higher" case.
            if player1_card > player2_card:
                self.player1.wins += 1
                print ("{} wins this round".format(self.player1.name))
            else:
                self.player2.wins += 1
                print("{} wins this round".format(self.player2.name))
        print("The War is over! {} wins".format(self.winner(self.player1, self.player2)))
# Entry point: constructing Game prompts for both player names.
game = Game()
game.play_game() | [
"suggzugg7375@gmail.com"
] | suggzugg7375@gmail.com |
b1457b90a5e2770e3de9d457ab35dcacf14100bd | 161b246f8027953985a35bb9216ab54eaff92df8 | /HW4/task4/dumper.py | 756f7dce458fe1327411158fc6de04bde7e1cfa3 | [] | no_license | eliasel/TIF320 | 74824df212a8e6b5eaaeda5567ac4cddf2b97e6c | ee5664ed8883827c7841609135e3d5addce45a51 | refs/heads/master | 2023-03-17T19:34:57.220055 | 2021-03-12T16:32:57 | 2021-03-12T16:32:57 | 335,300,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | import numpy as np
from ase.units import Hartree
from gpaw.lrtddft import LrTDDFT
def dump_data(lr, fpath):
    '''
    NOTE: This dumps everything in atomic units
    Inputs:
        lr: GPAW LrTDDFT calculator
            If you saved the calculator to file (i.e. calcFile.dat)
            you can load it via calc=LrTDDFT('calcFile.dat')
        fpath: Path and filename you wish to dump to
    Keywords for dumped quantities:
        'i_p': Kohn-Sham state that gets excited from
        'a_p': Kohn-Sham state that gets excited to
        'ediff_p': Kohn-Sham eigenvalue differences for each KS excitation
        'fdiff_p': Kohn_sham occupation differe for each KS excitation
        'mux_p': x-component of dipole matrix elements
        'muy_p': y-component of dipole matrix elements
        'muz_p': z-component of dipole matrix elements
        'K_pp': K-matrix
    '''
    # Kohn-Sham electron-hole pairs
    kss = lr.kss
    Np = len(kss)
    # Read arrays
    # One entry per Kohn-Sham single-particle excitation p.
    i_p = np.zeros(Np, dtype=int)
    a_p = np.zeros(Np, dtype=int)
    w_p = np.zeros(Np)
    f_p = np.zeros(Np)
    # Dipole matrix elements: one (x, y, z) 3-vector per excitation.
    # NOTE(review): assumes ks.mur is a length-3 vector — confirm in GPAW.
    mu_vp = np.zeros((3, Np))
    for p, ks in enumerate(kss):
        i_p[p] = ks.i
        a_p[p] = ks.j
        w_p[p] = ks.energy
        f_p[p] = ks.fij
        mu_vp[:, p] = ks.mur
    # Read K matrix
    # Solves Omega = diag(w^2) + 2*sqrt(f*w) K sqrt(f*w) for K elementwise
    # via the outer product of sqrt(f*w).  NOTE(review): factor conventions
    # follow GPAW's lrtddft (Casida) implementation — confirm against docs.
    Omega_pp = lr.Om.full
    sqfw_p = np.sqrt(f_p * w_p)
    K_pp = 0.5 * (Omega_pp - np.diag(w_p**2)) / np.outer(sqfw_p, sqfw_p)
    np.savez_compressed(fpath, i_p=i_p, a_p=a_p, ediff_p=w_p, fdiff_p=f_p,
                        mux_p=mu_vp[0], muy_p=mu_vp[1], muz_p=mu_vp[2],
                        K_pp=K_pp)
# Driver: dump the 'be' calculation (LrTDDFT.read presumably loads a saved run).
dump_data(LrTDDFT.read('be'),'dumpsterboi.dump')
| [
"eeelmquist@gmail.com"
] | eeelmquist@gmail.com |
5a8a9d2bbb5ac5634c8f2cd4aef9284d82af5f21 | 598289b077a725ea5679d3d2d68fe9149f379c38 | /tune/db_workers/__init__.py | f0f424397e315fbb64f2d6a158a410623d8d5e78 | [
"Apache-2.0"
] | permissive | AlexisOlson/chess-tuning-tools | d66c053922c7c8cb5502b4b22e9a9a0b6366b04f | 8a66fdcb8e3ad3142f44a752399bede4ff2937ab | refs/heads/master | 2022-12-03T10:53:14.632540 | 2020-03-02T15:47:26 | 2020-03-02T15:47:26 | 273,330,229 | 0 | 0 | NOASSERTION | 2020-06-18T20:07:50 | 2020-06-18T20:07:49 | null | UTF-8 | Python | false | false | 80 | py | from .tuning_client import TuningClient
from .tuning_server import TuningServer
| [
"noreply@github.com"
] | AlexisOlson.noreply@github.com |
791544273fa07000a7943179b334d3c316425fed | 0ef4371c87c2196d9c2d2706e51f4b452f6e9d19 | /4_Curso/Proyecto_Sistemas_Informáticos/model_exam_2/venv/lib/python3.7/site-packages/PIL/_binary.py | e5ee0bf28dd3c05a2ceb20b96318b5dd3b1f2eb2 | [
"Apache-2.0"
] | permissive | AlejandroSantorum/Apuntes_Mat_IngInf | 49c41002314216a994aa60db04062e34abc065eb | c047e41d086f3028ec78ac3a663b9848862e52df | refs/heads/master | 2023-05-15T03:02:56.882342 | 2023-04-20T20:19:54 | 2023-04-20T20:19:54 | 212,392,195 | 29 | 10 | Apache-2.0 | 2023-09-09T13:03:45 | 2019-10-02T16:44:22 | Jupyter Notebook | UTF-8 | Python | false | false | 1,882 | py | #
# The Python Imaging Library.
# $Id$
#
# Binary input/output support routines.
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1995-2003 by Fredrik Lundh
# Copyright (c) 2012 by Brian Crowell
#
# See the README file for information on usage and redistribution.
#
from struct import unpack_from, pack
from ._util import py3
if py3:
    # Python 3: indexing bytes already yields ints, so accept either a
    # bare int or a bytes-like object and take its first element.
    def i8(c):
        return c if c.__class__ is int else c[0]
    def o8(i):
        return bytes((i & 255,))
else:
    # Python 2: byte strings index to 1-char strings, so go via ord()/chr().
    def i8(c):
        return ord(c)
    def o8(i):
        return chr(i & 255)
# Input, le = little endian, be = big endian
def i16le(c, o=0):
    """Return the unsigned 16-bit little-endian integer stored in *c*.

    :param c: bytes-like object containing the value
    :param o: byte offset of the value within *c*
    """
    (value,) = unpack_from("<H", c, o)
    return value
def si16le(c, o=0):
    """Return the signed 16-bit little-endian integer stored in *c*.

    :param c: bytes-like object containing the value
    :param o: byte offset of the value within *c*
    """
    (value,) = unpack_from("<h", c, o)
    return value
def i32le(c, o=0):
    """Return the unsigned 32-bit little-endian integer stored in *c*.

    :param c: bytes-like object containing the value
    :param o: byte offset of the value within *c*
    """
    (value,) = unpack_from("<I", c, o)
    return value
def si32le(c, o=0):
    """Return the signed 32-bit little-endian integer stored in *c*.

    :param c: bytes-like object containing the value
    :param o: byte offset of the value within *c*
    """
    (value,) = unpack_from("<i", c, o)
    return value
def i16be(c, o=0):
    """Return the unsigned 16-bit big-endian integer in ``c`` at offset ``o``."""
    return unpack_from(">H", c, o)[0]
def i32be(c, o=0):
    """Return the unsigned 32-bit big-endian integer in ``c`` at offset ``o``."""
    return unpack_from(">I", c, o)[0]
# Output, le = little endian, be = big endian
def o16le(i):
    """Pack ``i`` (0..65535) as 2 little-endian bytes."""
    return pack("<H", i)
def o32le(i):
    """Pack ``i`` (0..2**32-1) as 4 little-endian bytes."""
    return pack("<I", i)
def o16be(i):
    """Pack ``i`` (0..65535) as 2 big-endian bytes."""
    return pack(">H", i)
def o32be(i):
    """Pack ``i`` (0..2**32-1) as 4 big-endian bytes."""
    return pack(">I", i)
| [
"alejandro.santorum@gmail.com"
] | alejandro.santorum@gmail.com |
6cd5ade7cc061ed205f19d1a38b010589f340f11 | 3ee98ee366aa7a88226719c5c6f12dafe2660f3a | /vagrant/Plone-4.3.2-UnifiedInstaller/tests/testall.py | ea783666d8c040e7221767705732720d0d9657f4 | [] | no_license | hellfish2/pythoncantv | 19d87a34102198d16cdeae5dd970553dd28316cf | f140d8fa759d4c27117b5f24951b61c37cc00fea | refs/heads/master | 2021-01-18T14:18:39.394965 | 2013-11-23T08:40:26 | 2013-11-23T08:40:26 | 14,628,891 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | #!/usr/bin/env python
import doctest
doctest.ELLIPSIS_MARKER = '-etc-'
# Doctest option flags are bit masks and must be combined with bitwise OR;
# the old ``doctest.ELLIPSIS or doctest.NORMALIZE_WHITESPACE`` evaluated to
# just ELLIPSIS, silently dropping NORMALIZE_WHITESPACE.
doctest.testfile("tests.txt", optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
# Parenthesised form keeps this line valid under both Python 2 and 3.
print('Done.')
| [
"root@equipo01.cantv.com.ve"
] | root@equipo01.cantv.com.ve |
111e7f53dd9e97d2a78c1259ba38059220b75653 | d52b90bebf99c8de986cd0b78b03cfadcc22f9c2 | /code/read_data/get_artist_birthyears.py | 3c6474e5da6e98b2a91f664bcd9bdf3bca326911 | [] | no_license | benpry/COG403-songlyrics | 04a8fcd835572eaaf60d0ffabd5145d026514295 | 19f5afecf35dbf3c98a1dfd0b93db571b8c4ea33 | refs/heads/main | 2023-04-12T23:08:16.100889 | 2021-04-15T01:40:44 | 2021-04-15T01:40:44 | 343,598,281 | 0 | 0 | null | 2021-04-14T07:38:54 | 2021-03-02T00:35:22 | TeX | UTF-8 | Python | false | false | 3,004 | py | import requests
import bs4
import string
import csv
ARTISTS_PATH = "../../data/processed/artists.csv"
OUTPUT_PATH = "../../data/processed/artist_birthyears.csv"
if __name__ == "__main__":
# Function to lookup birthyears
def lookup(name):
str(name)
print('Searching ', name)
# Check if artist has a wikipedia page
url_end = name.replace(" ", "_")
url = "https://en.wikipedia.org/wiki/" + url_end
response = requests.get(url)
if response.status_code == 200:
# Collect text from the first 2 paragraphs on their page
html = bs4.BeautifulSoup(response.text, 'html.parser')
paragraphs = html.select("p")
intro = '\n'.join([ para.text for para in paragraphs[0:2]])
# Search for first occurence of word "born"
b = intro.find(" born ")
if b != -1:
# Collect words from the 150 characters following "born"
remove = dict.fromkeys(map( ord, string.punctuation))
txt = intro[b:b+150].translate(remove).lower()
txt_split = txt.split()[1:]
print(txt_split)
# Grab birthyear from those words
year = ""
i = 0
while year == "" and i < len(txt_split):
# Set current word
word = txt_split[i]
i += 1
# Look for a year
if word.isnumeric() and 4 <= len(word) and len(word) <= 6:
# Any 4 digit number is potential year
if len(word) == 4:
potential_year = int(word)
# If there's a 5 or 6 digit number - it's possibly a year with footnote
elif word.isnumeric() and ( len(word) == 5 or len(word) == 6 ):
# Check if the first 4 digits form a valid year
potential_year = int(word[0:4])
# Check that the number is a valid year
if potential_year > 1900 and potential_year < 2020:
return potential_year
# Returns 0 if birthyear wasn't found
return 0
# Get all artists
artists = []
fp = open(ARTISTS_PATH, "r", encoding="utf-8")
lines = fp.readlines()
artists = [line.rstrip('\n') for line in lines]
# Get birthyears
artist_birthyears = []
# For running in batches:
# for artist in artists[14000:]:
for artist in artists:
birthyear = lookup(artist)
if birthyear != 0:
artist_birthyears.append([artist, birthyear])
# Write to file
# For running in batches:
# with open(OUTPUT_PATH, 'a') as outfile:
with open(OUTPUT_PATH, 'w') as outfile:
mywriter = csv.writer(outfile)
for i in artist_birthyears:
mywriter.writerow(i) | [
"riaprakash.1999@gmail.com"
] | riaprakash.1999@gmail.com |
a2be2185fe3647cec92cc450fe6ff4f92f11d6dc | ce3b659f6b87b4379c38ef2b725cb6513f278b62 | /run_pattern.py | 1c43f7d620c12245c658bac97e9a6eb221b39190 | [] | no_license | patricknaughton01/LEDTileControl | 5259f84fda06043171cbfa040a01a3e759254a4e | 34f9eb3fabf837ce909bbcca520775171fe51172 | refs/heads/master | 2020-04-02T15:49:35.989674 | 2018-11-07T20:23:53 | 2018-11-07T20:23:53 | 154,585,265 | 0 | 0 | null | 2018-10-25T00:08:13 | 2018-10-25T00:08:13 | null | UTF-8 | Python | false | false | 2,189 | py | #!/usr/bin/env python3
import argparse
import importlib
import random
import numpy as np
import time
import tile
def run_pattern(board, leds, pattern, extra_args):
    """Run one display pattern in a loop until the user hits Ctrl-C.

    ``pattern`` is a module name inside the ``patterns`` package; its
    ``display(board, leds, *extra_args)`` is called repeatedly.
    """
    pattern = importlib.import_module("patterns." + pattern)
    try:
        while True:
            pattern.display(board, leds, *extra_args)
    except KeyboardInterrupt:
        # black the display
        black = np.tile([0, 0, 0], board.shape).astype(np.uint8)
        try:
            leds.draw(black)
            # Drop the reference so the LED driver can release the strip.
            del leds
        except KeyboardInterrupt:
            # A second Ctrl-C during cleanup: give up on blanking quietly.
            pass
def shuffle(board, leds):
    """Cycle forever through a fixed pattern list, one random pick at a time."""
    patterns = [
        ('checkerboard', []),
        ('patrickstar', []),
        ('text', ['Vertigo']),
    ]
    switch_time = 10  # seconds each chosen pattern stays on screen
    while True:
        start = time.monotonic()
        name, extra_args = random.choice(patterns)
        # import_module caches, so re-importing a seen pattern is cheap.
        pattern = importlib.import_module("patterns." + name)
        # run for switch_time seconds
        while time.monotonic() - start < switch_time:
            pattern.display(board, leds, *extra_args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Display a pattern on the dancefloor')
    parser.add_argument('pattern', help='Name of pattern file')
    parser.add_argument('extra_args', help='Arguments for the pattern', nargs='*')
    parser.add_argument('--rows', help='Number of tile rows', type=int, default=1)
    parser.add_argument('--cols', help='Number of tile columns', type=int, default=1)
    parser.add_argument('--height', help='LEDs per tile row', type=int, default=10)
    parser.add_argument('--width', help='LEDs per tile column', type=int, default=10)
    parser.add_argument('--sim', help='Simulation strategy', choices=('OpenCV', 'Matplotlib'))
    args = parser.parse_args()
    board = tile.TileArray(rows=args.rows, cols=args.cols, height=args.height, width=args.width)
    # Pick an output backend: a simulator window when --sim is given,
    # otherwise the physical LED strip.
    if args.sim == 'OpenCV':
        leds = tile.LEDSimulatorCV(board)
    elif args.sim == 'Matplotlib':
        leds = tile.LEDSimulatorMatplotlib(board)
    else:
        leds = tile.LEDStrip(board)
    # 'shuffle' is a built-in meta-pattern, not a module in patterns/.
    if args.pattern == 'shuffle':
        shuffle(board, leds)
    else:
        run_pattern(board, leds, args.pattern, args.extra_args)
| [
"cma2714@gmail.com"
] | cma2714@gmail.com |
c42cbac977750cf71cbb4751ad98b9561814b1b3 | e6b6fa1135c37467d3081afcfadd5c44b41dee7d | /buton.py | a1cf45c8c18122a4bd8b5a1ed7f775442c94a3dc | [] | no_license | AlaaMarawi/Automatic-Door_Web-controlled | c6d261f7a2b8c2d6970b15dc17618dd3d04b110c | e5ea87fd4b4aebe4c5768ad99a3901b005d04915 | refs/heads/master | 2020-06-10T20:54:14.343291 | 2019-06-25T20:39:43 | 2019-06-25T20:39:43 | 193,744,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | import RPi.GPIO as GPIO
import time

# GPIO 18 is wired to a button with the internal pull-up enabled: the pin
# reads 1 while released and 0 while pressed (button pulls it to ground).
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
try:
    # Poll and echo the button state until the user presses Ctrl-C.
    while True:
        print(GPIO.input(18))
        # Short delay so the loop doesn't busy-spin at 100% CPU
        # (``time`` was imported but never used before).
        time.sleep(0.1)
except KeyboardInterrupt:
    # Only Ctrl-C is an expected exit; the old bare ``except`` silently
    # swallowed every error, hiding real failures.
    pass
finally:
    # Always release the GPIO pins on exit.
    GPIO.cleanup()
| [
"noreply@github.com"
] | AlaaMarawi.noreply@github.com |
33ef7fc60c8ad55c61511844539b647bc0fde222 | 27d53f727d838322d87cc055789125f04350782c | /SubstMatrix.py | 3c58455251f6b42ef65e90116eab05bd3076f94c | [] | no_license | CarinaAfonso/Trabalho_AASB | 3e416459928803bcb1fcac889c40030bd6828a21 | 6ce4f7cac96c3bf6ac0bfdbe28de12da14a569a4 | refs/heads/master | 2020-12-26T12:23:12.494006 | 2020-02-03T15:23:43 | 2020-02-03T15:23:43 | 237,508,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,948 | py |
class SubstMatrix:
    """Substitution matrix over an alphabet, mapping character pairs to scores."""
    def __init__(self):
        self.alphabet = ""  # concatenation of the alphabet's symbols
        self.sm = {}        # maps two-character strings "XY" to int scores
    def loadFromFile(self, filename, sep):
        """Load a square matrix from a text file.

        The first line lists the alphabet symbols separated by ``sep``;
        each following line holds one row of integer scores in the same
        symbol order.  Returns None.
        """
        # ``with`` guarantees the handle is closed even if parsing fails
        # (the previous version closed it only on the success path).
        with open(filename, "r") as f:
            tokens = f.readline().split(sep)
            # The first character of each token is the alphabet symbol.
            self.alphabet = "".join(token[0] for token in tokens)
            for i in range(len(tokens)):
                row = f.readline().split(sep)
                for j in range(len(row)):
                    self.sm[self.alphabet[i] + self.alphabet[j]] = int(row[j])
        return None
    def createFromMatchPars(self, match, mismatch, alphabet):
        """Build a uniform matrix: ``match`` on the diagonal, ``mismatch`` elsewhere."""
        self.alphabet = alphabet
        for c1 in alphabet:
            for c2 in alphabet:
                self.sm[c1 + c2] = match if c1 == c2 else mismatch
        return None
    def scorePair(self, c1, c2):
        """Score for aligning c1 with c2, or None if either symbol is unknown."""
        if c1 not in self.alphabet or c2 not in self.alphabet:
            return None
        return self.sm[c1 + c2]
    def __getitem__(self, ij):
        # Allows matrix["A", "C"] as a shorthand for scorePair("A", "C").
        i, j = ij
        return self.scorePair(i, j)
def test1():
    """Smoke-test loadFromFile against a tab-separated BLOSUM62 file.

    Requires ``blosum62.mat`` in the working directory; the expected
    output is recorded in the comments below this function.
    """
    sm = SubstMatrix()
    sm.loadFromFile("blosum62.mat", "\t")
    print(sm.alphabet)
    print(sm.scorePair("G", "M"))
    print(sm.scorePair("W", "W"))
    print(sm.scorePair("A", "S"))
    print(sm.scorePair("X", "X"))
    print(sm["G","K"])
    print(sm["T","T"])
# Result test 1
# ARNDCQEGHILKMFPSTWYV
# -3
# 11
# 1
# None
# -2
# 5
def test2():
    """Smoke-test createFromMatchPars on an RNA alphabet (match=3, mismatch=-1)."""
    matrix = SubstMatrix()
    matrix.createFromMatchPars(3, -1, "ACGU")
    print(matrix.alphabet)
    for a, b in (("A", "A"), ("A", "U"), ("T", "T")):
        print(matrix.scorePair(a, b))
    print(matrix["G", "G"])
# Result test 2
# ACGU
# 3
# -1
# None
# 3
# Run both smoke tests when executed directly
# (test1 needs blosum62.mat in the working directory).
if __name__ == "__main__":
    test1()
    print()
    test2()
| [
"noreply@github.com"
] | CarinaAfonso.noreply@github.com |
3649ead4f7799fb73bd6795921280b712cd6ee57 | a884039e1a8b0ab516b80c2186e0e3bad28d5147 | /Livros/Livro-Introdução à Programação-Python/Capitulo 7/Exercicios 7/Exercicio7_6.py | 165f405bfbe336d2a586579cf8a0d6182a73d80e | [
"MIT"
] | permissive | ramonvaleriano/python- | 6e744e8bcd58d07f05cd31d42a5092e58091e9f0 | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | refs/heads/main | 2023-04-10T14:04:24.497256 | 2021-04-22T18:49:11 | 2021-04-22T18:49:11 | 340,360,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | # Program: Exercicio7_6.py
# Author: Ramon R. Valeriano
# Description:
# Developed: 16/05/2020 - 19:51
# Updated:
# Read the three names (prompts kept in Portuguese, as in the exercise).
# input() already returns str, so the old str(...) wrappers were redundant.
s1 = input("Entre com o primeiro nome: ")
s2 = input("Entre com o segundo nome: ")
s3 = input("Entre com o terceiro nome: ")
# Replace every character of s1 that also occurs in s2 with the first
# character of s3; all other characters are kept unchanged.  The dead
# bookkeeping variables of the old version (quantity, wt, cont and the
# find() loop) had no effect on the output and were removed.
new = "".join(s3[0] if ch in s2 else ch for ch in s1)
print(new)
| [
"rrvaleriano@gmail.com"
] | rrvaleriano@gmail.com |
cb1c7b97a3dd6c10dcb15ace6f14fe3dd98a585e | be0dd4305db0ed58693af484edf89b4f94765f92 | /update_HSintersigma.py | 15ba23d8250ee5f11af15a710ea69c9d22f7b343 | [] | no_license | georgezhou/hsfu23 | 44468e2604dff31b7f5b024d25d17904f18caaf8 | 8184b23f1d3333e0fe7664db4aeb253a3caf726f | refs/heads/master | 2020-04-15T01:40:46.478620 | 2015-04-15T06:40:20 | 2015-04-15T06:40:20 | 2,831,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,873 | py | import functions
import mysql_query
from numpy import *
import os
import sys
import string
import pyfits
###################
### Description ###
###################
### Update hatsouth.intersigma
########################
### Start of program ###
########################
# Python 2 script (print statements and raw_input below).
# UT date window and SQL query for ANU23 spectra of HATS objects.
start_date = "2013-05-24"
end_date = "2013-05-25"
query_entry = "select SPECtype,SPECobject,SPECmjd,SPEChjd,SPECrv,SPECrv_err,SPECtelescope,SPECresolution,SPECteff,SPECteff_err,SPEClogg,SPEClogg_err,SPECfeh,SPECfeh_err,SPECccfheight,SPECexptime,SPECsn"
query_entry = query_entry + " from SPEC where SPECutdate >= \""+start_date+"\" and SPECutdate <=\""+end_date+"\" and SPECobject like \"HATS%\" and SPECtelescope=\"ANU23\""
#query_entry = query_entry + " from SPEC where SPECobject = \"HATS563-025\" "
exposure_info = mysql_query.query_hsmso(query_entry)
#print exposure_info
if len(exposure_info) > 0:
    output = ""
    for entry in exposure_info:
        # Radial-velocity rows: one whitespace-separated output line each.
        if entry[0] == "RV":
            output = output + entry[1] + " " #name
            output = output + str(2400000 + entry[3]) + " " #hjd
            output = output + str(entry[4]) + " " #RV
            output = output + str(entry[5]) + " " #RVerr
            output = output + str(entry[6]) + " " #tel
            output = output + str(entry[7]) + " " #res
            output = output + "0 0 0 0 0 0 0 0 " #teff terr logg loggerr feh feherr vrot vroterr
            output = output + str(entry[14]) + " " #ccfheight
            output = output + str(entry[15]) + " " #exptime
            output = output + str(entry[16]) + "\n" #S/N
        # Stellar-parameter rows: fixed placeholder errors and -999/0 for
        # the columns that do not apply.
        if entry[0] == "ST":
            output = output + entry[1] + " " #name
            output = output + str(2400000 + entry[2]) + " " #mjd
            output = output + "-999 -999 " #rv rverr
            output = output + str(entry[6]) + " " #tel
            output = output + str(entry[7]) + " " #res
            output = output + str(entry[8]) + " 300 " #teff
            #output = output + str(entry[9]) + " "
            output = output + str(entry[10]) + " 0.3 " #logg
            #output = output + str(entry[11]) + " "
            output = output + str(entry[12]) + " 0.5 " #feh
            output = output + "0 0 0 " #feh feherr vrot vroterr ccfheight
            output = output + str(entry[15]) + " " #exptime
            output = output + str(entry[16]) + "\n" #S/N
    # Dump the assembled table, preview it, and optionally upload/clean up.
    output_file = open("HSintersigma_output","w")
    output_file.write(output)
    output_file.close()
    print output
    print "No. of observations: ",len(exposure_info)
    update = raw_input("Update hatsouth.intersigma webpage? (y/n): ")
    if update == "y":
        os.system("./insertHSvelocities.py HSintersigma_output")
    remove_temp = raw_input("Remove HSintersigma_output temporary ascii file? (y/n): ")
    if remove_temp == "y":
        os.system("rm HSintersigma_output")
####################
### RV Standards ###
####################
# Same extraction repeated for a fixed list of RV standard stars on the
# echelle instrument (duplicated logic — candidate for a shared helper).
query_entry = "select SPECtype,SPECobject,SPECmjd,SPEChjd,SPECrv,SPECrv_err,SPECtelescope,SPECresolution,SPECteff,SPECteff_err,SPEClogg,SPEClogg_err,SPECfeh,SPECfeh_err,SPECccfheight,SPECexptime,SPECsn"
query_entry = query_entry + " from SPEC where SPECutdate >= \""+start_date+"\" and SPECutdate <=\""+end_date+"\" and SPECinstrum=\"echelle\""
query_entry = query_entry + " and (SPECobject=\"HD100623\""
query_entry = query_entry + " or SPECobject=\"HD97343\""
query_entry = query_entry + " or SPECobject=\"HD96700\""
query_entry = query_entry + " or SPECobject=\"HD37213\""
query_entry = query_entry + " or SPECobject=\"HD34721\""
query_entry = query_entry + " or SPECobject=\"HD196761\""
query_entry = query_entry + " or SPECobject=\"HD189625\""
query_entry = query_entry + " or SPECobject=\"HD198802\""
query_entry = query_entry + ")"
exposure_info = mysql_query.query_hsmso(query_entry)
#print exposure_info
if len(exposure_info) > 0:
    output = ""
    for entry in exposure_info:
        if entry[0] == "RV":
            output = output + entry[1] + " " #name
            output = output + str(2400000 + entry[3]) + " " #hjd
            output = output + str(entry[4]) + " " #RV
            output = output + str(entry[5]) + " " #RVerr
            output = output + str(entry[6]) + " " #tel
            output = output + str(entry[7]) + " " #res
            output = output + "0 0 0 0 0 0 0 0 " #teff terr logg loggerr feh feherr vrot vroterr
            output = output + str(entry[14]) + " " #ccfheight
            output = output + str(entry[15]) + " " #exptime
            output = output + str(entry[16]) + "\n" #S/N
        if entry[0] == "ST":
            output = output + entry[1] + " " #name
            output = output + str(2400000 + entry[2]) + " " #mjd
            output = output + "-999 -999 " #rv rverr
            output = output + str(entry[6]) + " " #tel
            output = output + str(entry[7]) + " " #res
            output = output + str(entry[8]) + " 300 " #teff
            #output = output + str(entry[9]) + " "
            output = output + str(entry[10]) + " 0.3 " #logg
            #output = output + str(entry[11]) + " "
            output = output + str(entry[12]) + " 0.5 " #feh
            output = output + "0 0 0 " #feh feherr vrot vroterr ccfheight
            output = output + str(entry[15]) + " " #exptime
            output = output + str(entry[16]) + "\n" #S/N
    # As above: dump, preview, optionally upload and clean up.
    output_file = open("HSintersigma_output","w")
    output_file.write(output)
    output_file.close()
    print output
    print "No. of observations: ",len(exposure_info)
    update = raw_input("Update hatsouth.intersigma webpage? (y/n): ")
    if update == "y":
        os.system("./insertHSvelocities.py HSintersigma_output")
    remove_temp = raw_input("Remove HSintersigma_output temporary ascii file? (y/n): ")
    if remove_temp == "y":
        os.system("rm HSintersigma_output")
| [
"george@mso.anu.edu.au"
] | george@mso.anu.edu.au |
3e95b1fcd6eb9ba540155a5e5a08c8373ce2af48 | 6aced15ae21ee17548ca1507fdbdfcbc542676d4 | /Python dla każdego/Chapter11/high_score.py | 19041af43af8d9c2799f9182edd198c758743ffe | [] | no_license | crazydeveloper09/Praktyki-zawodowe | 5c5a13e6a7167ad81ce31ee132eeb3d21a88ae0a | 16ea62308bf11db2af6b17e6eeaa72b66ac8438d | refs/heads/main | 2023-01-03T16:15:26.561050 | 2020-10-28T13:33:34 | 2020-10-28T13:33:34 | 301,637,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | #!/usr/bin/env python3
# Author: Maciej Kuta
# Created: 12.10.2020
# Purpose: creating a text element (score readout) on the game screen
from superwires import games,color
import pygame
# 640x480 window refreshed at 50 frames per second.
games.init(screen_width = 640, screen_height = 480, fps=50)
wall_image = games.load_image("sciana.jpg")
games.screen.background = wall_image
pizza_image = games.load_image("pizza.bmp")
pizza = games.Sprite(image=pizza_image, x = 390, y = 250)
games.screen.add(pizza)
# Static score text drawn near the top-right corner.
score = games.Text(
    value=33,
    size=20,
    color=color.black,
    x=530,
    y=30
)
games.screen.add(score)
games.screen.mainloop() | [
"maciekmessi9@gmail.com"
] | maciekmessi9@gmail.com |
8ff0c05390f0d5b6948b2b041e12f48526f1433d | 4080a29961f79c2294204355ff9bf8d9f5e09954 | /Djago Website-/djangoWebsite/apps/accounts/models.py | 727ab55415ddcd50e4a59a8256d80e3f087d2c68 | [] | no_license | ArmanSh7/Django-Tutorials | c5d4d493a354b7deb02935b30de745a84d548366 | 997db1d1af9b709a4c6e3759a86c0ff236ec426b | refs/heads/main | 2023-03-23T10:19:57.406338 | 2021-03-21T09:28:49 | 2021-03-21T09:28:49 | 349,938,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | from django.db import models
from django.contrib.auth.models import User
class UserInterest(models.Model):
    """An interest tag users can attach to their profiles."""
    name = models.CharField(max_length=64, unique=True)
    # Canonical lookup form of ``name``; the normalisation rule is applied
    # elsewhere — presumably lower-cased, confirm before relying on it.
    normalized_name = models.CharField(max_length=64, unique=True)
    def __str__(self):
        return self.name
class UserPersona(models.Model):
    """A named user persona (archetype) with a short description."""
    name = models.CharField(max_length=64, unique=True)
    # Canonical lookup form of ``name``; normalisation happens elsewhere.
    normalized_name = models.CharField(max_length=64, unique=True)
    description = models.CharField(max_length=200)
    def __str__(self):
        return self.name
class UserProfile(models.Model):
    """Extended per-user data, linked one-to-one to the auth User."""
    # owner
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name="profile")
    # settings
    # Whether the user's full name is displayed.
    is_full_name_displayed = models.BooleanField(default=True)
    # details
    bio = models.CharField(max_length=500, blank=True, null=True)
    website = models.URLField(max_length=200, blank=True, null=True)
    # Optional persona; the profile keeps a NULL reference if the persona
    # row is deleted (SET_NULL).
    persona = models.ForeignKey(
        UserPersona, on_delete=models.SET_NULL, blank=True, null=True
    )
    interests = models.ManyToManyField(UserInterest, blank=True)
| [
"noreply@github.com"
] | ArmanSh7.noreply@github.com |
0a515603f51658194edd388c66b4f36d49ad57ef | 303c62282728f3e480bf974328c41eb85b3d8fc3 | /LeftViewBST.py | 05607b17e2296d9e7b3606643d8804e0560bbb5a | [] | no_license | harshbhardwaj5/Coding-Questions- | e0b9036df048535f7932ff391f3364f74a2e66af | 4b8a0752236c51d05035a0d1fe97d128a30971ca | refs/heads/main | 2023-03-30T00:43:14.103788 | 2021-04-05T22:28:08 | 2021-04-05T22:28:08 | 354,985,778 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | # Python program to print left view of Binary Tree
# A binary tree node
class Node:
    """A binary-tree node holding a value and two child links."""

    def __init__(self, data):
        # Children start empty; callers attach subtrees directly.
        self.data = data
        self.left = None
        self.right = None
# Recursive function pritn left view of a binary tree
def leftViewUtil(root, level, max_level):
    """Print the first (leftmost) node encountered at each depth.

    ``max_level`` is a one-element list used as a mutable depth tracker:
    ``max_level[0]`` holds the deepest level printed so far.
    """
    if root is None:
        return
    # The first visit at a new depth is that depth's left-view node,
    # because the left subtree is always explored before the right one.
    if level > max_level[0]:
        print(root.data)
        max_level[0] = level
    leftViewUtil(root.left, level + 1, max_level)
    leftViewUtil(root.right, level + 1, max_level)
# A wrapper over leftViewUtil()
def leftView(root):
    """Print the left view of the tree rooted at ``root``."""
    leftViewUtil(root, 1, [0])
# Driver program to test above function
# Build the sample tree and print its left view (expected: 12, 10, 25).
root = Node(12)
root.left = Node(10)
root.right = Node(20)
root.right.left = Node(25)
root.right.right = Node(40)
leftView(root)
# This code is contributed by Nikhil Kumar Singh(nickzuck_007)
| [
"noreply@github.com"
] | harshbhardwaj5.noreply@github.com |
c09d6a929e893b7031daa3be70c2f035112b93bc | c9767c5a4c354aaaf520cb1c169445901dd18bc1 | /user_blog/migrations/0001_initial.py | 7076db18a50c7bc4b398d58d45116c83142ac801 | [] | no_license | Aroon-AD/local | db948cac38fa0336b6e368cd96ce15dac68a8ebc | ac0a7fd466949751a35d5c77e3fc0a01a0e47aed | refs/heads/master | 2023-02-23T06:21:45.007602 | 2021-01-26T06:02:58 | 2021-01-26T06:02:58 | 331,239,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | # Generated by Django 3.1.4 on 2021-01-19 16:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for user_blog: BlogModel and its CommentModel.

    NOTE(review): BlogModel's primary key is a plain IntegerField, so ids
    are NOT auto-generated — the application must assign them.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='BlogModel',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('blog_title', models.CharField(max_length=20)),
                ('blog', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='CommentModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('your_name', models.CharField(max_length=20)),
                ('comment_text', models.TextField()),
                # Comments cascade-delete with their parent blog post.
                ('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_blog.blogmodel')),
            ],
        ),
    ]
| [
"atleearun96@gmail.com"
] | atleearun96@gmail.com |
9338d824c4feb0eab748d2e861726eb5f66b642a | c72285f9b03ebe2f8b555ed841e72aa104c67a06 | /node_modules/socket.io/node_modules/socket.io-client/node_modules/ws/build/config.gypi | 97ac5ce24d5e208756895255d485cc7395948b2b | [
"MIT"
] | permissive | kurai021/Fireworks | 93b3053b0e60eba943ef76ec8adade3f1655d742 | 99b2873c684a703cc69389c5465bb5e8c4676afc | refs/heads/master | 2021-01-18T11:20:55.033052 | 2014-04-25T17:24:03 | 2014-04-25T17:24:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,024 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 47,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/home/richard/.node-gyp/0.10.20",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"pre": "",
"sign_git_tag": "",
"always_auth": "",
"user_agent": "node/v0.10.20 linux x64",
"bin_links": "true",
"description": "true",
"fetch_retries": "2",
"init_version": "0.0.0",
"user": "",
"force": "",
"ignore": "",
"cache_min": "10",
"editor": "vi",
"rollback": "true",
"cache_max": "null",
"userconfig": "/home/richard/.npmrc",
"coverage": "",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/home/richard/tmp",
"userignorefile": "/home/richard/.npmignore",
"yes": "",
"depth": "null",
"save_dev": "",
"usage": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/richard/.npm",
"npaturl": "http://npat.npmjs.org/",
"searchsort": "name",
"version": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "v0.10.20",
"tag": "latest",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/richard/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"samahel021@gmail.com"
] | samahel021@gmail.com |
9aace466d11401e0666e77d4323084d355944c09 | 31d87bbac1f17b89352a29c896a6b3b16b9a3bc1 | /fuga/cli/deploy.py | 6463609ece667762ea1d742c3da226bf09d8aa7e | [
"MIT"
] | permissive | ayemos/fuga | 21f75139a2ae62df6212a30d4ea414ce749137c5 | 674165a10252ec5568a470fa250cd0c078013f57 | refs/heads/master | 2020-06-21T17:17:07.381012 | 2019-08-26T08:52:56 | 2019-08-26T08:52:56 | 197,513,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | class DeployCommand:
    def __init__(self):
        # No configuration needed yet; kept for interface symmetry with run().
        pass
    def run(self):
        # Placeholder: the deploy logic is not implemented yet (this method
        # currently does nothing and returns None).  The docstring below
        # records the gcloud commands -- with unrendered cookiecutter
        # placeholders -- that this command is intended to execute.
        '''
        #!/usr/bin/env bash
        set -e
        gcloud composer environments storage dags import --project repro-lab --environment {{cookiecutter.composer_environment_name}} --location asia-northeast1 --source sql --destination {{cookiecutter.experiment_name}}
        gcloud composer environments storage dags import --project repro-lab --environment {{cookiecutter.composer_environment_name}} --location asia-northeast1 --source lib.py --destination {{cookiecutter.experiment_name}}
        gcloud composer environments storage dags import --project repro-lab --environment {{cookiecutter.composer_environment_name}} --location asia-northeast1 --source {{cookiecutter.experiment_name}}.py --destination {{cookiecutter.experiment_name}}
        '''
| [
"me@ayemos.me"
] | me@ayemos.me |
088e78d73350543e499629eb9c162d895b2e67ff | 833b985e9d6a333615b08a94c925ab67b24964f3 | /weathersite/settings.py | 9a909bd5aa9ec81c68f5ab63cc5ad32c804c616e | [] | no_license | detacirbaf/weathersite | 04a8e621e0787672afcee73ee04f54a07bc26fb8 | a36777d05a733df5f24a4ef5967b65bf4f4bbf41 | refs/heads/master | 2021-02-08T21:19:04.776977 | 2020-03-01T18:05:40 | 2020-03-01T18:05:40 | 244,198,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py | """
Django settings for weathersite project.
Generated by 'django-admin startproject' using Django 2.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; for any real
# deployment it should be read from the environment and this value rotated.
SECRET_KEY = 'hc70#r^3aajb2=k1e#i)#zlhj#-dfxf99vk-uo9x8lub4%!enk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): empty ALLOWED_HOSTS only works while DEBUG is True;
# production needs the served hostnames listed here.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'weatherapp',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'accounts.apps.AccountsConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'weathersite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'weathersite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite file in the project root -- fine for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
# Development email backend: messages are written to files under
# BASE_DIR/sent_emails instead of being sent over SMTP.
EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
EMAIL_FILE_PATH = os.path.join(BASE_DIR, "sent_emails")
| [
"34040782+detacirbaf@users.noreply.github.com"
] | 34040782+detacirbaf@users.noreply.github.com |
d8cb47692ef6d24baa04df42f4186887a5d5a330 | ea14b65717683a9dc9e8b3a377b61b3bf4e84e01 | /Problem2.py | 164f123acd0c95601f8b25196bb21edf6ff2d81f | [] | no_license | Jaysparkexel/Array-4 | fb0a110599c566bc0b7a47f6055c66089d5386dc | 2581b7ba7a70b0e35d402ec61877816d087d7b88 | refs/heads/master | 2020-08-24T20:17:07.622566 | 2019-10-23T03:27:07 | 2019-10-23T03:27:07 | 216,898,566 | 0 | 0 | null | 2019-10-22T19:56:49 | 2019-10-22T19:56:49 | null | UTF-8 | Python | false | false | 1,016 | py | # Time Complexity : O(N) (Where N is total number)
# Space Complexity : O(1)
# Did this code successfully run on Leetcode : Yes
# First find the local max by choosing the larger of the current number and the current number plus the local max.
# Then find the global max by taking the larger of the global max and the local max.
# By doing both operations together we don't have to store the local max for every index.
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Maximum subarray sum via Kadane's algorithm.

        O(n) time, O(1) extra space.  `best_here` is the largest sum of a
        subarray that ends at the current element; `best_overall` is the
        largest sum seen anywhere so far.  Assumes `nums` is non-empty.
        """
        best_here = best_overall = nums[0]
        for value in nums[1:]:
            # Either extend the running subarray or restart at this element.
            best_here = max(value, best_here + value)
            best_overall = max(best_overall, best_here)
        return best_overall
| [
"noreply@github.com"
] | Jaysparkexel.noreply@github.com |
93eacaab8b4f89012dc068a17ea94bc6fd04a15f | 114fdcc1fe09f2c0131ddfb324f11c38045961ac | /env/lib/python3.7/random.py | a3ab1e873cf9f3e16bd8e03c0891b2ad1d1f8732 | [] | no_license | pravee2667/QueueAPIPython | d06ff27986745fcfa6b96a4d71c11efbbfc84c8b | 4f150a7dfe47f0c69d082a28a8237756554e44da | refs/heads/master | 2020-11-30T00:38:55.664209 | 2020-05-22T11:59:36 | 2020-05-22T11:59:36 | 230,252,781 | 0 | 1 | null | 2020-07-22T13:28:02 | 2019-12-26T11:30:52 | JavaScript | UTF-8 | Python | false | false | 46 | py | /home/ubuntu/anaconda3/lib/python3.7/random.py | [
"praveen.sabhiniveeshu@pactera.com"
] | praveen.sabhiniveeshu@pactera.com |
abba0e0aa89930ba90e7590a28e68dcd60f2821f | de59ece5d773d8607ba7afe747088ff07062494c | /py-core/class-object/data-classes.py | 9653993b7025e8353497b9e2d6fafdcf7cfa964b | [] | no_license | loggar/py | 4094c6919b040dfc0bb5453dc752145b5f3b46ba | 1116969fa6de00bbc30fe8dcf6445aa46190e506 | refs/heads/master | 2023-08-21T16:47:41.721298 | 2023-08-14T16:12:27 | 2023-08-14T16:12:27 | 114,955,782 | 0 | 0 | null | 2023-07-20T15:11:04 | 2017-12-21T03:01:54 | Python | UTF-8 | Python | false | false | 657 | py | from dataclasses import dataclass
"""
Since version 3.7, Python offers data classes. There are several advantages over regular classes or other alternatives like returning multiple values or dictionaries:
- a data class requires a minimal amount of code
- you can compare data classes because __eq__ is implemented for you
- you can easily print a data class for debugging because __repr__ is implemented as well
- data classes require type hints, reduced the chances of bugs
"""
@dataclass
class Card:
    """A playing card; @dataclass generates __init__, __repr__ and __eq__."""
    rank: str
    suit: str
card = Card("Q", "hearts")
# Compare two separately constructed instances so the generated __eq__
# (field-by-field comparison) is what makes this True, not mere identity:
# the original compared `card == card`, which is True for any object.
print(card == Card("Q", "hearts"))
# True
print(card.rank)
# 'Q'
print(card)
# Card(rank='Q', suit='hearts')  <- this expected-output line was a bare
# expression in the original (missing the leading '#').
| [
"charly.loggar@gmail.com"
] | charly.loggar@gmail.com |
90ac2ad1e0b7063dfef73d132d9267d5d61701ca | 1b09b2fa631976e10214719eea6aaf4b2f7d3531 | /kdenlive2dvdchapter.py | 1d5bb8297c65e25d9963f52c056d0a1718407f66 | [] | no_license | mvo5/kdelive2dvdchapter | fba5a991610fc967120318f7381347902ed29ec5 | a425f1f1e9fad52a3193a3411677f80d99d51ced | refs/heads/master | 2023-08-27T09:06:18.432361 | 2016-10-07T21:19:26 | 2016-10-07T21:19:26 | 70,283,775 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | #!/usr/bin/python3
import os
import re
import sys
import locale
def extract_guides(fp):
    """Collect kdenlive guide markers from an iterable of project-file lines.

    Returns a dict mapping guide time in seconds (parsed with locale.atof,
    so the active locale's decimal separator applies) to the guide title.
    Lines without a guide property are ignored.
    """
    sec2title = {}
    for line in fp:
        m = re.search(r'<property name\="kdenlive\:guide\.([0-9.,]+)">(.*)</property>', line)
        if not m:
            continue
        sec2title[locale.atof(m.group(1))] = m.group(2)
    return sec2title


def render_chapters(sec2title, fps=25):
    """Render the guide dict as chapter XML; returns one multi-line string.

    Chapter times are converted from seconds to frame numbers at `fps`.
    A synthetic "Start" chapter at frame 0 is always emitted first.
    """
    # NOTE(review): the original emitted 'ftp="..."' -- almost certainly a
    # typo for 'fps', but kept verbatim in case a consumer expects it.
    lines = ['<chapters ftp="%s">' % fps]
    lines.append('    <chapter title="Start" time="0" />')
    for sec in sorted(sec2title):
        frame = int(sec * fps)
        lines.append('    <chapter title="%s" time="%s" />' % (sec2title[sec], frame))
    lines.append('</chapters>')
    return '\n'.join(lines)


if __name__ == "__main__":
    locale.setlocale(locale.LC_ALL, '')
    # BUGFIX: was "< 1", which can never be true since argv[0] is always the
    # script name -- a missing argument previously crashed on open() instead.
    if len(sys.argv) < 2:
        print("need kdenlive file as first argument")
        # BUGFIX: was os.exit(1); the os module has no exit() (AttributeError).
        sys.exit(1)
    with open(sys.argv[1]) as fp:
        sec2title = extract_guides(fp)
    print(render_chapters(sec2title))
| [
"mvo@ubuntu.com"
] | mvo@ubuntu.com |
274dcd0c52a91cf0f6087687e517b5b9b8a15589 | c583fbc131307c4f868c14c08525efec0f325bef | /moveElementToEnd.py | 6e8596bf7b67feb4777083e2e8c13ca912024c8c | [] | no_license | phongluudn1997/leet_code | 8c7ccc8597c754c3ce0f794667777c3acea50fa2 | c0fc0a53f44967b66afb101daaf6be05dedec24d | refs/heads/master | 2021-03-28T11:56:02.646965 | 2021-03-13T10:57:09 | 2021-03-13T10:57:09 | 247,861,580 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | """
Đề bài: Cho một array, một number, sắp xếp array để đẩy number ve phia cuoi array.
input: [2,1,2,2,2,3,4,2] vs 2
output: [1,3,4,2,2,2,2,2]
"""
def useTwoPointer(list, num):
left = 0
right = len(list) - 1
while left < right:
if list[right] == num:
right -= 1
if list[left] == num:
list[left], list[right] = list[right], list[left]
left += 1
right -= 1
else:
left += 1
return list
result = useTwoPointer([2, 1, 2, 2, 2, 3, 4, 2], 2)
print(result)
| [
"luuhuynh@me.com"
] | luuhuynh@me.com |
5342b8b9cb499b61fb7c6bed134972fa7ce0fdde | 8c41af5a759f5535bfaee668016d18c3133be0c7 | /develop_backward/some_backward_code.py | a5048a793666a14b4694ca1de2cc8108ec268a23 | [] | no_license | longsjj/gitLearnAgain | 99e6eea09cc408f392cddd011e279d72ce485f68 | a863eb0cb885a7815ca8571d2bf4680f7f20bcd1 | refs/heads/master | 2020-04-10T18:02:37.645169 | 2018-12-12T09:47:04 | 2018-12-12T09:47:04 | 161,192,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | #this code is about backward | [
"rongjunzeng@gmail.com"
] | rongjunzeng@gmail.com |
23229f1ad0dffaa8acbc6c521de65f535750f4ac | ae52435dd0ac9104fb55cfc0df307af6cd4fb09b | /amstar-02/day_03/code/exceptions_demo.py | 8a2f1f9025ef7063d9f9706c878dfc2c91fe75c4 | [] | no_license | mindful-ai/oracle | 54e79cc65153cf9df6c081a40313317893abc0c6 | 22749bf19ba06ad29e9ee149ace00a477e56f975 | refs/heads/master | 2020-12-28T01:07:26.021945 | 2020-05-08T12:37:04 | 2020-05-08T12:37:04 | 238,131,148 | 0 | 0 | null | 2020-02-07T22:30:47 | 2020-02-04T05:32:29 | Python | UTF-8 | Python | false | false | 511 | py | '''
try:
Logic to be tried
except:
What do you want to do if the exception occurs
else:
Statements to execute when you tried something and
no exception occured
finally:
Statements that define what will happen finally
'''
try:
    # Only the statement that can actually raise lives in the try body.
    # NOTE(review): hard-coded Windows path -- this demo only "succeeds" on
    # the author's machine.
    f = open(r"C:\Users\Purushotham\Desktop\oracle\d1.dat", "r")
#except Exception as E:
except FileNotFoundError:
    # Catch the narrowest exception class rather than a broad Exception.
    print('The file is not found!')
    #print(E)
else:
    # Runs only when the try body raised nothing.
    print('File found')
    f.close()
finally:
    # Runs on every path, success or failure.
    print('Done!')
| [
"noreply@github.com"
] | mindful-ai.noreply@github.com |
ba83ea879f56573a94677b5192c71ad98efbfb5f | ad59fb12042bfd3f5c43eca057d0f747f9e148cf | /MSNweather-NP/Components/Converter/MSNWeatherNP.py | 583508b5743ce65587ef197aea3f1bcf74cae7e3 | [] | no_license | lexlong2007/eePlugins | d62b787100a7069ad5713a47c5688008063b45ec | 167b262fe36901a2d3a2fae6d0f85e2307b3eff7 | refs/heads/master | 2022-03-09T05:37:37.567937 | 2022-02-27T01:44:25 | 2022-02-27T01:44:25 | 253,012,126 | 0 | 0 | null | 2020-04-04T14:03:29 | 2020-04-04T14:03:29 | null | UTF-8 | Python | false | false | 15,991 | py | # -*- coding: utf-8 -*-
#
# WeatherPlugin E2
#
# Coded by Dr.Best (c) 2012-2013, mod j00zek 2020-2021
# Support: www.dreambox-tools.info
# E-Mail: dr.best@dreambox-tools.info
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Multimedia GmbH.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Multimedia GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Multimedia GmbH.
#
# If you want to use or modify the code or parts of it,
# you have to keep MY license and inform me about the modifications by mail.
#
from __future__ import absolute_import #zmiana strategii ladowanie modulow w py2 z relative na absolute jak w py3
from Components.config import config
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.j00zekModHex2strColor import Hex2strColor
from Plugins.Extensions.MSNweather.__init__ import _
from os import path
import datetime
DBG=False # for quick debugs
class MSNWeatherNP(Converter, object):
CURRENT = -1
DAY1 = 1
DAY2 = 2
DAY3 = 3
DAY4 = 4
DAY5 = 5
DAY6 = 6
DAY7 = 7
DAY8 = 8
DAY9 = 9
DAY10 = 10
DAY11 = 11
DAY12 = 12
DAY13 = 13
DAY14 = 14
CITY = 15
TEMPERATURE_HEIGH = 16
TEMPERATURE_LOW = 17
TEMPERATURE_TEXT = 18
TEMPERATURE_CURRENT = 19
WEEKDAY = 20
WEEKSHORTDAY = 21
DATE = 22
OBSERVATIONTIME = 23
OBSERVATIONPOINT = 24
FEELSLIKE = 25
HUMIDITY = 26
WINDDISPLAY = 27
ICON = 28
TEMPERATURE_HEIGH_LOW = 29
CODE = 30
PATH = 31
FULLDATE = 32
WEATHERDICT = 33
DAILYDICT = 34
HOURLYDICT = 35
CURRENTDICT = 36
METEOGRAM = 37
def __init__(self, type):
Converter.__init__(self, type)
self.index = None
self.mode = None
self.mode2 = ''
self.path = None
self.extension = None
self.indexTXT = None
self.dictWeather = {}
self.dictWeatherRUNs = []
if type == "city": self.mode = self.CITY
elif type == "observationtime": self.mode = self.OBSERVATIONTIME
elif type == "observationpoint": self.mode = self.OBSERVATIONPOINT
elif type == "temperature_current": self.mode = self.TEMPERATURE_CURRENT
elif type == "feelslike": self.mode = self.FEELSLIKE
elif type == "humidity": self.mode = self.HUMIDITY
elif type == "winddisplay": self.mode = self.WINDDISPLAY
elif type.startswith("METEOGRAM"):
self.mode = self.METEOGRAM
elif type.startswith("DailyRecord="):
self.mode = self.DAILYDICT
self.mode2 = type.replace('Daily','')
elif type.startswith("HourlyRecord="):
self.mode = self.HOURLYDICT
self.mode2 = type.replace('Hourly','')
elif type.startswith("Current"):
self.mode = self.CURRENTDICT
self.mode2 = type.replace('Current','')
elif type.startswith("RUN|") or type.startswith("GET|"):
try:
self.dictWeatherRUNs = type.split('|')
self.dictWeatherRUNs.pop(0)
if len(self.dictWeatherRUNs) > 0:
self.mode = self.WEATHERDICT
for n, cmd in enumerate(self.dictWeatherRUNs):
#self.EXCEPTIONDEBUG('cmd= |%s|' % cmd)
if cmd.startswith("0x"):
self.dictWeatherRUNs[n] = Hex2strColor(int(cmd, 16))
except Exception as e:
self.EXCEPTIONDEBUG('__init__ Exception enumarating RUN|GET %s' % str(e))
else:
if type.find("weathericon") != -1: self.mode = self.ICON
elif type.find("temperature_high") != -1: self.mode = self.TEMPERATURE_HEIGH
elif type.find("temperature_low") != -1: self.mode = self.TEMPERATURE_LOW
elif type.find("temperature_heigh_low") != -1: self.mode = self.TEMPERATURE_HEIGH_LOW
elif type.find("temperature_text") != -1: self.mode = self.TEMPERATURE_TEXT
elif type.find("weekday") != -1: self.mode = self.WEEKDAY
elif type.find("weekshortday") != -1: self.mode = self.WEEKSHORTDAY
elif type.find("date") != -1: self.mode = self.DATE
elif type.find("fulldate") != -1: self.mode = self.FULLDATE
if self.mode is not None:
dd = type.split(",")
if len(dd) >= 2:
self.indexTXT = dd[1]
self.index = self.getIndex(self.indexTXT)
if self.mode == self.ICON and len(dd) == 4:
self.path = dd[2]
self.extension = dd[3]
    def EXCEPTIONDEBUG(self, myFUNC = '' , myText = '' ):
        # Unconditional debug logging for exception paths (ignores the DBG
        # switch and the plugin's debug config option).
        from Plugins.Extensions.MSNweather.debug import printDEBUG
        printDEBUG( myFUNC , myText , 'MSNWeatherConverter.log' )
    def DEBUG(self, myFUNC = '' , myText = '' ):
        # Conditional debug logging: active only when the module-level DBG
        # flag or the plugin's DebugMSNWeatherConverter config option is set.
        try:
            if DBG or config.plugins.MSNweatherNP.DebugMSNWeatherConverter.value:
                from Plugins.Extensions.MSNweather.debug import printDEBUG
                printDEBUG( myFUNC , myText , 'MSNWeatherConverter.log' )
        except Exception:
            # Deliberate best-effort: logging must never break the converter
            # (e.g. when the config option or debug module is missing).
            pass
def getIndex(self, key):
self.DEBUG('getIndex key="%s"' % key)
if key == "current": return self.CURRENT
elif key == "day1": return self.DAY1
elif key == "day2": return self.DAY2
elif key == "day3": return self.DAY3
elif key == "day4": return self.DAY4
elif key == "day5": return self.DAY5
elif key == "day6": return self.DAY6
elif key == "day7": return self.DAY7
elif key == "day8": return self.DAY8
elif key == "day9": return self.DAY9
elif key == "day10": return self.DAY10
elif key == "day11": return self.DAY11
elif key == "day12": return self.DAY12
elif key == "day13": return self.DAY13
elif key == "day14": return self.DAY14
return None
    @cached
    def getText(self):
        """Render this converter's text value for the configured mode.

        Simple modes delegate to a dedicated getter on the weather source;
        the *DICT modes evaluate "['key']..." paths against the source's
        weather dictionary.  On any error an empty string is returned and
        the error is only logged.
        """
        self.DEBUG('getText self.mode=%s, self.index=%s (%s)' %( self.mode, self.index, str(self.indexTXT)))
        retText = ''
        if self.mode == self.CITY:
            retText = self.source.getCity()
        elif self.mode == self.OBSERVATIONPOINT:
            retText = self.source.getObservationPoint()
        elif self.mode == self.OBSERVATIONTIME:
            retText = self.source.getObservationTime()
        elif self.mode == self.TEMPERATURE_CURRENT:
            retText = self.source.getTemperature_Current()
        elif self.mode == self.FEELSLIKE:
            retText = self.source.getFeelslike()
        elif self.mode == self.HUMIDITY:
            retText = self.source.getHumidity()
        elif self.mode == self.WINDDISPLAY:
            retText = self.source.getWinddisplay()
        # Forecast modes need a valid day index (note: CURRENT is -1, which is
        # intentionally "not None").
        elif self.mode == self.TEMPERATURE_HEIGH and self.index is not None:
            retText = self.source.getTemperature_Heigh(self.index)
        elif self.mode == self.TEMPERATURE_LOW and self.index is not None:
            retText = self.source.getTemperature_Low(self.index)
        elif self.mode == self.TEMPERATURE_HEIGH_LOW and self.index is not None:
            retText = self.source.getTemperature_Heigh_Low(self.index)
        elif self.mode == self.TEMPERATURE_TEXT and self.index is not None:
            retText = self.source.getTemperature_Text(self.index)
        elif self.mode == self.WEEKDAY and self.index is not None:
            retText = self.source.getWeekday(self.index, False)
        elif self.mode == self.WEEKSHORTDAY and self.index is not None:
            retText = self.source.getWeekday(self.index, True)
        elif self.mode == self.DATE and self.index is not None:
            retText = self.source.getDate(self.index)
        elif self.mode == self.FULLDATE and self.index is not None:
            retText = self.source.getFullDate(self.index)
        elif self.mode == self.WEATHERDICT:
            # Concatenate literal fragments and evaluated "['key']..." paths.
            try:
                for cmd in self.dictWeatherRUNs:
                    #self.DEBUG('cmd= ,%s,' % cmd)
                    if cmd.startswith("['"):
                        #self.DEBUG('running: %s' % cmd)
                        retText += self.source.dictWeather(cmd)
                    else:
                        retText += cmd
            except Exception as e:
                self.EXCEPTIONDEBUG('getText(WEATHERDICT) ','Exception %s running cmd %s' % (str(e), cmd))
        elif self.mode == self.DAILYDICT:
            # mode2 is "Record=<day>,<item>"; <item> selects a formatted view.
            try:
                mode2 = self.mode2.split(',')
                self.DEBUG('DAILYDICT','len(mode) =%s' % str(len(mode2)))
                if len(mode2) >= 2:
                    dictTree = "['dailyData']"
                    record = mode2[0]
                    #self.DEBUG('DAILYDICT ','record: %s' % str(record))
                    item = mode2[1]
                    #self.DEBUG('DAILYDICT ','item: %s' % str(item))
                    day = int(record.split('=')[1])
                    # Month name for "today + day", localized via the plugin's _().
                    Month = _((datetime.date.today() + datetime.timedelta(days=day)).strftime("%b"))
                    dictTree += "['%s']" % record
                    recordDict = self.source.dictWeather(dictTree)
                    #self.DEBUG('DAILYDICT ','recordDict:%s' % str(recordDict))
                    if item == 'date':
                        weekday = recordDict['weekday']
                        monthday = recordDict['monthday']
                        retText = str('%s. %s %s' % (weekday, monthday, Month))
                    elif item == 'info':
                        temp_high = recordDict['temp_high']
                        temp_low = recordDict['temp_low']
                        rainprecip = recordDict['rainprecip']
                        skytext = recordDict['skytext']
                        retText = str('%s/ %s/ %s\n%s' % (temp_high, temp_low, rainprecip, skytext))
                    elif item in ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'):
                        # NOTE(review): indexes the record dict by int position --
                        # assumes the source stores positional int keys as well;
                        # confirm against the weather source implementation.
                        retText = str('%s' % recordDict[int(item)].strip())
            except Exception as e:
                self.EXCEPTIONDEBUG('getText(DAILYDICT) ','Exception %s' % str(e))
        elif self.mode == self.HOURLYDICT:
            # mode2 is "Record=<n>" or "Record=<n>,<digit>".
            try:
                mode2 = self.mode2.split(',')
                self.DEBUG('getText(HOURLYDICT)','mode:%s, len(%s)' % (str(mode2),len(mode2)))
                dictTree = "['hourlyData']"
                record = mode2[0]
                self.DEBUG('getText(HOURLYDICT) ','record: %s' % str(record))
                dictTree += "['%s']" % record
                recordDict = self.source.dictWeather(dictTree)
                self.DEBUG('getText(HOURLYDICT) ','recordDict:%s' % str(recordDict))
                if len(mode2) == 1:
                    time = recordDict['time']
                    skytext = recordDict['skytext']
                    temperature = recordDict['temperature']
                    rainprecip = recordDict['rainprecip']
                    # 'Temp.'/'Opady' are Polish UI strings ("Opady" = precipitation).
                    retText = str('%s\n\n\n%s\nTemp. %s\nOpady %s' % (time, skytext, temperature, rainprecip))
                else:
                    item = mode2[1]
                    if item in ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'):
                        retText = str('%s' % recordDict[int(item)].strip())
            except Exception as e:
                self.EXCEPTIONDEBUG('getText(HOURLYDICT) ','Exception %s' % str(e))
        return str(retText)
    text = property(getText)
    @cached
    def getIconFilename(self):
        """Resolve the icon/image file path for the configured mode.

        ICON mode builds a path from the skin-supplied path/extension or the
        source's icon helpers; the *DICT modes read pre-resolved filenames
        from the weather dictionary, preferring weather-style icons over
        service icons depending on the plugin configuration.  Returns ''
        when nothing can be resolved.
        """
        self.DEBUG('getIconFilename >>> self.mode = %s , self.index = %s (%s)' % (self.ICON,self.index, str(self.indexTXT)) )
        retVal = ''
        # Icons exist only for "current" and the first five forecast days.
        if self.mode == self.ICON and self.index in (self.CURRENT, self.DAY1, self.DAY2, self.DAY3, self.DAY4, self.DAY5):
            self.DEBUG('\t self.mode = self.ICON')
            if self.path is not None and self.extension is not None:
                # Skin supplied an explicit icon directory and extension.
                self.DEBUG('\t self.path is not None and self.extension is not None')
                retVal = self.path + self.source.getCode(self.index) + "." + self.extension
            else:
                self.DEBUG('\t self.path is None and self.extension is None')
                retVal = self.source.getWeatherIconFilename(self.index)
                self.DEBUG('\t getWeatherIconFilename(%s) returned %s' % (self.index,retVal))
                if not retVal.endswith('.png'):
                    retVal = retVal + ".png"
                # A very short value is just a bare code -- prefix the icon dir.
                if len(retVal) <= 6:
                    retVal = self.source.getIconPath() + retVal
        elif self.mode == self.DAILYDICT:
            try:
                mode2 = self.mode2
                #self.DEBUG('DAILYDICT','mode2: %s' % str(mode2))
                dictTree = "['dailyData']"
                dictTree += "['%s']" % mode2
                recordDict = self.source.dictWeather(dictTree)
                #self.DEBUG('DAILYDICT ','recordDict:%s' % str(recordDict))
                if config.plugins.MSNweatherNP.IconsType.value != "serviceIcons":
                    # Prefer the weather-style icon when it exists on disk.
                    iconFileName = recordDict['iconfilename'].strip()
                    #self.DEBUG('DAILYDICT ','iconFileName:%s' % str(iconFileName))
                    if iconFileName.endswith('.png') and path.exists(iconFileName):
                        return str(iconFileName)
                #service icons or not found
                return str(recordDict['imgfilename'].strip())
            except Exception as e:
                self.EXCEPTIONDEBUG('getIconFilename(DAILYDICT) ','Exception %s' % str(e))
        elif self.mode == self.HOURLYDICT:
            try:
                mode2 = self.mode2
                self.DEBUG('HOURLYDICT','mode2: %s' % str(mode2))
                dictTree = "['hourlyData']"
                dictTree += "['%s']" % mode2
                recordDict = self.source.dictWeather(dictTree)
                self.DEBUG('HOURLYDICT ','recordDict:%s' % str(recordDict))
                if config.plugins.MSNweatherNP.IconsType.value != "serviceIcons":
                    iconFileName = recordDict['iconfilename'].strip()
                    self.DEBUG('HOURLYDICT ','iconFileName:%s' % str(iconFileName))
                    if iconFileName.endswith('.png') and path.exists(iconFileName):
                        return str(iconFileName)
                #service icons or not found
                return str(recordDict['imgfilename'].strip())
            except Exception as e:
                self.EXCEPTIONDEBUG('getIconFilename(HOURLYDICT) ','Exception %s' % str(e))
        elif self.mode == self.METEOGRAM:
            # Static meteogram image shipped with the plugin.
            retVal = '/usr/lib/enigma2/python/Plugins/Extensions/MSNweather/icons/meteogram.png'
        elif self.mode == self.WEATHERDICT:
            try:
                for cmd in self.dictWeatherRUNs:
                    self.DEBUG('cmd= |%s|' % str(cmd))
                    if cmd.startswith("['"):
                        # Optionally swap the hourly service image for the
                        # weather-style icon, per plugin configuration.
                        if config.plugins.MSNweatherNP.hIconsType.value == 'weatherIcons' and cmd.startswith("['hourlyData']['Record=") and cmd.endswith("['imgfilename']"):
                            cmd = cmd.replace("'imgfilename'","'iconfilename'")
                        self.DEBUG('running: |%s|' % str(cmd))
                        retVal = str(self.source.dictWeather(cmd))
                        self.DEBUG('received: |%s|' % retVal)
            except Exception as e:
                self.EXCEPTIONDEBUG('getIconFilename(WEATHERDICT) ' , 'Exception %s running cmd %s' % (str(e), cmd))
        self.DEBUG('\t Finally converter returns for index "%s" than icon is "%s"' % (self.index,retVal))
        return str(retVal)
    iconfilename = property(getIconFilename)
| [
"zdzislaw22@windowslive.com"
] | zdzislaw22@windowslive.com |
2e994f655c26627be306118eb78c3ec8cf94a5bf | 85b7340fc8b2294b50b4060d795e207e93d4f24c | /HongXiuSpd/spiders/fiction_free.py | da3c3dc7a82591354b0d20a6b415200a71f72592 | [] | no_license | Jerot-China/HongXiu | 2ec128ba78b17dc640dc3f2a6548dc581545ae83 | 181286af74d29515f394f83659932a256830cb99 | refs/heads/master | 2020-03-21T12:28:19.370872 | 2018-06-25T06:56:51 | 2018-06-25T06:56:51 | 138,554,140 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,265 | py | # -*- coding: utf-8 -*-
import scrapy
from ..items import HongxiuspdItem
from scrapy.http import Request
from lxml import etree
class FictionFreeSpider(scrapy.Spider):
    """Scrapy spider for the free-novel listing on hongxiu.com."""
    # NOTE(review): `global` statements in a class body are unusual -- these
    # names become module-level globals referenced by the methods below.
    global headers
    global gender_id
    global base_url
    base_url = 'https://www.hongxiu.com'
    name = 'fiction_free'
    # 1 = male ("boys") channel, 2 = female ("girls") channel
    gender_id = 1
    allowed_domains = ['www.hongxiu.com']
    start_urls = ['https://www.hongxiu.com/free/all?gender='+ str(gender_id)]
    headers = {"Accept":"application/json, text/javascript, */*; q=0.01",
               "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
    def parse(self, response):
        # Read the total item count and page size; floor-divide and add 1
        # to get the total number of listing pages.
        data_total = response.xpath("//div[@class='pagination']/@data-total").extract()[0]
        data_size = response.xpath("//div[@class='pagination']/@data-size").extract()[0]
        page = int(data_total)//int(data_size) + 1
        # NOTE(review): `page` is computed but range(1, 2) only visits page 1 --
        # presumably a debugging restriction; confirm before relying on it.
        for i in range(1,2):
            # Build the listing-page URL for page i.
            next_url = 'https://www.hongxiu.com/free/all?gender='+ str(gender_id) + '&pageNum=' + str(i)
            print(next_url)
            yield Request(next_url, callback=self.get_fiction_url, headers=headers, dont_filter=True)
    def get_message(self, response):
        # NOTE(review): `global item` makes the item a shared module global;
        # a local variable would avoid cross-request interference.
        global item
        # Scrape gender, title, category, status, word count and URL.
        item = HongxiuspdItem()
        item['gender'] = gender_id
        item['name'] = response.xpath("//div[@class='book-info']/h1/em/text()").extract()
        item['category'] = response.xpath("//span[@class='tag']/i[3]/text()").extract()
        item['status'] = response.xpath("//span[@class='tag']/i[1]/text()").extract()
        item['words'] = response.xpath("//p[@class='total']/span[1]/text()").extract()
        # The detail-page URL was passed along via Request meta.
        item['url'] = response.meta['fiction_url']
        # Scrape author(s), synopsis, click count and collection count.
        # Join the author strings, strip spaces and the trailing "authored by"
        # character, then split back into a list for item['author'].
        authors = response.xpath("//a[@class='writer default']/text()").extract()
        authors = ','.join(authors).replace(' ','').replace('著','')
        authors = authors.split(",")
        item['author'] = authors
        item['clicks'] = response.xpath("//p[@class='total']/span[3]/text()").extract()
        item['collections'] = response.xpath("//p[@class='total']/span[2]/text()").extract()
        item['introduce'] = response.xpath("//p[@class='intro']/text()").extract()
        # Chapter titles of the table of contents.
        directorys = response.xpath("//div[@class='volume']/ul/li/a/text()").extract()
        # Novel title (first extracted name).
        name = item['name'][0]
        # Store the table of contents as {title: [chapter, ...]}.
        item['directory'] = {name:directorys}
        return item
    def get_fiction_url(self, response):
        # Collect every novel detail-page link on the listing page.
        urls = response.xpath("//div[@class='right-book-list']/ul/li//div[@class='book-info']/h3/a/@href").extract()
        for url in urls:
            fiction_url = base_url + url
            # Pass the URL to the callback through Request meta.
            yield Request(fiction_url, callback=self.get_message, headers=headers, meta={"fiction_url":fiction_url})
| [
"445068326@qq.com"
] | 445068326@qq.com |
78cf3010a7d39bb3f60e80fc43d93261b2ee36c1 | 93ea897ba1e41357e0002aa0534173530356a28c | /jenkins-master/scripts/jenkins_add_user.py | 440a15d75d3fc22d862d5a6befd6cab136e97cb3 | [] | no_license | stanchan/docker-pipeline | 7c2fcefb1e7d1d406654695cbc8946a6492f21dd | 00655764103d2aadc8351df739329ad100c90328 | refs/heads/master | 2021-01-20T01:04:14.577821 | 2017-04-27T19:13:56 | 2017-04-27T19:13:56 | 89,216,182 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | import json
import requests
def main():
    # Registers a global SSH private-key credential in Jenkins' credential
    # store via the (legacy) credential-store HTTP form endpoint.
    # NOTE: this file is Python 2 code (see the `print` statement below).
    data = {
        "credentials": {
            "scope": "GLOBAL",
            "username": "jenkins",
            "privateKeySource": {
                # Placeholder key material ("X") -- replace with a real key.
                "privateKey": "-----BEGIN RSA PRIVATE KEY-----\nX\n-----END RSA PRIVATE KEY-----",
                "stapler-class": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey$DirectEntryPrivateKeySource"
            },
            "stapler-class": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey"
        }
    }
    # The endpoint expects a form field named "json" holding the JSON payload.
    payload = {
        "json": json.dumps(data),
        "Submit": "OK",
    }
    # NOTE(review): "127.0.0." looks like a truncated "127.0.0.1" -- confirm
    # before use; as written the request cannot resolve a valid host.
    r = requests.post("http://%s:%d/credential-store/domain/_/createCredentials" % ("127.0.0.", 8080), data=payload)
    if r.status_code != requests.codes.ok:
        print r.text
"stanchan@gmail.com"
] | stanchan@gmail.com |
ad8541f817967e57d0e063f5dfc2d2a85df8f005 | d3193f35a99693ff56265882c04342c8c25abb93 | /keyboards/regular/user/balance/__init__.py | bee2dd218b391f336239df8674af4c1e8ee5a6aa | [] | no_license | hero141lc/dice-lottery-bot | 66a7aeaa6b33cd7dce011f31a73aa421939cb61b | fa060e2b5d131dbbc2c2a75fc7ce771ca2d7555b | refs/heads/master | 2023-04-08T04:13:29.331787 | 2021-04-10T20:32:55 | 2021-04-10T20:32:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | from .default import balance_default_keyboard
from .confirm_for_refill import confirm_for_refill_keyboard
__all__ = [
'balance_default_keyboard',
'confirm_for_refill_keyboard'
]
| [
"aleokheen@gmail.com"
] | aleokheen@gmail.com |
a6acf8d89a41828c0923a9d523a6b0f8b4a22c57 | ae01f8a777bd3de349b5306cec2c821cb2a90b3b | /Quantopian1.py | c815ea5e4aca6aa3680ddb9f839732be6a387346 | [] | no_license | Sebas13/modulosPython | 06fde79795f2f12f34ef316f8ef127e7742d78a5 | e9e83837718b13a3c2fc8144b42938131a72de46 | refs/heads/master | 2022-02-14T18:42:02.165305 | 2019-07-31T23:04:51 | 2019-07-31T23:04:51 | 197,960,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,137 | py | from socketserver import StreamRequestHandler, TCPServer
from urllib.request import urlopen
import time
from functools import wraps
#
# class EchoHandler(StreamRequestHandler):
# def handle(self):
# for line in self.rfile:
# self.wfile.write(b'GOT:' + line)
#
#
# serv = TCPServer((' ', 15000), EchoHandler)
#
# class UrlTemplate:
# def __init__(self, template):
# self.template = template
#
# def open(self, **kwargs):
# return urlopen(self.template.format_map(kwargs))
#
# # Carrying extra state with call back functions
#
#
# class ResultHandler:
# def __init__(self):
# self.sequence = 0
#
# def handler(self,result):
# print('[{}] Got: {}'.format(self.sequence, result))
# self.sequence += 1
#
# def sample():
# n = 0
#
# # Closure Function
# def func():
# print('n=', n)
#
# def get_n():
# return n
#
# def set_n(value):
# nonlocal n
# n = value
#
#
#
# f = sample()
# f.set_n(25)
# f()
# Format codes accepted by Date.__format__.
_formats = {
    'ymd': '{d.year}-{d.month}-{d.day}',
    'mdy': '{d.month}/{d.day}/{d.year}',
    'dmy': '{d.day}/{d.month}/{d.year}'
}
class Date:
    """Simple date value supporting custom format codes ('ymd', 'mdy', 'dmy')."""
    def __init__(self, year, month, day):
        self.year = year
        self.month = month
        self.day = day
    def __format__(self, code):
        # BUGFIX: format(d) / '{}'.format(d) pass an EMPTY format spec, not a
        # single space; the original compared against ' ', so the 'ymd'
        # default never applied and _formats[''] raised KeyError.
        if code == '':
            code = 'ymd'
        fmt = _formats[code]
        return fmt.format(d=self)
d = Date(2012, 12, 21)
# format(d)
# print(format(d))
#Wrap a function with extra code
# def timethis(func):
# '''
# Decorator that reports the execution time
#
# '''
#
# @wraps(func)
# def wrapper(*args):
# start = time.time()
# result = func(*args)
# end = time.time()
# print(func.__name__, end-start)
# return result
# return wrapper()
#
# #Example of usage
#
#
# @timethis
# def countdown(n):
# '''
# Counts down
# '''
#
# while n > 0:
# n -= 1
#
#
# countdown(55000)
# countdown(55000000000000)
#
def decorator(func):
    """Clamp negative arguments to 0 and frame the result with tildes.

    The wrapped function receives the sanitized (age, inc) pair.
    """
    # functools.wraps (imported at the top of this file) preserves the
    # wrapped function's __name__/__doc__, which the original lost.
    @wraps(func)
    def check(a, inc):
        a = a if a > 0 else 0
        inc = inc if inc > 0 else 0
        return "~~~~~~ " + func(a, inc) + " ~~~~~~"
    return check
@decorator
def nextyear(age, inc):
    """Return a message describing the age after `inc` year(s)."""
    return "Age after %s year(s): %s" % (inc, age + inc)
# def decorator(func):
# def check(a, inc):
# # fixes both args so they are nonnegative
# a = a if a > 0 else 0
# inc = inc if inc > 0 else 0
# # adds fanciness around returned statement
# ret = "~~~ " + func(a, inc) + " ~~~\n"
# return ret
#
# return check
#
#
# @decorator
# def nextyear(age, inc):
# return "Age after %s year(s): %s" % (inc, age + inc)
print(nextyear(-5, 3)) # 1
print(nextyear(18, -10)) # 19
print(nextyear(-5, 3))
print(nextyear(18, -10))
a_string = "This is a global variable"
def foo():
print(locals())
print(globals())
foo()
def add(x, y):
return x + y
def substract(x, y):
return x - y
def apply(func, x, y):
return func(x, y)
print(apply(add, 2, 8))
print(apply(substract, 2, 8))
| [
"sebas_ferre@hotmail.com"
] | sebas_ferre@hotmail.com |
f18d471f3bf849b8418071b41016466424242b1f | 207d1213ca57d4ebaa47e7f0d91a538291c1e3d7 | /mysite/settings.py | a172ec4d19c1f499a4ce96b6c80329972a229e98 | [] | no_license | cainanandrade/my-first-blog | 632de542f4bb9078e606582bceefbd3238c7ece8 | 4b3303d13a95f766ddf8d0924ba3eab6843077ce | refs/heads/master | 2020-03-08T01:52:39.287081 | 2018-04-15T17:30:28 | 2018-04-15T17:30:28 | 127,843,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,383 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lqr5j55wppr-r6ix3^u^6@s*v5db&=jo8-k!p3tp=o9r#5h*%m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# configurando usuário pythonanywhere:
ALLOWED_HOSTS = ['127.0.0.1', '<your_username>.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# adicionando caminho para arquivos estáticos (CSS):
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"cainanandrade01@gmail.com"
] | cainanandrade01@gmail.com |
f9f37c3ce268110fe9dbfc8f6cbfe32ff5321adc | 48e6a0a842cc4df774184bc75466e9061056c9f3 | /userlogin.py | e935dcc856a54253518449bda016adb404f5f230 | [] | no_license | mgokayb/beginnerpythonprojects | 737b78aeaacca4a23bddd6f25cc2f3b858bf96a8 | 0a8248f9288dd8adf62a4a1a1ff9a6fd85e28e0f | refs/heads/master | 2020-08-21T14:23:10.735252 | 2020-07-01T10:25:43 | 2020-07-01T10:25:43 | 216,178,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | print(""""
************************************
User login
interface!
Enter the your id and password Plz
************************************
""")
try_limit = 3
user_id = "john"
user_pass = "nhoj"
while try_limit > 0:
id = input("ID : ")
password = input("PASSWORD : ")
if try_limit == 0:
print("Failed to log in!! B Y E")
else:
print(f"You have {try_limit-1} chance to log in! Try Again!")
if user_id != id and user_pass == password:
try_limit -= 1
print(f"ID is wrong.")
elif user_id == id and user_pass != password:
try_limit -= 1
print(f"PASSWORD is wrong.")
elif user_id != id and user_pass != password:
try_limit -= 1
print(f"ID and PASSWORD is wrong.")
else:
print(""""
******************************
Hello John. Welcome!
You are logged on.
******************************
""")
| [
"noreply@github.com"
] | mgokayb.noreply@github.com |
c44d4976b88cde33295efe8c178266d8cf5806da | e209f29bec512e38fe1e3bc0561a7b21dad01615 | /ReModule-3/ReModule_3.py | e9ce1443d74536d22db9be803b22af62182cf378 | [] | no_license | hilal-bstn/PythonModules | 76dd9bcb3e3ab4537866b9a9b4ea7775d4a04fd3 | 6604d75fe9e4a6ae73c730cb4f304363b3081495 | refs/heads/master | 2023-03-05T17:17:54.976598 | 2021-02-17T13:00:49 | 2021-02-17T13:00:49 | 339,725,231 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | #regular expression
import re
result=dir(re)
str="Python Kursu : Python Programla rehberi | 40 Saat"
result=re.findall("Python",str)#bulduğu "Python" yazılarını liste haline getirir
result=len(result)#kaç tane python yazısı var(listenin uzunluğu)
result=re.split(" ",str)#boşlukları atarak kelimeleri liste şekline getirir.
result=re.sub(" ","-",str)#boşluları - ile değiştir. Boşluk yerine \s de yazılabilir.
result=re.search("Python",str)#kaçıncı karakterler arasında pyhon yazısı var (Çıkıtısı:(0,6) gibi olur)
result=re.findall("[abc]",str)#bulduğu a b c karakterlerini liste haline getirir. []içine yazılan herbir karakteri ayrı ayrı arar
result=re.findall("[a-e]",str)#a dan e ye kadar olan karketleri varsa listeler[a,e,a,]..gibi
result=re.findall("[0-4]",str)#ile 4 arasındaki karakterleri arar
result=re.findall("[^abc]",str)#abc dışındaki karakterler
#not: [0-39]=>[01239]şelinde bir arama gerçeklerştirir
result=re.findall("[^0-9]",str)
result=re.findall("...",str)#3 karakter olarak algılar 3 lü karakterlere böler ve listeler
result=re.findall("Py..on",str)
result=re.findall("^P",str)#p ile başlıyorsa çıktısı:['P']
result=re.findall("t$",str)#t ile bitiyormu
result=re.findall("sa*t",str)#a dan 0 yada daha fazla olabilir saat kelimesi gibi
result=re.findall("sa+t",str)#a 1 den daha fazla olmasını sorgular
result=re.findall("sa?t",str)#sadece 0 yada bir karakter olmasını sorgular
result=re.findall("a{2}",str)#a dan 2 tane olanları getir
result=re.findall("[0-9]{2}",str)#2 basamaklı sayıyı ararken
result=re.findall("a|b",str)#a yada b
result=re.findall("(a|b|)xz",str)#a yada b yada c nin arkasında xz karakterleri gelmelidir
result=re.findall("\$a",str)#$karakterinin arkasında a yı arar
result=re.findall("\APython",str)#str ifadesi Python ile mi başlıyor
result=re.findall("saat\Z",str)#saat ile mi bitiyor
print(result) | [
"hilalbastan@gmail.com"
] | hilalbastan@gmail.com |
9f53ba453fa1cbfb5eaa6fc254ad5e72e9952a7e | 11b9623a8acd968cc683ba8ff8f7527043f6e80b | /uc_pmt_gain/old/discrete_gaussian_with_binomial/cuda_pmt_mc.py | dbe0ec595735ad2b07ebbaced6880c479c1262bc | [] | no_license | mdanthony17/xenon1t | ecae2d2ed0873874d32c02d048f75f8833dc4c47 | 3f56e102fe9af85c8784f0171714d0af777db556 | refs/heads/master | 2020-12-25T05:44:52.213533 | 2017-08-24T13:12:07 | 2017-08-24T13:12:07 | 63,275,516 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,374 | py |
cuda_pmt_mc ="""
#include <curand_kernel.h>
extern "C" {
__device__ int gpu_exponential(curandState_t *rand_state, float exp_constant, float exp_offset)
{
// pdf = 1/const * exp(-(x-offset)/const)
return -logf(curand_uniform(rand_state)) * exp_constant + exp_offset;
}
__device__ int gpu_discrete_gaussian(curandState_t *rand_state, float mean, float width)
{
int lower_bound_for_integral = (int)roundf(mean - 3*width);
int upper_bound_for_integral = (int)roundf(mean + 3*width);
float integral_of_dist = 0.;
int k;
// find approximate integral of distribution
for (int i = 0; i < (upper_bound_for_integral-lower_bound_for_integral+1); i++)
{
k = i + lower_bound_for_integral;
integral_of_dist += expf( -powf(k-mean, 2) / powf(width, 2) / 2.);
}
// get uniform random number
float r_uniform = curand_uniform(rand_state);
float cumulative_dist = 0.;
// add lower bound to CDF
cumulative_dist += expf( -powf(lower_bound_for_integral-mean, 2) / powf(width, 2) / 2.);
if (r_uniform < cumulative_dist)
return lower_bound_for_integral;
else
{
for (int i = 0; i < (upper_bound_for_integral-lower_bound_for_integral-1); i++)
{
k = 1 + lower_bound_for_integral + i;
cumulative_dist += expf( -powf(k-mean, 2) / powf(width, 2) / 2.) / integral_of_dist;
if ( (r_uniform > cumulative_dist) && (r_uniform < (cumulative_dist + expf( -powf((k+1)-mean, 2) / powf(width, 2) / 2.) / integral_of_dist)) )
return k+1;
}
}
// at this point must return upper bound since all others failed
return upper_bound_for_integral;
}
__device__ int gpu_binomial(curandState_t *rand_state, int num_trials, float prob_success)
{
/*
int x = 0;
for(int i = 0; i < num_trials; i++) {
if(curand_uniform(rand_state) < prob_success)
x += 1;
}
return x;
*/
// Rejection Method (from 7.3 of numerical recipes)
// slower on 970!!
float pi = 3.1415926535;
int j;
int nold = -1;
float am, em, g, angle, p, bnl, sq, t, y;
float pold = -1.;
float pc, plog, pclog, en, oldg;
p = (prob_success < 0.5 ? prob_success : 1.0 - prob_success);
am = num_trials*p;
if (num_trials < 25)
{
bnl = 0;
for (j=0; j < num_trials; j++)
{
if (curand_uniform(rand_state) < p) bnl += 1;
}
}
else if (am < 1.0)
{
g = expf(-am);
t = 1.;
for (j=0; j < num_trials; j++)
{
t *= curand_uniform(rand_state);
if (t < g) break;
}
bnl = (j <= num_trials ? j : num_trials);
}
else
{
if (num_trials != nold)
{
en = num_trials;
oldg = lgammaf(en+1.);
nold = num_trials;
}
if (p != pold)
{
pc = 1. - p;
plog = logf(p);
pclog = logf(pc);
pold = p;
}
sq = powf(2.*am*pc, 0.5);
do
{
do
{
angle = pi*curand_uniform(rand_state);
y = tanf(angle);
em = sq*y + am;
} while (em < 0. || em >= (en+1.));
em = floor(em);
t = 1.2*sq*(1. + y*y)*expf(oldg - lgammaf(em+1.) - lgammaf(en-em+1.) + em*plog + (en-em)*pclog);
} while (curand_uniform(rand_state) > t);
bnl = em;
}
if (prob_success != p) bnl = num_trials - bnl;
return bnl;
// BTRS method (NOT WORKING)
/*
float p = (prob_success < 0.5 ? prob_success : 1.0 - prob_success);
float spq = powf(num_trials*p*(1-p), 0.5);
float b = 1.15 + 2.53 * spq;
float a = -0.0873 + 0.0248 * b + 0.01 * p;
float c = num_trials*p + 0.5;
float v_r = 0.92 - 4.2/b;
float us = 0.;
float v = 0;
int bnl, m;
float u;
float alpha, lpq, h;
int var_break = 0;
if (num_trials*p < 10)
{
bnl = 0;
for (int j=0; j < num_trials; j++)
{
if (curand_uniform(rand_state) < p) bnl += 1;
}
return bnl;
}
while (1)
{
bnl = -1;
while ( bnl < 0 || bnl > num_trials)
{
u = curand_uniform(rand_state) - 0.5;
v = curand_uniform(rand_state);
us = 0.5 - abs(u);
bnl = (int)floor((2*a/us + b) * u + c);
if (us >= 0.07 && v < v_r) var_break = 1;
if (var_break == 1) break;
}
if (var_break == 1) break;
alpha = (2.83 + 5.1/b)*spq;
lpq = logf(p/(1-p));
m = (int)floor((num_trials+1)*p);
h = lgammaf(m+1) + lgammaf(num_trials-m+1);
v = v*alpha/(a/(us*us) + b);
if (v <= h - lgammaf(bnl+1) - lgammaf(num_trials-bnl+1) + (bnl-m)*lpq) var_break = 1;
if (var_break == 1) break;
}
if (prob_success != p) bnl = num_trials - bnl;
return bnl;
*/
}
// used for finding index for 2d histogram array
// lower bound corresponds to the index
// uses binary search ON SORTED ARRAY
// THIS IS THE TEST WHICH MUST RETURN VOIDS
// AND HAVE POINTER INPUTS
__global__ void test_gpu_find_lower_bound(int *num_elements, float *a_sorted, float *search_value, int *index)
{
float *first = a_sorted;
float *iterator = a_sorted;
int count = *num_elements;
int step;
if (*search_value < a_sorted[0] || *search_value > a_sorted[*num_elements])
{
*index = -1;
return;
}
while (count > 0)
{
iterator = first;
step = count / 2;
iterator += step;
if (*iterator < *search_value)
{
first = ++iterator;
count -= step + 1;
}
else
{
count = step;
}
// -1 to get lower bound
*index = iterator - a_sorted - 1;
}
}
// used for finding index for 2d histogram array
// lower bound corresponds to the index
// uses binary search ON SORTED ARRAY
__device__ int gpu_find_lower_bound(int *num_elements, float *a_sorted, float search_value)
{
float *first = a_sorted;
float *iterator = a_sorted;
int count = *num_elements;
int step;
if (search_value < a_sorted[0] || search_value > a_sorted[*num_elements])
{
return -1;
}
while (count > 0)
{
iterator = first;
step = count / 2;
iterator += step;
if (*iterator < search_value)
{
first = ++iterator;
count -= step + 1;
}
else
{
count = step;
}
}
// -1 to get lower bound
return iterator - a_sorted - 1;
}
#define CURAND_CALL ( x ) do { if (( x ) != CURAND_STATUS_SUCCESS ) {\
printf (" Error at % s :% d \ n " , __FILE__ , __LINE__ ) ;\
return EXIT_FAILURE ;}} while (0)
#include <stdio.h>
__global__ void setup_kernel (int nthreads, curandState *state, unsigned long long seed, unsigned long long offset)
{
int id = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
//printf("hello\\n");
if (id >= nthreads)
return;
/* Each thread gets same seed, a different sequence number, no offset */
curand_init (seed, id, offset, &state[id]);
}
__global__ void cascade_pmt_model(curandState *state, int *num_trials, int *num_loops, float *a_hist, float *mean_num_pe, float *prob_hit_first_dynode, float *mean_e_from_dynode, float *width_e_from_dynode, float *probability_electron_ionized, float *bkg_mean, float *bkg_std, int *num_bins, float *bin_edges)
{
//printf("hello\\n");
int iteration = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
curandState s = state[iteration];
int bin_number;
int num_dynodes = 12;
float f_tot_num_pe;
int pe_from_first_dynode;
int current_num_dynodes;
int num_electrons_leaving_dynode;
int repetition_number;
if (iteration < *num_trials)
{
for (repetition_number=0; repetition_number < *num_loops; repetition_number++)
{
//printf("hello\\n");
int i_tot_num_pe = curand_poisson(&s, *mean_num_pe);
current_num_dynodes = num_dynodes;
if (*prob_hit_first_dynode < 0 || *prob_hit_first_dynode > 1)
{
state[iteration] = s;
continue;
//return;
}
/*
if (gpu_binomial(&s, 1, *prob_hit_first_dynode) < 1)
{
current_num_dynodes -= 1;
}
*/
pe_from_first_dynode = gpu_binomial(&s, i_tot_num_pe, 1-*prob_hit_first_dynode);
i_tot_num_pe -= pe_from_first_dynode;
// check if all PE are from first dynode
// if so just pretend all were from cathode
// but assume one less dynode
if (i_tot_num_pe == 0)
{
i_tot_num_pe = pe_from_first_dynode;
pe_from_first_dynode = 0;
current_num_dynodes -= 1;
}
if (*mean_e_from_dynode < 0)
{
state[iteration] = s;
continue;
//return;
}
if (i_tot_num_pe > 0)
{
for (int i = 0; i < current_num_dynodes; i++)
{
// after first dynode add the PE originating from
// first dynode back in
if (i == 1)
i_tot_num_pe += pe_from_first_dynode;
if (i_tot_num_pe < 10000)
{
if (i_tot_num_pe < 15)
{
num_electrons_leaving_dynode = (int)gpu_discrete_gaussian(&s, *mean_e_from_dynode*i_tot_num_pe, *width_e_from_dynode*powf(i_tot_num_pe, 0.5));
if (num_electrons_leaving_dynode < 1)
continue;
}
else
num_electrons_leaving_dynode = (int)roundf( (curand_normal(&s) * *width_e_from_dynode*powf(i_tot_num_pe, 0.5)) + *mean_e_from_dynode*i_tot_num_pe );
i_tot_num_pe = gpu_binomial(&s, num_electrons_leaving_dynode, *probability_electron_ionized);
}
else
{
num_electrons_leaving_dynode = (int)roundf( (curand_normal(&s) * *width_e_from_dynode*powf(i_tot_num_pe, 0.5)) + *mean_e_from_dynode*i_tot_num_pe );
i_tot_num_pe = (curand_normal(&s) * powf(num_electrons_leaving_dynode**probability_electron_ionized*(1-*probability_electron_ionized), 0.5)) + num_electrons_leaving_dynode**probability_electron_ionized;
}
}
}
if (*bkg_std < 0)
{
state[iteration] = s;
continue;
//return;
}
f_tot_num_pe = (curand_normal(&s) * *bkg_std) + *bkg_mean + i_tot_num_pe;
bin_number = gpu_find_lower_bound(num_bins, bin_edges, f_tot_num_pe);
if (bin_number == -1)
{
state[iteration] = s;
continue;
//return;
}
atomicAdd(&a_hist[bin_number], 1);
state[iteration] = s;
//printf("hi: %f\\n", f_tot_num_pe);
}
return;
}
}
__global__ void pure_cascade_spectrum(curandState *state, int *num_trials, float *a_hist, int *num_pe, float *mean_e_from_dynode, float *width_e_from_dynode, float *probability_electron_ionized, int *num_bins, float *bin_edges)
{
//printf("hello\\n");
int iteration = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
curandState s = state[iteration];
int bin_number;
int num_dynodes = 12;
float f_tot_num_pe;
int i_tot_num_pe = *num_pe;
int num_electrons_leaving_dynode;
if (iteration < *num_trials)
{
if (*mean_e_from_dynode < 0)
{
state[iteration] = s;
return;
}
if (i_tot_num_pe > 0)
{
for (int i = 0; i < num_dynodes; i++)
{
if (i_tot_num_pe < 10000)
{
if (i_tot_num_pe < 15)
{
num_electrons_leaving_dynode = (int)gpu_discrete_gaussian(&s, *mean_e_from_dynode*i_tot_num_pe, *width_e_from_dynode*powf(i_tot_num_pe, 0.5));
if (num_electrons_leaving_dynode < 1)
continue;
}
else
num_electrons_leaving_dynode = (int)roundf( (curand_normal(&s) * *width_e_from_dynode*powf(i_tot_num_pe, 0.5)) + *mean_e_from_dynode*i_tot_num_pe );
i_tot_num_pe = gpu_binomial(&s, num_electrons_leaving_dynode, *probability_electron_ionized);
}
else
{
num_electrons_leaving_dynode = (int)roundf( (curand_normal(&s) * *width_e_from_dynode*powf(i_tot_num_pe, 0.5)) + *mean_e_from_dynode*i_tot_num_pe );
i_tot_num_pe = (curand_normal(&s) * powf(num_electrons_leaving_dynode**probability_electron_ionized*(1-*probability_electron_ionized), 0.5)) + num_electrons_leaving_dynode**probability_electron_ionized;
}
}
}
// remove zero counts (~5% probability that single electron)
// frees zero new electrons
if (i_tot_num_pe == 0)
{
state[iteration] = s;
return;
}
f_tot_num_pe = (float)i_tot_num_pe;
bin_number = gpu_find_lower_bound(num_bins, bin_edges, f_tot_num_pe);
if (bin_number == -1)
{
state[iteration] = s;
return;
}
atomicAdd(&a_hist[bin_number], 1);
state[iteration] = s;
//printf("hi: %f\\n", f_tot_num_pe);
return;
}
}
__global__ void fixed_pe_cascade_spectrum(curandState *state, int *num_trials, int *num_loops, float *a_hist, int *num_pe, float *prob_hit_first_dynode, float *mean_e_from_dynode, float *width_e_from_dynode, float *probability_electron_ionized, float *bkg_mean, float *bkg_std, int *num_bins, float *bin_edges)
{
int iteration = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
curandState s = state[iteration];
int bin_number;
const int num_dynodes = 12;
const int fixed_num_pe = *num_pe;
int current_num_dynodes;
float f_tot_num_pe;
int i_tot_num_pe;
int pe_from_first_dynode;
int num_electrons_leaving_dynode;
int repetition_number;
if (iteration < *num_trials)
{
for (repetition_number=0; repetition_number < *num_loops; repetition_number++)
{
i_tot_num_pe = fixed_num_pe;
current_num_dynodes = num_dynodes;
if (*prob_hit_first_dynode < 0 || *prob_hit_first_dynode > 1)
{
state[iteration] = s;
continue;
//return;
}
/*
if (gpu_binomial(&s, 1, *prob_hit_first_dynode) < 1)
{
current_num_dynodes -= 1;
}
*/
pe_from_first_dynode = gpu_binomial(&s, i_tot_num_pe, 1-*prob_hit_first_dynode);
i_tot_num_pe -= pe_from_first_dynode;
// check if all PE are from first dynode
// if so just pretend all were from cathode
// but assume one less dynode
if (i_tot_num_pe == 0)
{
i_tot_num_pe = pe_from_first_dynode;
pe_from_first_dynode = 0;
current_num_dynodes -= 1;
}
if (*mean_e_from_dynode < 0)
{
state[iteration] = s;
continue;
//return;
}
if (i_tot_num_pe > 0)
{
for (int i = 0; i < current_num_dynodes; i++)
{
// after first dynode add the PE originating from
// first dynode back in
if (i_tot_num_pe < 10000)
{
if (i_tot_num_pe < 15)
{
num_electrons_leaving_dynode = (int)gpu_discrete_gaussian(&s, *mean_e_from_dynode*i_tot_num_pe, *width_e_from_dynode*powf(i_tot_num_pe, 0.5));
if (num_electrons_leaving_dynode < 1)
continue;
}
else
num_electrons_leaving_dynode = (int)roundf( (curand_normal(&s) * *width_e_from_dynode*powf(i_tot_num_pe, 0.5)) + *mean_e_from_dynode*i_tot_num_pe );
i_tot_num_pe = gpu_binomial(&s, num_electrons_leaving_dynode, *probability_electron_ionized);
}
else
{
num_electrons_leaving_dynode = (int)roundf( (curand_normal(&s) * *width_e_from_dynode*powf(i_tot_num_pe, 0.5)) + *mean_e_from_dynode*i_tot_num_pe );
i_tot_num_pe = (curand_normal(&s) * powf(num_electrons_leaving_dynode**probability_electron_ionized*(1-*probability_electron_ionized), 0.5)) + num_electrons_leaving_dynode**probability_electron_ionized;
}
}
}
if (*bkg_std < 0)
{
state[iteration] = s;
continue;
//return;
}
f_tot_num_pe = (curand_normal(&s) * *bkg_std) + *bkg_mean + i_tot_num_pe;
bin_number = gpu_find_lower_bound(num_bins, bin_edges, f_tot_num_pe);
if (bin_number == -1)
{
state[iteration] = s;
continue;
//return;
}
atomicAdd(&a_hist[bin_number], 1);
state[iteration] = s;
//printf("hi: %f\\n", f_tot_num_pe);
}
return;
}
}
// final close
}
""" | [
"mda2149@columbia.edu"
] | mda2149@columbia.edu |
f783b599b6be376489bb5083ca50e75c635d3fba | 88a237bac5889b61416514c4ac3b2b608c5e8036 | /Exercicios/03-12-2019/consumoapi/aula5/02_climatempo_extra.py | e70c65aa26287304e350624469fe80bff2fe2e7f | [] | no_license | TopZeraProductions/desenvolvimento-de-aplicacoes-distribuidas | 14ed5a950a30e9886e01d1a3cc7aec94c615eb82 | 5014c19f9f305734bc1c9df4768e140e71b7ec30 | refs/heads/master | 2020-04-21T04:42:53.403595 | 2019-06-01T18:29:03 | 2019-06-01T18:29:03 | 169,320,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | '''
A API da climatempo está em https://advisor.climatempo.com.br
Faça:
1) Crie uma conta e consiga uma chave sua para acessar.
2) Teste no Firefox um exemplo de temperatura atual
(veja a documentação).
3) Crie uma função temp_sao_paulo que retorna o temperatura
atual em São Paulo.
4) Crie uma função temp_fortaleza que retorna o temperatura
atual em Fortaleza.
5) Crie uma função pega_id que recebe o nome da cidade e
retorna seu id.
6) Desafio: Crie uma função pega_temp que recebe o nome da
cidade e retorna a sua temperatura.
'''
import requests
sua_chave = ''
| [
"paulino.joaovitor@yahoo.com.br"
] | paulino.joaovitor@yahoo.com.br |
95c8b8bfac061abd2b50593bb7208a82fe6d0c7d | 0dc24a6e729a4b438fbcd9cfb72da3b6ee716d77 | /ksiazka_zrob_to_sam/2_ciagi_tekstowe_cwiczenia.py | 063cff87970578c359ddc7c9827b239c8756929d | [] | no_license | Licho59/learning_python_eric_matthes_book | fca84a2bff207c10dec20c7fea9aeacf05d6a101 | 969f95132822d8bd21c30403d8e0bf6aadb9914f | refs/heads/master | 2021-09-01T03:41:37.281741 | 2017-12-24T15:12:13 | 2017-12-24T15:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 26 19:24:54 2016
@author: Leszek
"""
name="jan kowalski"
print(name)
print(name.title())
print(name.upper())
#konkatenacja
first_name="jan"
last_name="kowalski"
full_name=first_name + " " + last_name
print(full_name)
print("Witaj, " + full_name.title() + "!")
message="Witaj, " + full_name.title() + "!"
print(message)
first_name="jan"
last_name="kowalski"
full_name=first_name + " " + last_name
print(full_name.title())
komunikat="Witaj, " + full_name.title() + "!"
print(komunikat)
print("Witaj, " + full_name.title() + "!")
#tabulator
print('\tW Szczebrzeszynie chrząszcz')
print('\t100 lat zdrowia')
#nowy wiersz
print('\na=1\nb=2\nc=3')
print("Języki:\nPython\nC\nJavaScipt")
print("Języki:\n\tPython\n\tC\n\tJavaScript")
#usuwanie białego znaku
favorite_language='python '
favorite_language.rstrip()
favorite_language = favorite_language.rstrip()
favorite_language
favorite_language = ' python '
favorite_language
favorite_language.strip()
| [
"lestlalka@gmail.com"
] | lestlalka@gmail.com |
f76e9c358d8d17274301b7938072768ecdd59dbb | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /SQo9Jx5ih2iHG8JAn_15.py | 9d1565ac696693d903c8cff8417d78d66f370fb6 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | """
Create a function that expands a number into a string as shown below:
25 ➞ "20 + 5"
70701 ➞ "70000 + 700 + 1"
685 ➞ "600 + 80 + 5"
### Examples
expanded_form(70304) ➞ "70000 + 300 + 4"
expanded_form(1037903) ➞ "1000000 + 30000 + 7000 + 900 + 3"
expanded_form(802539) ➞ "800000 + 2000 + 500 + 30 + 9"
### Notes
N/A
"""
def expanded_form(num):
num_arr = [int(e) for e in str(num)]
num_str = ""
for i, num in enumerate(num_arr):
if num != 0:
if i != 0:
num_str += " + "
num_str += str(num * (10**(len(num_arr)-1-i)))
return num_str
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.