blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9bbf035b5e759603277ab311b08da9172cf22559
|
cd40fd66338bab16c3cac360ec68d0410daf85dc
|
/asyncio_study/event_loop/utils.py
|
039087d9261d78679ca8a895731fe964988d7754
|
[] |
no_license
|
suhjohn/Asyncio-Study
|
c74a95c37d6ce1d0983b5626a4f68d2b80d7ec79
|
d9c5a092924a32f18849787fd30cb322a0ff8b15
|
refs/heads/master
| 2021-05-12T12:28:15.749447
| 2018-01-14T17:25:22
| 2018-01-14T17:25:22
| 117,414,697
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
from functools import wraps
from time import time
def log_execution_time(func):
    """Decorator that reports how long each call of *func* takes.

    :param func: callable to instrument
    :return: wrapper with the same signature that prints the elapsed
        wall-clock seconds after every call and passes the result through
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        started_at = time()
        result = func(*args, **kwargs)
        elapsed = time() - started_at
        print(f"Executing {func.__name__} took {elapsed} seconds.")
        return result
    return wrapper
def fib(n):
    """Return the n-th Fibonacci number via naive double recursion."""
    if n <= 1:
        return n
    return fib(n - 1) + fib(n - 2)
# Module-level convenience: fib with execution-time logging applied.
timed_fib = log_execution_time(fib)
|
[
"johnsuh94@gmail.com"
] |
johnsuh94@gmail.com
|
a414e93bf6a93d7ea9d9d9fafad47934f70567b8
|
271c7959a39f3d7ff63dddf285004fd5badee4d9
|
/venv/Lib/site-packages/ncclient/devices/h3c.py
|
17e98b5a38712d3469fac56fcd86aaac22fcbffa
|
[
"MIT"
] |
permissive
|
natemellendorf/configpy
|
b6b01ea4db1f2b9109fd4ddb860e9977316ed964
|
750da5eaef33cede9f3ef532453d63e507f34a2c
|
refs/heads/master
| 2022-12-11T05:22:54.289720
| 2019-07-22T05:26:09
| 2019-07-22T05:26:09
| 176,197,442
| 4
| 1
|
MIT
| 2022-12-08T02:48:51
| 2019-03-18T03:24:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,936
|
py
|
"""
Handler for Huawei device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Huawei", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from ncclient.xml_ import BASE_NS_1_0
from .default import DefaultDeviceHandler
from ncclient.operations.third_party.h3c.rpc import *
class H3cDeviceHandler(DefaultDeviceHandler):
    """
    H3C handler for device specific information.

    In the device_params dictionary, which is passed to __init__, you can specify
    the parameter "ssh_subsystem_name". That allows you to configure the preferred
    SSH subsystem name that should be tried on your H3C switch. If connecting with
    that name fails, or you didn't specify that name, the other known subsystem names
    will be tried. However, if you specify it then this name will be tried first.
    """
    # No rpc-error tags are exempt from raising for this device.
    _EXEMPT_ERRORS = []

    def __init__(self, device_params):
        super(H3cDeviceHandler, self).__init__(device_params)

    def add_additional_operations(self):
        """Return the H3C-specific RPC operation name -> class mapping."""
        # Renamed local from `dict` to avoid shadowing the builtin.
        operations = {
            "get_bulk": GetBulk,
            "get_bulk_config": GetBulkConfig,
            "cli": CLI,
            "action": Action,
            "save": Save,
            "load": Load,
            "rollback": Rollback,
        }
        return operations

    def get_capabilities(self):
        # NOTE(review): the original comment said "replace a single value in the
        # default capabilities", but the defaults are returned unmodified —
        # confirm whether a replacement was intended here.
        c = super(H3cDeviceHandler, self).get_capabilities()
        return c

    def get_xml_base_namespace_dict(self):
        # Default (un-prefixed) XML namespace for requests.
        return {None: BASE_NS_1_0}

    def get_xml_extra_prefix_kwargs(self):
        d = {}
        d.update(self.get_xml_base_namespace_dict())
        return {"nsmap": d}

    def perform_qualify_check(self):
        # H3C replies are not checked for fully-qualified element names.
        return False
|
[
"nate.mellendorf@gmail.com"
] |
nate.mellendorf@gmail.com
|
33280c23ce326b75e21f7c1e6b655ba9c1896cde
|
9680ba23fd13b4bc0fc3ce0c9f02bb88c6da73e4
|
/Bernd Klein (520) ile Python/p_20404a.py
|
bc54c21d16cad7c304cceedabad9a88e4a32d34d
|
[] |
no_license
|
mnihatyavas/Python-uygulamalar
|
694091545a24f50a40a2ef63a3d96354a57c8859
|
688e0dbde24b5605e045c8ec2a9c772ab5f0f244
|
refs/heads/master
| 2020-08-23T19:12:42.897039
| 2020-04-24T22:45:22
| 2020-04-24T22:45:22
| 216,670,169
| 0
| 0
| null | null | null | null |
ISO-8859-9
|
Python
| false
| false
| 683
|
py
|
# coding:iso-8859-9 Türkçe
# p_20404a.py: İnternet ip adresleriyle iletişim kontrolü örneği.
import os, re # threading/ipsiz kontrol...
alınanPaketler = re.compile (r"(\d) alındı")
durum = ("cevapsız", "canlı fakat kayıp", "canlı")
for sonek in range (20,30):
ip = "192.168.178." + str (sonek)
kontrol = os.popen ("çınlattı -q -c2 " + ip, "r")
print ("...çınlatıyor ", ip)
while True:
satır = kontrol.readsatır()
if not satır: break
alınan = alınanPaketler.findall (satır)
if alınan: print (ip + ": " + durum[int (alınan[0])])
#İnternet açık olmalı ve ilgili ip adresleri kontrol edilmeli...
|
[
"noreply@github.com"
] |
mnihatyavas.noreply@github.com
|
f0d53247787de483a8157978732687647973de62
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_Class651.py
|
dd0dd8db9096c5bbae3a183ddb1975f869ce8a2d
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,289
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=24
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Assemble the fixed generated gate sequence onto *input_qubit*.

    Gates are appended one at a time (not as a batch) so the resulting
    moment structure is identical to the original generated code.
    """
    circuit = cirq.Circuit()  # circuit begin
    add = circuit.append
    add(cirq.H.on(input_qubit[0]))  # number=1
    add(cirq.H.on(input_qubit[1]))  # number=2
    add(cirq.H.on(input_qubit[1]))  # number=7
    add(cirq.H.on(input_qubit[2]))  # number=3
    add(cirq.H.on(input_qubit[3]))  # number=4
    add(cirq.H.on(input_qubit[0]))  # number=18
    add(cirq.CZ.on(input_qubit[3], input_qubit[0]))  # number=19
    add(cirq.H.on(input_qubit[0]))  # number=20
    add(cirq.H.on(input_qubit[0]))  # number=10
    add(cirq.CZ.on(input_qubit[3], input_qubit[0]))  # number=11
    add(cirq.H.on(input_qubit[0]))  # number=12
    add(cirq.CNOT.on(input_qubit[2], input_qubit[0]))  # number=8
    add(cirq.H.on(input_qubit[0]))  # number=13
    add(cirq.CZ.on(input_qubit[2], input_qubit[0]))  # number=14
    add(cirq.H.on(input_qubit[0]))  # number=15
    add(cirq.CNOT.on(input_qubit[0], input_qubit[2]))  # number=21
    add(cirq.X.on(input_qubit[2]))  # number=22
    add(cirq.CNOT.on(input_qubit[0], input_qubit[2]))  # number=23
    add(cirq.X.on(input_qubit[2]))  # number=17
    # circuit end
    return circuit
def bitstring(bits):
    """Render an iterable of bit-like values as a compact '0'/'1' string."""
    chars = []
    for b in bits:
        chars.append(str(int(b)))
    return ''.join(chars)
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]

    circuit = make_circuit(qubit_count, input_qubits)
    # Compile the circuit to Google's Sycamore gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count = 0  # NOTE(review): assigned but never used below

    # Simulate, then turn the amplitudes into per-bitstring probabilities.
    info = cirq.final_state_vector(circuit)
    qubits = round(log2(len(info)))
    frequencies = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real, 3)
        for i in range(2 ** qubits)
    }
    # Dump probabilities, circuit depth and diagram to the results file.
    writefile = open("../data/startCirq_Class651.csv", "w+")
    print(format(frequencies), file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit, file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
5db01072a7b1cfb52cc6a3b3c2a3401cc35537e6
|
a5103b7d5066138ac1a9aabc273361491a5031cd
|
/course5/week1/rnn_utils.py
|
eafeffe54a31969af6913bbe0cbfc4a64948fa29
|
[] |
no_license
|
mckjzhangxk/deepAI
|
0fa2f261c7899b850a4ec432b5a387e8c5f13e83
|
24e60f24b6e442db22507adddd6bf3e2c343c013
|
refs/heads/master
| 2022-12-13T18:00:12.839041
| 2021-06-18T03:01:10
| 2021-06-18T03:01:10
| 144,862,423
| 1
| 1
| null | 2022-12-07T23:31:01
| 2018-08-15T14:19:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,663
|
py
|
import numpy as np
def _initial_parameters(n_x,n_a,n_y):
params={}
params['Waa'] = np.random.randn(n_a,n_a)
params['Wax'] = np.random.randn(n_a,n_x)
params['ba'] = np.random.randn(n_a,1)
params['Wya'] = np.random.randn(n_y,n_a)
params['by'] = np.random.randn(n_y,1)
return params
def _initial_gradients(params):
grads={'d'+key:np.zeros_like(value) for key,value in params.items()}
grads['da_next']=0
return grads
def softmax(x):
    '''
    Column-wise softmax.
    :param x: array of shape [n, m]; each column is one distribution
    :return: array of the same shape with columns summing to 1
    '''
    # Subtract the per-column max first for numerical stability.
    shifted = x - np.max(x, axis=0, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=0, keepdims=True)
def _unpack(parameter):
return parameter['Waa'],parameter['Wax'],parameter['Wya'],parameter['ba'],parameter['by']
def _rnn_step_forward(xt, a_prev, parameter):
    '''
    One RNN time step.
    :param xt: input at this step, shape [n_x, m]
    :param a_prev: previous hidden state, shape [n_a, m]
    :param parameter: dict with Waa, Wax, Wya, ba, by
    :return: (a_out, ypred, cache) — new hidden state, softmax prediction,
        and a cache of (xt, a_prev, a_out, parameter) for the backward pass
    '''
    Waa, Wax, Wya, ba, by = _unpack(parameter)
    pre_activation = Waa.dot(a_prev) + Wax.dot(xt) + ba
    a_out = np.tanh(pre_activation)
    ypred = softmax(Wya.dot(a_out) + by)
    return a_out, ypred, (xt, a_prev, a_out, parameter)
def _rnn_step_backward(dy, cache, gradients):
    '''
    Backward pass for one RNN time step; accumulates into `gradients` in place.
    :param dy: gradient w.r.t. this step's output, shape [n_y, m]
    :param cache: (xt, a_prev, a_out, parameter) from _rnn_step_forward
    :param gradients: dict with dWaa, dWya, dWax, dba, dby, da_next
    :return: the same gradients dict (mutated and returned)
    '''
    xt, a_prev, a_out, parameter = cache
    Waa, Wax, Wya, ba, by = _unpack(parameter)
    # from linear prediction (output layer)
    dWya = dy.dot(a_out.T)
    dby = np.sum(dy, axis=1, keepdims=True)
    # da_next holds the gradient flowing in from the later time step.
    da_next = Wya.T.dot(dy) + gradients['da_next']
    # from rnn units: tanh'(z) = 1 - tanh(z)^2 = 1 - a_out**2
    dz = da_next * (1 - a_out ** 2)
    dWaa = dz.dot(a_prev.T)
    dWax = dz.dot(xt.T)
    dba = np.sum(dz, axis=1, keepdims=True)
    # Hand the recurrent gradient to the earlier step before accumulating.
    gradients['da_next'] = Waa.T.dot(dz)
    gradients['dWaa'] += dWaa
    gradients['dWax'] += dWax
    gradients['dba'] += dba
    gradients['dWya'] += dWya
    gradients['dby'] += dby
    return gradients
def _rnn_forward(x, a_prev, parameter):
    '''
    Run the RNN over all T time steps.
    :param x: inputs, shape [n_x, m, T]
    :param a_prev: initial hidden state, shape [n_a, m]
    :param parameter: dict with Waa, Wax, Wya, ba, by
    :return: (y_pred [n_y,m,T], a_out [n_a,m,T], caches — one per step)
    '''
    _, m, T = x.shape
    n_y, n_a = parameter['Wya'].shape
    # Pre-allocate outputs, filled step by step below.
    y_pred = np.zeros((n_y, m, T))
    a_out = np.zeros((n_a, m, T))
    caches = []
    hidden = a_prev
    for t in range(T):
        hidden, yhat, cache = _rnn_step_forward(x[:, :, t], hidden, parameter)
        y_pred[:, :, t] = yhat
        a_out[:, :, t] = hidden
        caches.append(cache)
    return y_pred, a_out, caches
def _rnn_backward(dy, caches, param):
    '''
    Backpropagation through time over all steps.
    :param dy: output gradients, shape [n_y, m, T]
    :param caches: per-step caches from _rnn_forward
    :param param: parameter dict (used only to shape the zero gradients)
    :return: accumulated gradients dict
    '''
    T = dy.shape[2]
    gradients = _initial_gradients(param)
    # Walk the steps in reverse so da_next flows from late to early steps.
    for t in reversed(range(T)):
        gradients = _rnn_step_backward(dy[:, :, t], caches[t], gradients)
    return gradients
def _computeLoss(yhat,y):
'''
:param yhat:
:param y:[n_y,m,T]
:return:
'''
#shape mxT
prob_of_trueLabel=np.sum(yhat*y,axis=0)
prob_of_trueLabel=prob_of_trueLabel.ravel()
loss=np.mean(-np.log(prob_of_trueLabel))
return loss
# NOTE(review): everything below runs at import time of this module. The
# keras imports plus VGG19()/ResNet50()/InceptionV3() instantiate large
# pretrained networks (downloading weights on first use) and look like
# leftover experimentation unrelated to the RNN utilities above — consider
# removing them or guarding behind `if __name__ == '__main__':`.
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_v3 import InceptionV3
# VGG16()
VGG19()
ResNet50()
InceptionV3()

# Smoke test: one forward and one backward pass over random data.
m, n_x, n_a, n_y, T = 100, 27, 32, 10, 60
x = np.random.randn(n_x, m, T)
a_prev = np.random.randn(n_a, m)
params = _initial_parameters(n_x, n_a, n_y)
y_pred, a_out, caches = _rnn_forward(x, a_prev, params)
dy = np.random.randn(n_y, m, T)
gradients = _rnn_backward(dy, caches, params)
|
[
"mckj_zhangxk@163.com"
] |
mckj_zhangxk@163.com
|
e2690fdaa4d677a52afa36499234099fce9dcb9f
|
e3ac7c428aa5b60e021a9440262320ff80f5be88
|
/Ui/welcomescreen.py
|
12640c6e51d951c0941192071407924a8f52cfc2
|
[] |
no_license
|
turamant/PyQt5-PhoneBook
|
b3423f978d2a368e6a997889095d13c5cb47a875
|
522490439889d91a40a651574339e80af6b6db1d
|
refs/heads/main
| 2023-07-11T00:50:10.109781
| 2021-08-10T14:24:54
| 2021-08-10T14:24:54
| 391,993,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,599
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'welcomescreen.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    # Auto-generated by pyuic5 from 'welcomescreen.ui': a login/registration
    # dialog with fixed widget geometry (no layout managers). Regenerate with
    # pyuic5 rather than editing by hand.

    def setupUi(self, Dialog):
        """Create and position all widgets on *Dialog*."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(1178, 798)
        # Root container covering the whole dialog.
        self.widget = QtWidgets.QWidget(Dialog)
        self.widget.setGeometry(QtCore.QRect(0, 0, 1181, 801))
        self.widget.setMinimumSize(QtCore.QSize(831, 731))
        self.widget.setStyleSheet("background-color: rgb(232, 232, 232);")
        self.widget.setObjectName("widget")
        # Headline label.
        self.labelWelcom = QtWidgets.QLabel(self.widget)
        self.labelWelcom.setGeometry(QtCore.QRect(370, 110, 551, 131))
        self.labelWelcom.setStyleSheet("font: 28pt \"DejaVu Math TeX Gyre\";\n"
                                       "color: black;")
        self.labelWelcom.setObjectName("labelWelcom")
        # Primary action buttons: login / sign-up / cancel.
        self.loginPushButton = QtWidgets.QPushButton(self.widget)
        self.loginPushButton.setGeometry(QtCore.QRect(240, 480, 191, 51))
        self.loginPushButton.setStyleSheet("\n"
                                           "font: 18pt \"Cantarell\";\n"
                                           "background-color: rgb(232, 232, 232);")
        self.loginPushButton.setObjectName("loginPushButton")
        self.signupPushButton = QtWidgets.QPushButton(self.widget)
        self.signupPushButton.setGeometry(QtCore.QRect(490, 480, 211, 51))
        self.signupPushButton.setStyleSheet("\n"
                                            "font: 18pt \"Cantarell\";\n"
                                            "background-color: rgb(232, 232, 232);")
        self.signupPushButton.setObjectName("signupPushButton")
        self.cancelPushButton = QtWidgets.QPushButton(self.widget)
        self.cancelPushButton.setGeometry(QtCore.QRect(760, 480, 211, 51))
        self.cancelPushButton.setStyleSheet("background-color: rgb(232, 232, 232);\n"
                                            "\n"
                                            "font: 18pt \"Cantarell\";\n"
                                            "")
        self.cancelPushButton.setObjectName("cancelPushButton")
        # Credential inputs.
        self.nameuserLineEdit = QtWidgets.QLineEdit(self.widget)
        self.nameuserLineEdit.setGeometry(QtCore.QRect(370, 260, 461, 71))
        self.nameuserLineEdit.setObjectName("nameuserLineEdit")
        self.passwordLineEdit = QtWidgets.QLineEdit(self.widget)
        self.passwordLineEdit.setGeometry(QtCore.QRect(370, 360, 461, 71))
        self.passwordLineEdit.setObjectName("passwordLineEdit")
        # Options: remember me / show password.
        self.saveMeCheckBox = QtWidgets.QCheckBox(self.widget)
        self.saveMeCheckBox.setGeometry(QtCore.QRect(470, 550, 291, 71))
        self.saveMeCheckBox.setStyleSheet("font: 18pt \"Cantarell\";")
        self.saveMeCheckBox.setObjectName("saveMeCheckBox")
        self.echoPasswordCheckBox = QtWidgets.QCheckBox(self.widget)
        self.echoPasswordCheckBox.setGeometry(QtCore.QRect(470, 600, 291, 71))
        self.echoPasswordCheckBox.setStyleSheet("font: 18pt \"Cantarell\";")
        self.echoPasswordCheckBox.setObjectName("echoPasswordCheckBox")
        # Link-styled buttons: forgot / change password.
        self.forgotPasswordPushButton = QtWidgets.QPushButton(self.widget)
        self.forgotPasswordPushButton.setGeometry(QtCore.QRect(490, 690, 231, 36))
        self.forgotPasswordPushButton.setStyleSheet("color: blue;\n"
                                                    "font: 14pt \"Cantarell\";\n"
                                                    "background-color: rgb(235, 235, 235);\n"
                                                    "border:0px;")
        self.forgotPasswordPushButton.setObjectName("forgotPasswordPushButton")
        self.changePasswordPushButton = QtWidgets.QPushButton(self.widget)
        self.changePasswordPushButton.setGeometry(QtCore.QRect(490, 740, 231, 36))
        self.changePasswordPushButton.setStyleSheet("color: blue;\n"
                                                    "font: 14pt \"Cantarell\";\n"
                                                    "background-color: rgb(235, 235, 235);\n"
                                                    "border:0px;")
        self.changePasswordPushButton.setObjectName("changePasswordPushButton")
        self.helpPushButton = QtWidgets.QPushButton(self.widget)
        self.helpPushButton.setGeometry(QtCore.QRect(1060, 10, 113, 36))
        self.helpPushButton.setStyleSheet("background-color: rgb(232, 232, 232);")
        self.helpPushButton.setObjectName("helpPushButton")

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Install all user-visible strings (Russian UI text) via Qt translate."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.labelWelcom.setText(_translate("Dialog", "Окно авторизации"))
        self.loginPushButton.setText(_translate("Dialog", "Войти"))
        self.signupPushButton.setText(_translate("Dialog", "Регистрация"))
        self.cancelPushButton.setText(_translate("Dialog", "Выход"))
        self.nameuserLineEdit.setPlaceholderText(_translate("Dialog", " e-mail"))
        self.passwordLineEdit.setWhatsThis(_translate("Dialog", "<html><head/><body><p>sdefesrgvesrgvegrvevgre</p><p>vrbge</p></body></html>"))
        self.passwordLineEdit.setPlaceholderText(_translate("Dialog", " Пароль"))
        self.saveMeCheckBox.setText(_translate("Dialog", "Запомнить меня"))
        self.echoPasswordCheckBox.setText(_translate("Dialog", "Показать пароль"))
        self.forgotPasswordPushButton.setText(_translate("Dialog", "Забыли пароль?"))
        self.changePasswordPushButton.setText(_translate("Dialog", "Сменить пароль"))
        self.helpPushButton.setText(_translate("Dialog", "Help"))
if __name__ == "__main__":
    # Manual preview: show the generated dialog standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
|
[
"tur1amant@gmail.com"
] |
tur1amant@gmail.com
|
6127db8c4201697208fcbd427c8f2ff605b318ec
|
ea9aa0e93a7f264511ef10b5ccb90f65d958900f
|
/3rd_practice/blog/models.py
|
c0eca233fb31aebd733abc8ef197a867e51945e6
|
[] |
no_license
|
wayhome25/django_travel_blog_2
|
01cf4591e0aa69fb5a3144e0739bd43ce4bebc9c
|
13e5d7ad2851febafb9a57e1fc93bb29c916c4c7
|
refs/heads/master
| 2020-06-01T08:58:24.492163
| 2017-08-26T13:19:45
| 2017-08-26T15:04:18
| 94,069,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
class Post(models.Model):
    """A blog post with a single photo; drafts are hidden until made public."""
    title = models.CharField(max_length=100)
    content = models.TextField()
    photo = models.ImageField()
    is_public = models.BooleanField(default=False)  # new posts start as drafts
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        """Canonical detail-page URL for this post."""
        return reverse('blog:post_detail', args=[self.pk])
class Comment(models.Model):
    """A user comment attached to a Post; newest comments first by default."""
    post = models.ForeignKey(Post)
    author = models.ForeignKey(settings.AUTH_USER_MODEL)
    message = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        # Newest (highest pk) first.
        ordering = ['-id']

    def __str__(self):
        return self.message

    def get_edit_url(self):
        """URL for editing this comment in the context of its post."""
        return reverse('blog:comment_edit', args=[self.post.pk, self.pk])

    def get_delete_url(self):
        """URL for deleting this comment in the context of its post."""
        return reverse('blog:comment_delete', args=[self.post.pk, self.pk])
|
[
"siwabada@gmail.com"
] |
siwabada@gmail.com
|
3996a1c41e99afd8b6eabfd0864dad2c2f4c7187
|
13e1fb78955c03a75cf483725f4811abd1b51ac4
|
/compiler/tests/03_wire_test.py
|
61e21985e7422f475fa91e656c3f095e1c483963
|
[
"BSD-3-Clause"
] |
permissive
|
ucb-cs250/OpenRAM
|
d7a88695ac6820d03b4b365245a1c4962cdc546d
|
3c5e13f95c925a204cabf052525c3de07638168f
|
refs/heads/master
| 2023-01-23T15:09:35.183103
| 2020-10-12T16:05:07
| 2020-10-12T16:05:07
| 318,904,763
| 0
| 0
|
BSD-3-Clause
| 2020-12-05T22:47:14
| 2020-12-05T22:47:13
| null |
UTF-8
|
Python
| false
| false
| 2,212
|
py
|
#!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import unittest
from testutils import *
import sys
import os
sys.path.append(os.getenv("OPENRAM_HOME"))
import globals
class wire_test(openram_test):
    """Route a fixed zig-zag wire on every layer stack (forward and reversed)
    and run a local DRC check on each resulting design."""

    def runTest(self):
        config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
        globals.init_openram(config_file)
        # These imports must come after init_openram so the tech is loaded.
        import wire
        import tech
        import design

        layer_stacks = [tech.poly_stack] + tech.beol_stacks

        for reverse in [False, True]:
            for stack in layer_stacks:
                if reverse:
                    layer_stack = stack[::-1]
                else:
                    layer_stack = stack

                # Just make a conservative spacing. Make it wire pitch instead?
                min_space = 2 * (tech.drc["minwidth_{}".format(layer_stack[0])] +
                                 tech.drc["minwidth_{}".format(layer_stack[2])])
                position_list = [[0, 0],
                                 [0, 3 * min_space],
                                 [1 * min_space, 3 * min_space],
                                 [4 * min_space, 3 * min_space],
                                 [4 * min_space, 0],
                                 [7 * min_space, 0],
                                 [7 * min_space, 4 * min_space],
                                 [-1 * min_space, 4 * min_space],
                                 [-1 * min_space, 0]]
                # Shift everything so negative coordinates are exercised too.
                position_list = [[x - min_space, y - min_space] for x, y in position_list]
                w = design.design("wire_test_{}".format("_".join(layer_stack)))
                wire.wire(w, layer_stack, position_list)
                self.local_drc_check(w)

        globals.end_openram()
# run the test from the command line
if __name__ == "__main__":
    (OPTS, args) = globals.parse_args()
    # Strip CLI args so unittest doesn't try to parse OpenRAM's options.
    del sys.argv[1:]
    header(__file__, OPTS.tech_name)
    unittest.main(testRunner=debugTestRunner())
|
[
"mrg@ucsc.edu"
] |
mrg@ucsc.edu
|
4df58b827efdb8b9e33d0f96ef24b7287e582d7b
|
55668bc4e19fd9aa3caa4395add2fe73741eb844
|
/206/main.py
|
29930f8e53ae428345565b5f0a0b9b54dc5cbd5c
|
[
"MIT"
] |
permissive
|
pauvrepetit/leetcode
|
cb8674e10b0dc6beb29e9b64a49bede857b889fd
|
2fda37371f1c5afcab80214580e8e5fd72b48a3b
|
refs/heads/master
| 2023-08-30T07:39:08.982036
| 2023-08-25T08:52:57
| 2023-08-25T08:52:57
| 151,678,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
#
# @lc app=leetcode.cn id=206 lang=python3
#
# [206] 反转链表
#
from typing import Optional
# @lc code=start
# Definition for singly-linked list.
# Definition for singly-linked list.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def rev(self, head: Optional[ListNode], prev: Optional[ListNode]) -> Optional[ListNode]:
        """Recursively reverse the list at *head*, pointing it back at *prev*.

        Returns the new head (the original tail); None input yields None.
        """
        if head is None:  # idiom fix: identity check instead of `== None`
            return head
        nxt = head.next   # renamed from `next` to avoid shadowing the builtin
        head.next = prev
        if nxt is None:
            return head
        return self.rev(nxt, head)

    def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Return the head of the reversed list."""
        return self.rev(head, None)
# @lc code=end
# Ad-hoc check: reverse a five-node list (result is not inspected).
a = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5, None)))))
Solution().reverseList(a)
|
[
"hu__ao@outlook.com"
] |
hu__ao@outlook.com
|
6b482961ceb846d151075be3e5574ce30b958fbb
|
9c3c83007c5bf0f36635b0045b2aad7f8a11ac11
|
/novice/03-02/variablerules.py
|
04efe613839917730bde3cf315a1c00a8a16bb91
|
[
"MIT"
] |
permissive
|
septiannurtrir/praxis-academy
|
bc58f9484db36b36c202bf90fdfd359482b72770
|
1ef7f959c372ae991d74ccd373123142c2fbc542
|
refs/heads/master
| 2021-06-21T17:04:58.379408
| 2019-09-13T16:46:08
| 2019-09-13T16:46:08
| 203,007,994
| 1
| 0
|
MIT
| 2021-03-20T01:43:24
| 2019-08-18T13:38:23
|
Python
|
UTF-8
|
Python
| false
| false
| 481
|
py
|
from flask import Flask
app = Flask(__name__)
@app.route('/user/<username>')
def show_user_profile(username):
    # show the user profile for that user
    # NOTE(review): `escape` is never imported in this module (only Flask is),
    # so this handler raises NameError when hit — import it (e.g. from
    # markupsafe) or drop the call.
    return 'User %s' % escape(username)
@app.route('/post/<int:post_id>')
def show_post(post_id):
    # show the post with the given id; the int converter guarantees an integer
    return 'Post %d' % post_id
@app.route('/path/<path:subpath>')
def show_subpath(subpath):
    # show the subpath after /path/ (path converter allows slashes)
    # NOTE(review): `escape` is never imported here — see show_user_profile.
    return 'Subpath %s' % escape(subpath)
|
[
"septiannurtrir@gmail.com"
] |
septiannurtrir@gmail.com
|
b1bb1dfa462be18d9be81098971bfbdf1023cb30
|
7ce761781e7f5b57b2469adce459a71b4758694d
|
/env/lib/python2.7/site-packages/graphlab/toolkits/_internal/search/_search.py
|
0870866d1721ce31a94ab4442179cea4425399d7
|
[] |
no_license
|
hophamtenquang/RecSys
|
c4fa18d1ba262670a284b2fba2ca97b882ef0f4c
|
535472844a046cadd9230302da647a54afff95e8
|
refs/heads/master
| 2021-01-19T17:00:32.924064
| 2017-08-30T10:31:32
| 2017-08-30T10:31:32
| 101,031,687
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,660
|
py
|
import sys as _sys
import graphlab as _gl
import graphlab.connect.main as glconnect
from graphlab.toolkits._internal_utils import _raise_error_if_not_sframe
from graphlab.toolkits._model import SDKModel as _SDKModel
from graphlab.toolkits._main import ToolkitError as _ToolkitError
from graphlab.toolkits._internal_utils import _toolkit_repr_print
from graphlab.util import _make_internal_url
from graphlab.util import _raise_error_if_not_of_type
from graphlab.util import _raise_error_if_not_of_type
def create(data, features=None,
           bm25_k1=1.5,
           bm25_b=0.75,
           tfidf_threshold=0.01,
           verbose=True):
    """
    Create a searchable index of text columns in an SFrame.

    Parameters
    ----------
    data : SFrame
        An SFrame containing at least one str column containing text that should
        be indexed.

    features : list of str
        A list of column names that contain text that should be indexed.
        Default: all str columns in the provided dataset.

    bm25_k1 : float
        Tuning parameter for the relative importance of term frequencies when
        computing the BM25 score between a query token and a document.

    bm25_b : float
        Tuning parameter to downweight scores of long documents when
        computing the BM25 score between a query token and a document.

    tfidf_threshold : float
        Tuning parameter to skip indexing words that have a TF-IDF score below
        this value.

    verbose : bool
        Controls whether or not to print progress during model creation.

    Returns
    -------
    out
        SearchModel

    See Also
    --------
    SearchModel.query

    References
    ----------
    Christopher D. Manning, Hinrich Schutze, and Prabhakar Raghavan.
    Introduction to information retrieval.
    http://nlp.stanford.edu/IR-book/pdf/irbookonlinereading.pdf

    Examples
    --------
    >>> import graphlab as gl
    >>> sf = gl.SFrame({'text': ['Hello my friend', 'I love this burrito']})
    >>> m = gl.toolkits._internal.search.create(sf)
    >>> print m.query('burrito')
    """
    # Input validation on data and features
    if features is None:
        # Default: index every str-typed column of the SFrame.
        features = _get_str_columns(data)
    _raise_error_if_not_of_type(data, [_gl.SFrame])
    _raise_error_if_not_of_type(features, [list])
    for f in features:
        if data[f].dtype() != str:
            raise _ToolkitError("Feature `%s` must be of type str" % f)

    # Store options
    options = {}
    options['bm25_b'] = bm25_b
    options['bm25_k1'] = bm25_k1
    options['tfidf_threshold'] = tfidf_threshold
    options['verbose'] = verbose
    options['features'] = features

    # Construct model: build the native index proxy, then wrap it.
    proxy = _gl.extensions._SearchIndex()
    proxy.init_options(options)
    proxy.index(data)
    return SearchModel(proxy)
class SearchModel(_SDKModel):
    """
    SearchModel objects can be used to search text data for a given query.

    This model should not be constructed directly. Instead, use
    :func:`graphlab.toolkits._internal.search.create` to create an
    instance of this model.
    """

    def __init__(self, model_proxy=None):
        super(SearchModel, self).__init__(model_proxy)
        self.__name__ = 'search'

    def _get_wrapper(self):
        """Return a callable that rebuilds a SearchModel from a unity proxy
        (used by the model save/load machinery)."""
        _class = self.__proxy__.__class__
        proxy_wrapper = self.__proxy__._get_wrapper()

        def model_wrapper(unity_proxy):
            model_proxy = proxy_wrapper(unity_proxy)
            return SearchModel(model_proxy)
        return model_wrapper

    @classmethod
    def _get_queryable_methods(cls):
        '''Returns a list of method names that are queryable through Predictive
        Service'''
        return {'query': {}}

    def get_current_options(self):
        """Return the options (bm25_k1, bm25_b, ...) the index was built with."""
        return self.__proxy__.get_current_options()

    def __str__(self):
        return self.__repr__()

    def _get_summary_struct(self):
        """
        Returns a structured description of the model, including (where relevant)
        the schema of the training data, description of the training data,
        training statistics, etc.

        Returns
        -------
        sections : list (of list of tuples)
            A list of summary sections.
            Each section is a list.
            Each item in a section list is a tuple of the form:
              ('<label>','<field>')
        section_titles: list
            A list of section titles.
            The order matches that of the 'sections' object.
        """
        data_fields = [
            ('Number of documents', 'num_documents'),
            ('Average tokens/document', 'average_document_length')]
        param_ranking_fields = [
            ('BM25 k1', 'bm25_k1'),
            ('BM25 b', 'bm25_b'),
            ('TF-IDF threshold', 'tfidf_threshold')]
        index_fields = [
            ('Number of unique tokens indexed', 'num_tokens'),
            ('Preprocessing time (s)', 'elapsed_processing'),
            ('Indexing time (s)', 'elapsed_indexing')]
        section_titles = ['Corpus',
                         'Indexing settings',
                         'Index']
        return ([data_fields,
                 param_ranking_fields,
                 index_fields],
                section_titles)

    def __repr__(self):
        """Pretty-printed summary built from _get_summary_struct."""
        (sections, section_titles) = self._get_summary_struct()
        return _toolkit_repr_print(self, sections,
                                   section_titles, width=32)

    def query(self, query, num_results=10,
              expansion_k=5,
              expansion_epsilon=0.1,
              expansion_near_match_weight=.5):
        """
        Search for text.

        Parameters
        ----------
        query: str
            A string of text.

        num_results : int
            The number of results to return.

        expansion_k : int
            Maximum number of nearest words to include from query token.

        expansion_epsilon : float
            Maximum distance to allow between query token and nearby word when
            doing query expansion. Must be between 0 and 1.

        expansion_near_match_weight : float
            Multiplier to use on BM25 scores for documents indexed via an
            approximate match with a given token. This will be used for each of
            the `expansion_k` words that are considered an approximate match.
            Must be between 0 and 1.

        Returns
        -------
        out: SFrame
            The rows of the original SFrame along with a `score` column
            which contains the BM25 score between this query and the row.

        Examples
        --------
        >>> import graphlab as gl
        >>> sf = gl.SFrame({'text': ['Hello my friend', 'I love this burrito']})
        >>> s = gl.search.create(sf, features=['text'])
        >>> s.query('burrito')
        """
        # Python 2 also accepts unicode query strings.
        if _sys.version_info.major == 2:
            _raise_error_if_not_of_type(query, [str, unicode])
        else:
            _raise_error_if_not_of_type(query, [str])

        # Whitespace tokenization only; the index proxy handles the rest.
        q = query.split(' ')
        results = self.__proxy__.query_index(q,
                                             expansion_k=expansion_k,
                                             expansion_epsilon=expansion_epsilon,
                                             expansion_near_match_weight=expansion_near_match_weight)
        results = self.__proxy__.join_query_result(results, method='default',
                                                   num_results=num_results)
        return results
def _get_str_columns(sf):
"""
Returns a list of names of columns that are string type.
"""
return [name for name in sf.column_names() if sf[name].dtype() == str]
|
[
"hophamtenquang@gmail.com"
] |
hophamtenquang@gmail.com
|
01a4792e02f8e7a6911af7ab76554e70e0471d8b
|
694d57c3e512ce916269411b51adef23532420cd
|
/leetcode/215kth_largest_element.py
|
58f31f67f601ead30791f3de73429cab51b47b5f
|
[] |
no_license
|
clovery410/mycode
|
5541c3a99962d7949832a0859f18819f118edfba
|
e12025e754547d18d5bb50a9dbe5e725fd03fd9c
|
refs/heads/master
| 2021-05-16T02:46:47.996748
| 2017-05-10T23:43:50
| 2017-05-10T23:43:50
| 39,235,141
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
from random import randint
class Solution(object):
    def partition(self, start, end, p, nums):
        """Dutch-flag partition of nums[start..end] around the value at index p.

        Rearranges the range in place: values smaller than the pivot first,
        then values equal to it, then larger ones. Returns the inclusive
        index range (first, last) occupied by the pivot value.
        """
        pivot = nums[p]
        # Park the chosen pivot at the front of the range.
        nums[p], nums[start] = nums[start], nums[p]
        less_end = equal_end = scan = start
        while scan <= end:
            current = nums[scan]
            if current == pivot:
                nums[equal_end], nums[scan] = nums[scan], nums[equal_end]
                equal_end += 1
            elif current < pivot:
                nums[equal_end], nums[scan] = nums[scan], nums[equal_end]
                nums[less_end], nums[equal_end] = nums[equal_end], nums[less_end]
                equal_end += 1
                less_end += 1
            scan += 1
        return less_end, equal_end - 1

    def findKthLargest(self, nums, k):
        """Return the k-th largest element using iterative randomized quickselect."""
        n = len(nums)
        target = n - k  # index of the answer in ascending sorted order
        lo, hi = 0, n - 1
        eq_first, eq_last = self.partition(lo, hi, randint(lo, hi), nums)
        while True:
            if eq_first <= target <= eq_last:
                return nums[eq_first]
            if target > eq_last:
                pivot_idx = randint(eq_last + 1, hi)
                eq_first, eq_last = self.partition(eq_last + 1, hi, pivot_idx, nums)
            else:
                pivot_idx = randint(lo, eq_first - 1)
                eq_first, eq_last = self.partition(lo, eq_first - 1, pivot_idx, nums)
|
[
"seasoul410@gmail.com"
] |
seasoul410@gmail.com
|
d2441a8e7cd1131ccb87e1debc5c49e33fc62f90
|
2031771d8c226806a0b35c3579af990dd0747e64
|
/pyobjc-framework-SearchKit/setup.py
|
cfce0b4f572de3e7d3cb839fb83d76b5f34634ee
|
[
"MIT"
] |
permissive
|
GreatFruitOmsk/pyobjc-mirror
|
a146b5363a5e39181f09761087fd854127c07c86
|
4f4cf0e4416ea67240633077e5665f5ed9724140
|
refs/heads/master
| 2018-12-22T12:38:52.382389
| 2018-11-12T09:54:18
| 2018-11-12T09:54:18
| 109,211,701
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
'''
Deprecated wrappers for the "SearchKit" framework on macOS.
Use the CoreServices package instead.
'''
from pyobjc_setup import setup
VERSION="5.1.1"
setup(
name='pyobjc-framework-SearchKit',
description = "Wrappers for the framework SearchKit on macOS",
min_os_level='10.5',
packages = [ "SearchKit" ],
version=VERSION,
install_requires = [
'pyobjc-core>='+VERSION,
'pyobjc-framework-CoreServices>='+VERSION,
],
long_description=__doc__,
)
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
98e494f9969bfbe0e38927f5f5a9e9da3f675862
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/tjMNAEgkNvM5eyEqJ_9.py
|
009b0a6620c65bbe7a418ed56973cde1d60c4685
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
"""
You are given two inputs:
1. An array of abbreviations.
2. An array of words.
Write a function that returns `True` if each abbreviation **uniquely
identifies** a word, and `False` otherwise.
### Examples
unique_abbrev(["ho", "h", "ha"], ["house", "hope", "happy"]) ➞ False
// "ho" and "h" are ambiguous and can identify either "house" or "hope"
unique_abbrev(["s", "t", "v"], ["stamina", "television", "vindaloo"]) ➞ True
unique_abbrev(["bi", "ba", "bat"], ["big", "bard", "battery"]) ➞ False
unique_abbrev(["mo", "ma", "me"], ["moment", "many", "mean"]) ➞ True
### Notes
Abbreviations will be a substring from `[0, n]` from the original string.
"""
def unique_abbrev(abbs, words):
al = []
for x in abbs:
temp = []
for y in words:
temp.append(y.startswith(x))
al.append((temp))
return sum(al[0]+al[1]+al[2]) == 3
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
78bb3e33b198664933fcf0bc38618fc403aed04b
|
2393a8fabee3d39bf1623e26c0313e3351caf814
|
/python/study/IO/示例代码/nonblocking/nonblocking-client.py
|
bb4e00884ca6c2f8d4878a852bc366caf746f668
|
[] |
no_license
|
shidg/note
|
5d3aaff9d1c6cf87b89513b7712638c9b808653c
|
d46aceaed64e3e2f854149f71f18fa92d650dc37
|
refs/heads/master
| 2023-05-26T16:14:51.715966
| 2023-05-19T02:08:36
| 2023-05-19T02:08:36
| 27,533,612
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-#
'''
author: -- shidegang --
Created Time: 2019-08-28 10:43:17
'''
import socket
sk = socket.socket()
server_addr = ('127.0.0.1',9000)
sk.connect(server_addr)
while True:
sk.sendall('hello'.encode(encoding='utf8'))
data = sk.recv(1024)
print(data.decode(encoding='utf8'))
|
[
"shidg@feezu.cn"
] |
shidg@feezu.cn
|
bdd26c536928ecc4169204488d28c7ea79fac6d1
|
6e0d8d91dd22e2275cd713822679d5cabbc9331a
|
/thespian/system/__init__.py
|
2c69b0574347c752e0c9daedb76632e6daa45a22
|
[
"MIT"
] |
permissive
|
kquick/Thespian
|
711712eb0a9ad3370f1013c8393cc461b9541dfe
|
dfc6d3e865c05f929328b85e98671a5c8fc3a54a
|
refs/heads/master
| 2023-05-26T15:51:57.959690
| 2023-05-22T15:08:00
| 2023-05-22T15:08:00
| 78,292,621
| 203
| 32
|
MIT
| 2021-06-22T14:42:09
| 2017-01-07T17:18:27
|
Python
|
UTF-8
|
Python
| false
| false
| 636
|
py
|
# This module contains the various ActorSystemBase implementations
# upon which the ActorSystem operates.
from thespian.actors import ActorAddress, ActorSystemMessage, PoisonMessage
from thespian.system.addressManager import *
from thespian.system.messages.status import *
from thespian.system.messages.convention import *
def isInternalActorSystemMessage(msg):
if isinstance(msg, PoisonMessage):
msg = msg.poisonMessage
return isinstance(msg, ActorSystemMessage) and \
not isinstance(msg, (Thespian_SystemStatus,
Thespian_ActorStatus,
PoisonMessage))
|
[
"kquick@godaddy.com"
] |
kquick@godaddy.com
|
3e9fbcc82ac9735647bbbf58624023d9b3049086
|
7f368b275cd18a5b7b2eb22b822223252914c8ef
|
/tensorflow_gan/python/tpu/cross_replica_ops.py
|
7ab35260b93b8be210c6cb4f9caf314cc746b313
|
[
"Apache-2.0"
] |
permissive
|
nivedwho/gan
|
176c624800378d9dfa9f74211c362b62953cc7f1
|
723ce1e3627778b979f048d817f834f253611ff4
|
refs/heads/master
| 2023-08-01T08:07:34.299917
| 2021-09-14T04:10:38
| 2021-09-14T04:11:37
| 396,680,181
| 0
| 0
|
Apache-2.0
| 2021-08-16T07:44:33
| 2021-08-16T07:44:33
| null |
UTF-8
|
Python
| false
| false
| 3,128
|
py
|
# coding=utf-8
# Copyright 2021 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow operations specific to TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow as tf
from tensorflow.python.tpu import tpu_function # pylint: disable=g-direct-tensorflow-import
__all__ = [
'cross_replica_mean',
'cross_replica_moments',
]
def cross_replica_mean(inputs, group_size=None):
"""Calculates the average value of inputs tensor across TPU replicas."""
num_replicas = tpu_function.get_tpu_context().number_of_shards
if not group_size:
group_size = num_replicas
if group_size == 1:
return inputs
if group_size != num_replicas:
group_assignment = []
assert num_replicas % group_size == 0
for g in range(num_replicas // group_size):
replica_ids = [g * group_size + i for i in range(group_size)]
group_assignment.append(replica_ids)
else:
group_assignment = None
return tf.compat.v1.tpu.cross_replica_sum(inputs, group_assignment) / tf.cast(
group_size, inputs.dtype)
def cross_replica_moments(inputs, axis, parallel=True, group_size=None):
"""Compute mean and variance of the inputs tensor across TPU replicas.
Args:
inputs: A tensor with 2 or more dimensions.
axis: Array of ints. Axes along which to compute mean and variance.
parallel: Use E[x^2] - (E[x])^2 to compute variance. This can be done
in parallel to computing the mean and reducing the communication overhead.
group_size: Integer, the number of replicas to compute moments arcoss.
None or 0 will use all replicas (global).
Returns:
Two tensors with mean and variance.
"""
# Compute local mean and then average across replicas.
mean = tf.math.reduce_mean(input_tensor=inputs, axis=axis)
mean = cross_replica_mean(mean)
if parallel:
# Compute variance using the E[x^2] - (E[x])^2 formula. This is less
# numerically stable than the E[(x-E[x])^2] formula, but allows the two
# cross-replica sums to be computed in parallel, saving communication
# overhead.
mean_of_squares = tf.reduce_mean(input_tensor=tf.square(inputs), axis=axis)
mean_of_squares = cross_replica_mean(mean_of_squares, group_size=group_size)
mean_squared = tf.square(mean)
variance = mean_of_squares - mean_squared
else:
variance = tf.math.reduce_mean(
input_tensor=tf.math.square(inputs - mean), axis=axis)
variance = cross_replica_mean(variance, group_size=group_size)
return mean, variance
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
c4ae49b5ef3dff9cda6859483ab61b793df6c6e4
|
90e6860b5370b742f01c0664ac84f14dc1272155
|
/src/ziggurat/config/StandardConfigurator.py
|
fdce59892f864b753760a8508b5a30a278cc7f28
|
[] |
no_license
|
sernst/Ziggurat
|
e63f876b8f2cb3f78c7a7a4dcf79af810a540722
|
4ae09bbd9c467b2ad740e117ed00354c04951e22
|
refs/heads/master
| 2021-01-17T07:20:17.138440
| 2016-05-27T14:27:43
| 2016-05-27T14:27:43
| 9,278,283
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,944
|
py
|
# StandardConfigurator.py
# (C)2013
# Scott Ernst
from __future__ import print_function, absolute_import, unicode_literals, division
import re
from pyaid.string.StringUtils import StringUtils
from pyramid.config import Configurator
from pyaid.file.FileUtils import FileUtils
#___________________________________________________________________________________________________ StandardConfigurator
class StandardConfigurator(Configurator):
"""A class for..."""
#===================================================================================================
# C L A S S
_REST_PATTERN = re.compile('\*[A-Za-z0-9]+$')
_DEFAULT_SETTINGS = {
'host':'0.0.0.0',
'port':6543,
'pyramid.reload_templates':True,
'pyramid.debug_authorization':False,
'pyramid.debug_notfound':False,
'pyramid.debug_routematch':False,
'pyramid.debug_templates':True,
'pyramid.default_locale_name':'en',
'pyramid.includes':'pyramid_tm',
'mako.input_encoding':'utf-8' }
#___________________________________________________________________________________________________ __init__
def __init__(self, app, rootViewPackage =None, **kwargs):
"""Creates a new instance of StandardConfigurator."""
super(StandardConfigurator, self).__init__(**kwargs)
self._isPopulated = False
self._app = app
self._rootViewPackage = rootViewPackage
self.add_request_method(
self._getMyAppRequestProperty,
StringUtils.toStrStr('ziggurat'), reify=True)
#===================================================================================================
# G E T / S E T
#___________________________________________________________________________________________________ GS: rootViewPackage
@property
def rootViewPackage(self):
return self._rootViewPackage
#___________________________________________________________________________________________________ GS: makoRootTemplatePath
@property
def makoRootTemplatePath(self):
return FileUtils.createPath(self._app.rootPath, 'templates', 'mako', isDir=True)
#___________________________________________________________________________________________________ GS: makoModuleDirectory
@property
def makoModuleDirectory(self):
return FileUtils.createPath(self._app.rootPath, 'operations', 'mako', isDir=True)
#===================================================================================================
# P U B L I C
#___________________________________________________________________________________________________ populateConfigs
def populateConfigs(self):
if self._isPopulated:
return
self._isPopulated = True
self._populateRoutes()
settings = dict(self._DEFAULT_SETTINGS.items())
p = self.makoRootTemplatePath
if p:
settings['mako.directories'] = p
p = self.makoModuleDirectory
if p:
settings['mako.module_directory'] = p
self._populateSettings(settings)
self.add_settings(settings)
#___________________________________________________________________________________________________ addRouteItem
def addRouteItem(self, name, pattern, className, renderer =None, package =None, subpackage =None):
"""Adds a route to the registry."""
# Adds optional end slash argument to URLs that don't enforce an end slash themselves
if not pattern.endswith('/'):
if self._REST_PATTERN.search(pattern) is None:
pattern += '{endSlash:[/]*}'
importDef = [className, className]
if subpackage:
importDef.insert(0, subpackage)
importDef.insert(0, package if package else self.rootViewPackage)
self.add_route(name, pattern)
self.add_view('.'.join(importDef), route_name=name, renderer=renderer)
#___________________________________________________________________________________________________ addStaticRouteItem
def addStaticRouteItem(self, name, path):
self.add_static_view(name=name, path=path)
#===================================================================================================
# P R O T E C T E D
#___________________________________________________________________________________________________ _getMyAppRequestProperty
def _getMyAppRequestProperty(self, request):
return self._app
#___________________________________________________________________________________________________ _populateSettings
def _populateSettings(self, settings):
pass
#___________________________________________________________________________________________________ _populateRoutes
def _populateRoutes(self):
"""Doc..."""
pass
#===================================================================================================
# I N T R I N S I C
#___________________________________________________________________________________________________ __repr__
def __repr__(self):
return self.__str__()
#___________________________________________________________________________________________________ __unicode__
def __unicode__(self):
return StringUtils.toUnicode(self.__str__())
#___________________________________________________________________________________________________ __str__
def __str__(self):
return '<%s>' % self.__class__.__name__
|
[
"swernst@gmail.com"
] |
swernst@gmail.com
|
ddf8e310ace1ebb6773c14c882d812f973ffa1af
|
4b4828d3c98d76d7bf38f90a015945acc408ddc5
|
/PythonAI/Source/W2D3/src/bmi_web.py
|
441f43d452b0cdebf5e3e9a87e8c79e84ae2551b
|
[] |
no_license
|
Huh-jae-won/Study
|
cb5d32728e8dcded492e7edb054b500c91ec607c
|
e4dbc3fef69bb273b62b866fb5ef2a7250222f10
|
refs/heads/main
| 2023-06-20T13:06:26.691899
| 2021-07-11T07:43:41
| 2021-07-11T07:43:41
| 362,759,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,199
|
py
|
"""
# URL : http://localhost:8080/cgi-bin/bmi_web.py
"""
# 모듈 로딩 ---------------------------------------------------
import cgi, sys, codecs, os
import joblib
# WEB 인코딩 설정 ---------------------------------------------
sys.stdout=codecs.getwriter('utf-8')(sys.stdout.detach())
# 함수 선언 --------------------------------------------------
# WEB 페이지 출력 --------------------------------------------
def displayWEB(detect_msg):
print("Content-Type: text/html; charset=utf-8")
print("")
html="""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>비만 여부 판별</title>
</head>
<body align="center">
<h2>[ 비만 상태 체크 ]</h2>
<form>
<div style='text-align:center; background-color:#D5D5D5;border-radius:10px;width:60%; margin: auto;padding:50px;'>
<input id="height" type="text" placeholder="키" name="height">   
<input id="weight" type="text" placeholder="몸무게" name="weight">
<input type="submit" value="판정"></br>
<p><font color='blue'>{}</font></p>
</div>
</form></body></html>""".format(detect_msg)
print(html)
# 판정 --------------------------------------------------------
def detect_bmi(w, h):
w = int(w)
h = int(h)
# 비만도 예측하기
res = clf.predict([[w / 100, h / 200]])
return str(res[0])
# 기능 구현 -----------------------------------------------------
# (1) 학습 데이터 읽기
pklfile = os.path.dirname(__file__) + "/bmi.pkl"
clf = joblib.load(pklfile)
# (2) WEB 페이지 <Form> -> <INPUT> 리스트 가져오기
form = cgi.FieldStorage()
height_value = form.getvalue('height')
weight_value = form.getvalue('weight')
# (3) 판정 하기
if height_value is not None and weight_value is not None:
bmi_dic = {"fat": "과체중", "normal": "정상체중", "thin": "저체중"}
result = detect_bmi(weight_value, height_value)
result = '키 {}, 몸무게 {} => {}입니다.'.format(height_value, weight_value, bmi_dic[result])
else:
result ='측정된 결과가 없습니다.'
# (4) WEB 출력하기
displayWEB(result)
|
[
"dfr9034@naver.com"
] |
dfr9034@naver.com
|
43bd61d034275b4e72d5fd73ddf6e07f646548ed
|
85f68b427bf9c4b8b5c3f8a70dccc217226e9706
|
/gam_app/old_migrations/0012_auto_20180619_1641.py
|
7de987d6f30f3cd7be49de7d4ff093f9f154c292
|
[] |
no_license
|
benrogboe/GAM
|
ffb87e76a87aa7eaf0d0d33d4df7ddc571399e49
|
fbec7cb967252578d4669c5ff91a9b0b9cdfd9d5
|
refs/heads/master
| 2020-03-30T05:08:38.811097
| 2018-09-28T18:55:24
| 2018-09-28T18:55:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
# Generated by Django 2.0.1 on 2018-06-19 16:41
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('gam_app', '0011_auto_20180619_1527'),
]
operations = [
migrations.AlterField(
model_name='imagen',
name='archivo',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='gam_app.Archivo'),
),
migrations.AlterField(
model_name='imagen',
name='colección',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='gam_app.Colección'),
),
]
|
[
"apjanco@gmail.com"
] |
apjanco@gmail.com
|
c4b0fa6a10dd0233f06a512fb6746c6c4f0b86d7
|
b17fda8e3a9f360cbab8e8ed0ecd66b03787250a
|
/.venv/lib/python2.7/site-packages/planemo/templates.py
|
caa25724a0252f54837ada1ffbff3f78b82341b4
|
[
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
maumauleon/galaxy-irri-dev
|
7a4b824c90474da0a2f3a3b858777c5089b9d5cb
|
063bf0dca5d465466aefa77edaf47df12c4ff932
|
refs/heads/master
| 2022-11-16T03:10:18.067196
| 2017-08-23T03:31:01
| 2017-08-23T03:31:01
| 98,497,124
| 1
| 2
|
NOASSERTION
| 2022-11-01T17:00:32
| 2017-07-27T05:25:40
|
Python
|
UTF-8
|
Python
| false
| false
| 478
|
py
|
try:
from jinja2 import Template
except ImportError:
Template = None
NO_JINJA2_MESSAGE = ("This functionality requires Jinja2 but this library is "
"unavailable. Install with `pip install jinja2`.")
def render(template_str, **kwds):
""" Use jinja2 to render a template
"""
if Template is None:
raise Exception(NO_JINJA2_MESSAGE)
template = Template(template_str)
contents = template.render(**kwds)
return contents
|
[
"v.juanillas@irri.org"
] |
v.juanillas@irri.org
|
dd722e77341a10ff56977e18b26a3b12366106a6
|
7729ddbb2e4eb03469cd19f2ac6b5670b831923b
|
/src/seraing/urban/dataimport/__init__.py
|
626639e80ca9ee026c47dbf2d06065f7a7893534
|
[] |
no_license
|
IMIO/seraing.urban.dataimport_22
|
5cd7bb6e09debeb72145af107b99997ba54f96a3
|
db2f3c596572159692fa6cb11050111d1cb0fca5
|
refs/heads/master
| 2021-05-05T17:01:27.788747
| 2017-09-12T13:45:01
| 2017-09-12T13:45:01
| 103,239,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
# -*- coding: utf-8 -*-
"""Init and utils."""
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('seraing.urban.dataimport')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
|
[
"julien.jaumotte@imio.be"
] |
julien.jaumotte@imio.be
|
a697246864c5f020df2a2b5b60c9e4a429c0d160
|
7f53c41182a6d9c5da0c58a15716f01725ac0316
|
/2019_2_19_public_test/test.py
|
e79247064e8a5dfa2e00c40dbbe822ef17f75f3b
|
[] |
no_license
|
1286211699/2019_1_23_pub_test
|
f6b7ee089e78ad673c56b3cd4ccee9b2154581f6
|
3aed7f4941353d48bf3407e9d30ac85c83b0ed7b
|
refs/heads/master
| 2022-12-19T14:41:15.264627
| 2019-03-21T09:46:08
| 2019-03-21T09:46:08
| 167,125,649
| 1
| 0
| null | 2022-12-08T01:33:30
| 2019-01-23T05:54:52
|
HTML
|
UTF-8
|
Python
| false
| false
| 666
|
py
|
import _thread
import time
# 为线程定义一个函数
def print_time( threadName, delay):
count = 0
while count < 5:
time.sleep(delay)
count += 1
print ("%s: %s" % ( threadName, time.ctime(time.time()) ))
# 创建两个线程
#但是这个模块我们不推荐,因为底层封装的时候它的主线程不会等待子线程的结束!
#官方以及我们推荐再封装Threading,所以在这里大家了解下
try:
_thread.start_new_thread( print_time,("Thread-1", 2, ) )
_thread.start_new_thread( print_time,("Thread-2", 4, ) )
except:
print ("Error: 无法启动线程")
while True:
pass
|
[
"1286211699@qq.com"
] |
1286211699@qq.com
|
00f569ae77237d2d80518fa93f5d1e27c4d3b867
|
f9632d4a822f6525a007598a1f67757ac9891749
|
/rakuten/playground/extract_category.py
|
7b023d96f6665916c58bdc27b25ebb3531856c12
|
[] |
no_license
|
rpryzant/marketing
|
ab055a5ae02ed287cb5a763d3e937efa72d057df
|
9f463da0a2e7c9c48951e793d95534963cd19721
|
refs/heads/master
| 2021-09-07T01:20:31.720381
| 2018-02-14T22:13:40
| 2018-02-14T22:13:40
| 116,716,301
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,074
|
py
|
"""
extracts a category from the data
"""
import map_reduce
from collections import defaultdict
# make parent dicts
genre_tsv = '/Volumes/datasets/rakuten_dump/merchandize_data/genres/genres_with_english.tsv'
id_to_pid = {}
for l in open(genre_tsv):
l = l.strip()
try:
[id, ja_name, pid, en_name] = l.split('\t')
except:
continue
id_to_pid[id] = pid
# the category you want to extract
# from fashion_categories
CATEGORIES_TO_EXTRACT = [
'502456', '100454', '502427', '100472', '110933', '502368', '100939', '100433', '216129',
'550009', '111103', '207733', '205193', '551648', '551648', '206587', '303816', '206591',
'206590', '303803', '551441', '551409', '551433', '551403', '551445', '551413', '551682',
'551668', '551648', '551664', '551644', '551677', '551672', '551652', '205197', '200807',
'207699', '100542', '100371', '558929', '204994', '402513' '402517', '402515', '508925',
'508930', '501642', '402087', '201780', '302242', '204982', '201794', '302464', '407933',
'502027', '402463', '402475', '501965', '501962', '501963', '501976', '506410', '200859'
]
def is_child(id):
# is id the child of CATEGORY_TO_EXTRACT?
while id in id_to_pid:
if id in CATEGORIES_TO_EXTRACT:
return True
id = id_to_pid[id]
return False
def map_fn(path):
out = open(path + '.OUT', 'a')
for l in open(path):
parts = l.strip().split("\t")
genre_id = parts[-1]
if is_child(genre_id):
out.write(l.strip() + '\n')
return
def reduce_fn(result_list):
return ''
map_reduce.mapreduce(
map_fn=map_fn,
reduce_fn=reduce_fn,
# input_re='/scr/rpryzant/marketing/rakuten/data/products_tsv/*.tsv',
input_re='/Volumes/datasets/rakuten_dump/merchandize_data/products_tsv/*.tsv',
output_filename='/scr/rpryzant/marketing/rakuten/categories',
n_cores=2
)
|
[
"rapigan@gmail.com"
] |
rapigan@gmail.com
|
21a03c5b4c4fdf5f65f8f33de569e2d41869d67b
|
325fde42058b2b82f8a4020048ff910cfdf737d7
|
/src/account/azext_account/vendored_sdks/subscription/operations/_subscription_operation_operations.py
|
ff4eaf571ec7c44d355033680ae253d44a157678
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
ebencarek/azure-cli-extensions
|
46b0d18fe536fe5884b00d7ffa30f54c7d6887d1
|
42491b284e38f8853712a5af01836f83b04a1aa8
|
refs/heads/master
| 2023-04-12T00:28:44.828652
| 2021-03-30T22:34:13
| 2021-03-30T22:34:13
| 261,621,934
| 2
| 5
|
MIT
| 2020-10-09T18:21:52
| 2020-05-06T01:25:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,364
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class SubscriptionOperationOperations(object):
"""SubscriptionOperationOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Version of the API to be used with the client request. Current version is 2019-10-01-preview. Constant value: "2019-10-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-10-01-preview"
self.config = config
def get(
self, operation_id, custom_headers=None, raw=False, **operation_config):
"""Get the status of the pending Microsoft.Subscription API operations.
:param operation_id: The operation ID, which can be found from the
Location field in the generate recommendation response header.
:type operation_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SubscriptionCreationResult or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.subscription.models.SubscriptionCreationResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'operationId': self._serialize.url("operation_id", operation_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
header_dict = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SubscriptionCreationResult', response)
header_dict = {
'Location': 'str',
'Retry-After': 'int',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
get.metadata = {'url': '/providers/Microsoft.Subscription/subscriptionOperations/{operationId}'}
|
[
"noreply@github.com"
] |
ebencarek.noreply@github.com
|
47d15cbb5b377278a0596f903530d487f4f3cc6c
|
1b512092052c8fe7f6919ee870431ac7b3a65f66
|
/pal/examples/MixedHalidesBE2/run_simple_misokg.py
|
77cd6d71fbb4de358efaf37a34fd2962c948ae5b
|
[] |
no_license
|
ClancyLab/PAL
|
d7b9dd1caeb62d363041b8e4c7f402d6edbf741e
|
cb0ef048de37014922b943ae6b5eaffd3d43da63
|
refs/heads/master
| 2022-02-25T05:31:20.590421
| 2019-10-14T19:47:10
| 2019-10-14T19:47:10
| 210,862,362
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,151
|
py
|
from pal.opt import Optimizer
import pal.utils.strings as pal_strings
from pal.constants.solvents import solvents
from pal.kernels.matern import maternKernel52 as mk52
# from pal.objectives.binding_energy import get_binding_energy as BE
from pal.acquisition.misokg import getNextSample_misokg
from pal.stats.MLE import MLE
from pal.stats.MAP import MAP
import os
import copy
# import random
import numpy as np
import cPickle as pickle
def run_misokg(run_index):
# Store data for debugging
IS0 = pickle.load(open("enthalpy_N1_R3_Ukcal-mol", 'r'))
IS1 = pickle.load(open("enthalpy_N1_R2_Ukcal-mol", 'r'))
# Generate the main object
sim = Optimizer()
# Assign simulation properties
#sim.hyperparameter_objective = MAP
sim.hyperparameter_objective = MLE
###################################################################################################
# File names
sim.fname_out = "enthalpy_misokg.dat"
sim.fname_historical = None
# Information sources, in order from expensive to cheap
sim.IS = [
lambda h, c, s: -1.0 * IS0[' '.join([''.join(h), c, s])],
lambda h, c, s: -1.0 * IS1[' '.join([''.join(h), c, s])]
]
sim.costs = [
1.0,
0.1
]
sim.logger_fname = "data_dumps/%d_misokg.log" % run_index
if os.path.exists(sim.logger_fname):
os.system("rm %s" % sim.logger_fname)
os.system("touch %s" % sim.logger_fname)
sim.obj_vs_cost_fname = "data_dumps/%d_misokg.dat" % run_index
sim.mu_fname = "data_dumps/%d_mu_misokg.dat" % run_index
sim.sig_fname = "data_dumps/%d_sig_misokg.dat" % run_index
sim.combos_fname = "data_dumps/%d_combos_misokg.dat" % run_index
sim.hp_fname = "data_dumps/%d_hp_misokg.dat" % run_index
sim.acquisition_fname = "data_dumps/%d_acq_misokg.dat" % run_index
sim.save_extra_files = True
########################################
# Override the possible combinations with the reduced list of IS0
# Because we do this, we should also generate our own historical sample
combos_no_IS = [k[1] + "Pb" + k[0] + "_" + k[2] for k in [key.split() for key in IS0.keys()]]
sim.historical_nsample = 10
choices = np.random.choice(combos_no_IS, sim.historical_nsample, replace=False)
tmp_data = pal_strings.alphaToNum(
choices,
solvents,
mixed_halides=True,
name_has_IS=False)
data = []
for IS in range(len(sim.IS)):
for i, d in enumerate(tmp_data):
h, c, _, s, _ = pal_strings.parseName(pal_strings.parseNum(d, solvents, mixed_halides=True, num_has_IS=False), name_has_IS=False)
c = c[0]
data.append([IS] + d + [sim.IS[IS](h, c, s)])
sim.fname_historical = "data_dumps/%d.history" % run_index
pickle.dump(data, open(sim.fname_historical, 'w'))
simple_data = [d for d in data if d[0] == 0]
pickle.dump(simple_data, open("data_dumps/%d_reduced.history" % run_index, 'w'))
########################################
sim.n_start = 10 # The number of starting MLE samples
sim.reopt = 20
sim.ramp_opt = None
sim.parallel = False
# Possible compositions by default
sim.A = ["Cs", "MA", "FA"]
sim.B = ["Pb"]
sim.X = ["Cl", "Br", "I"]
sim.solvents = copy.deepcopy(solvents)
sim.S = list(set([v["name"] for k, v in sim.solvents.items()]))
sim.mixed_halides = True
sim.mixed_solvents = False
# Parameters for debugging and overwritting
sim.debug = False
sim.verbose = True
sim.overwrite = True # If True, warning, else Error
sim.acquisition = getNextSample_misokg
# Functional forms of our mean and covariance
# MEAN: 4 * mu_alpha + mu_zeta
# COV: sig_alpha * |X><X| + sig_beta * I_N + sig_zeta + MaternKernel(S, weights, sig_m)
SCALE = [2.0, 4.0][int(sim.mixed_halides)]
# _1, _2, _3 used as dummy entries
def mean(X, Y, theta):
mu = np.array([SCALE * theta.mu_alpha + theta.mu_zeta for _ in Y])
return mu
sim.mean = mean
def cov_old(X, Y, theta):
A = theta.sig_alpha * np.dot(np.array(X)[:, 1:-3], np.array(X)[:, 1:-3].T)
B = theta.sig_beta * np.diag(np.ones(len(X)))
C = theta.sig_zeta
D = mk52(np.array(X)[:, -3:-1], [theta.l1, theta.l2], theta.sig_m)
return theta.rho_matrix(X) * (A + B + C + D)
def cov(X0, Y, theta):
A = theta.sig_alpha * np.dot(np.array(X0)[:, :-3], np.array(X0)[:, :-3].T)
B = theta.sig_beta * np.diag(np.ones(len(X0)))
C = theta.sig_zeta
D = mk52(np.array(X0)[:, -3:-1], [theta.l1, theta.l2], theta.sig_m)
Kx = A + B + C + D
L = np.array([
np.array([theta.rho[str(sorted([i, j]))] if i >= j else 0.0 for j in range(theta.n_IS)])
for i in range(theta.n_IS)
])
# Normalize L to stop over-scaling values small
if theta.normalize_L:
L = L / np.linalg.norm(L)
# Force it to be positive semi-definite
Ks = L.dot(L.T)
if theta.normalize_Ks:
Ks = Ks / np.linalg.norm(Ks)
e = np.diag(np.array([theta.e1, theta.e2]))
Ks = e.dot(Ks.dot(e))
return np.kron(Ks, Kx)
# Wire the multi-IS covariance into the simulator and declare hyperparameter
# priors/bounds. Each theta attribute is set to None (to be optimized); its
# bound is (lower, upper) where upper may be a callable of the data (_, Y).
sim.cov = cov
sim.theta.bounds = {}
sim.theta.mu_alpha, sim.theta.bounds['mu_alpha'] = None, (1E-3, lambda _, Y: max(Y))
sim.theta.sig_alpha, sim.theta.bounds['sig_alpha'] = None, (1E-2, lambda _, Y: 10.0 * np.var(Y))
sim.theta.sig_beta, sim.theta.bounds['sig_beta'] = None, (1E-2, lambda _, Y: 10.0 * np.var(Y))
sim.theta.mu_zeta, sim.theta.bounds['mu_zeta'] = None, (1E-3, lambda _, Y: max(Y))
sim.theta.sig_zeta, sim.theta.bounds['sig_zeta'] = None, (1E-2, lambda _, Y: 10.0 * np.var(Y))
sim.theta.sig_m, sim.theta.bounds['sig_m'] = None, (1E-2, lambda _, Y: np.var(Y))
sim.theta.l1, sim.theta.bounds['l1'] = None, (1E-1, 1)
sim.theta.l2, sim.theta.bounds['l2'] = None, (1E-1, 1)
sim.theta.e1, sim.theta.bounds['e1'] = None, (1E-1, 1.0)
sim.theta.e2, sim.theta.bounds['e2'] = None, (1E-1, 1.0)
# # NOTE! This is a reserved keyword in misoKG. We will generate a list of the same length
# # of the information sources, and use this for scaling our IS.
# rho is fixed here (not optimized): keys are sorted IS-index pairs.
sim.theta.rho = {"[0, 0]": 1.0, "[0, 1]": 0.96, "[1, 1]": 1.0}
#sim.theta.rho = {"[0, 0]": 1.0}
#sim.theta.rho = {"[0, 0]": None, "[0, 1]": None, "[1, 1]": None}
# NOTE(review): this 'rho [0, 0]' bound is immediately overridden by the
# (0.1, 1.0) assignment three lines below — the (1E-1, 1E1) value is dead.
sim.theta.bounds['rho [0, 0]'] = (1E-1, 1E1)
# sim.theta.bounds['rho [0, 1]'] = (1E-1, 1E1)
# sim.theta.bounds['rho [1, 1]'] = (1E-1, 1E1)
sim.theta.bounds['rho [0, 0]'] = (0.1, 1.0)
sim.theta.bounds['rho [0, 1]'] = (0.1, 1.0)
sim.theta.bounds['rho [1, 1]'] = (0.1, 1.0)
sim.theta.set_hp_names()
# Flags controlling when/how hyperparameters get re-optimized during the run.
sim.primary_rho_opt = False
sim.update_hp_only_with_IS0 = False
sim.update_hp_only_with_overlapped = False
sim.theta.normalize_L = False
sim.theta.normalize_Ks = False
# This was a test feature that actually over-wrote rho to be PSD
# sim.force_rho_psd = True
###################################################################################################
# Start simulation
sim.run()
|
[
"hherbol@gmail.com"
] |
hherbol@gmail.com
|
9d9c54c167c1d1608999cca9cd3f8deb88c08f87
|
f7a718425de1447836b547f831a120937f1fcf40
|
/plumbum/util/datefmt.py
|
86b8b0bd0033ccb5a4c68ba1d8edc9352eb63e63
|
[
"BSD-3-Clause"
] |
permissive
|
coyotevz/plumbum-old-1
|
ad8ce697ffb4cbd0a6f238f66a1c546800e47024
|
c0f769ca525298ab190592d0997575d917a4bed4
|
refs/heads/master
| 2021-01-20T10:50:32.516766
| 2016-11-18T04:20:32
| 2016-11-18T04:20:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
# -*- coding: utf-8 -*-
import os
import time
from datetime import datetime
# date/time utilities
# Expose platform-appropriate "now" callables. Only POSIX is supported;
# a Windows-specific time source has not been implemented yet.
if os.name == 'nt':
    raise NotImplementedError("Not yet implemented for this platform")
else:
    # POSIX: plain wall-clock sources are sufficient.
    time_now, datetime_now = time.time, datetime.now
|
[
"augusto@rioplomo.com.ar"
] |
augusto@rioplomo.com.ar
|
908203b5cd69481a591c3a236d23ab58bfe761cd
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_321/ch130_2020_04_01_16_55_20_162558.py
|
b1a491a59ae05eb3b4d84e3ac5803ce5f576c87e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
def monta_mala(l):
    """Pack items (weights) into a suitcase, in order, up to a 23 kg limit.

    Stops at the first item that would push the total over 23.

    l: list of numeric weights.
    return: list with the leading items of `l` whose running sum stays <= 23.

    BUG FIX: the original compared against l[0] on every iteration instead of
    the current item, and indexed past the end of the list (IndexError on
    inputs like [] or [1]) because the while-loop only checked the sum.
    """
    LIMITE = 23
    mala = []
    total = 0
    for peso in l:
        # Stop at the first item that would exceed the weight limit.
        if total + peso > LIMITE:
            break
        mala.append(peso)
        total += peso
    return mala
|
[
"you@example.com"
] |
you@example.com
|
4302c2a92d3fd0e16720b5d0bb2c81e469aa422d
|
71e8bdddd84338bbb2d77934351d76251c2fd77d
|
/unique-paths.py
|
39eeaaf353b483ccb782eeba05930f71fcbd9851
|
[] |
no_license
|
onestarshang/leetcode
|
3da20fbec1b42d3565eb95a64ea3f30c29f1e1eb
|
0a7aa09a2b95e4caca5b5123fb735ceb5c01e992
|
refs/heads/master
| 2021-01-09T06:00:06.018037
| 2016-12-17T16:17:49
| 2016-12-17T16:17:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
#coding: utf-8
'''
http://oj.leetcode.com/problems/unique-paths/
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
How many possible unique paths are there?
Above is a 3 x 7 grid. How many possible unique paths are there?
Note: m and n will be at most 100.
'''
class Solution:
    # @return an integer
    def uniquePaths(self, m, n):
        """Count right/down-only lattice paths on an m x n grid.

        Uses a rolling 1-D dynamic program: row[j] is the number of ways to
        reach column j of the current row (from above plus from the left).
        """
        if m == 0 or n == 0:
            return 0
        # First row: exactly one way to reach each cell (keep moving right).
        row = [1] * n
        for _ in range(1, m):
            for j in range(1, n):
                # New row[j] = paths from above (old row[j]) + from left.
                row[j] += row[j - 1]
        return row[-1]
|
[
"irachex@gmail.com"
] |
irachex@gmail.com
|
1b5cfbe1f3042ab381911ffa943576eb5a6a5208
|
32904d4841d104143ba0f41cc3aeb749e470f546
|
/backend/django/apps/memos/migrations/0008_auto_20191025_2003.py
|
3fdde9fa6cac56f3d36a37dc33c06ac8382c74cb
|
[] |
no_license
|
aurthurm/dispatrace-api-vuejs
|
20ec5deee015e69bce7a64dc2d89ccae8941b800
|
56d122318af27ff64755fc515345974631d3026f
|
refs/heads/master
| 2023-01-23T23:03:15.438339
| 2020-10-20T22:09:29
| 2020-10-20T22:09:29
| 219,028,985
| 0
| 1
| null | 2022-12-22T18:31:38
| 2019-11-01T17:08:35
|
Vue
|
UTF-8
|
Python
| false
| false
| 540
|
py
|
# Generated by Django 2.2.6 on 2019-10-25 18:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2.6): re-declare MemoAttachment.memo so its
    reverse accessor becomes 'memoattachment_attachment'."""

    dependencies = [
        ('memos', '0007_memoattachment_memocomment'),
    ]

    operations = [
        migrations.AlterField(
            model_name='memoattachment',
            name='memo',
            # CASCADE: deleting a Memo removes its attachments.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memoattachment_attachment', to='memos.Memo'),
        ),
    ]
|
[
"aurthurmusendame@gmail.com"
] |
aurthurmusendame@gmail.com
|
33e6fb56b398cd6635d41be61972d9290f4fa7f1
|
cdd79cef15bdf6a0b9098e27028bbe38607bc288
|
/数論/Combination/mcomb.py
|
0f7e575c0fe0a8647ae6beff5f8fa66747094a11
|
[] |
no_license
|
nord2sudjp/atcoder
|
ee35a3eb35717485dc62627172de24c9dac102fb
|
6b1cc5102a615492cc7ff8a33813bbb954641782
|
refs/heads/master
| 2023-08-25T11:27:14.205593
| 2021-09-27T05:43:04
| 2021-09-27T05:43:04
| 302,855,505
| 0
| 0
| null | null | null | null |
SHIFT_JIS
|
Python
| false
| false
| 654
|
py
|
# https://atcoder.jp/contests/abc145/submissions/10775904
def comb(n, r, mod):
    """Return C(n, r) modulo `mod`.

    `mod` must be prime: each division is done via Fermat's little theorem
    (pow(i, mod - 2, mod) is the modular inverse of i).
    """
    if n < r:
        return 0
    # Use the smaller side of the symmetry C(n, r) == C(n, n - r).
    r = min(r, n - r)
    result = 1
    for step in range(1, r + 1):
        result = result * (n + 1 - step) % mod
        result = result * pow(step, mod - 2, mod) % mod
    return result
# NOTE(review): `com` is undefined (probably meant `comb`), and n, m, mod are
# not defined anywhere in this file — executing this line raises NameError.
print(com(n+m,n,mod))
#
def comb_cal(n):
    """Return the pair (2**n - 1, sum of (2**i - 1) for i = 1..n)."""
    running_total = 0
    power = 1
    for _ in range(n):
        power *= 2
        running_total += power - 1
    return (power - 1, running_total)
# Demo: row count and cumulative total for a depth-3 table.
i,j=comb_cal(3)
print(i,j)
'''
1: 1 : 1
2: 2 1 : 3
3: 3 3 1 : 7
4: 4 6 4 1 : 15
5: 5 10 10 5 1 : 31
6: 6 15 20 15 6 1 : 63
7: 7 21 35 35 21 7 1 : 127
8: 8 28 56 70 56 28 8 1 : 255
9: 9 36 84 126 126 84 36 9 1 : 511
1013
'''
|
[
"nord2sudjp@gmail.com"
] |
nord2sudjp@gmail.com
|
4625b7562e6935395144e1da64a15c0b078f999e
|
52b5773617a1b972a905de4d692540d26ff74926
|
/triangle.py
|
aabaa8b43b28d0f6063839f8844acbb0a8568919
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
# this function is meant to print a triangle
def triangle():
    """Print a 4-row right triangle of asterisks, one row per line."""
    for row in range(4):
        # Row k has k+1 stars, each followed by a space.
        print("* " * (row + 1), end="")
        # Original terminated each row with a carriage return + newline.
        print("\r")


triangle()
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
234fe0703bcd32e0a8e3cea1e43969c845b3ac6e
|
ba0cbdae81c171bd4be7b12c0594de72bd6d625a
|
/MyToontown/Toontown2016/toontown/toonbase/ToontownStartDist.py
|
6718c47b2d8ad07f5cb8e4b83442d6bf516c3334
|
[] |
no_license
|
sweep41/Toontown-2016
|
65985f198fa32a832e762fa9c59e59606d6a40a3
|
7732fb2c27001264e6dd652c057b3dc41f9c8a7d
|
refs/heads/master
| 2021-01-23T16:04:45.264205
| 2017-06-04T02:47:34
| 2017-06-04T02:47:34
| 93,279,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,538
|
py
|
# This is the "main" module that will start a distribution copy of
# Toontown 2016
# Replace some modules that do exec:
import collections
collections.namedtuple = lambda *x: tuple
# This is included in the package by the prepare_client script. It contains the
# PRC file data, (stripped) DC file, and time zone info:
import game_data
# Load all of the packaged PRC config page(s):
from pandac.PandaModules import *
for i, config in enumerate(game_data.CONFIG):
name = 'GameData config page #{0}'.format(i)
loadPrcFileData(name, config)
# The VirtualFileSystem, which has already initialized, doesn't see the mount
# directives in the config(s) yet. We have to force it to load them manually:
vfs = VirtualFileSystem.getGlobalPtr()
mounts = ConfigVariableList('vfs-mount')
for mount in mounts:
mountFile, mountPoint = (mount.split(' ', 2) + [None, None, None])[:2]
mountFile = Filename(mountFile)
mountFile.makeAbsolute()
mountPoint = Filename(mountPoint)
vfs.mount(mountFile, mountPoint, 0)
# To read the DC file as a StringStream, we must override the ClientRepository:
dcStream = StringStream(game_data.DC)
from direct.distributed import ConnectionRepository
import types
class ConnectionRepository_override(ConnectionRepository.ConnectionRepository):
def readDCFile(self, dcFileNames=None):
dcFile = self.getDcFile()
dcFile.clear()
self.dclassesByName = {}
self.dclassesByNumber = {}
self.hashVal = 0
dcImports = {}
readResult = dcFile.read(dcStream, 'DC stream')
if not readResult:
self.notify.error("Could not read dc file.")
self.hashVal = dcFile.getHash()
for n in xrange(dcFile.getNumImportModules()):
moduleName = dcFile.getImportModule(n)[:]
suffix = moduleName.split('/')
moduleName = suffix[0]
suffix=suffix[1:]
if self.dcSuffix in suffix:
moduleName += self.dcSuffix
elif self.dcSuffix == 'UD' and 'AI' in suffix: #HACK:
moduleName += 'AI'
importSymbols = []
for i in xrange(dcFile.getNumImportSymbols(n)):
symbolName = dcFile.getImportSymbol(n, i)
suffix = symbolName.split('/')
symbolName = suffix[0]
suffix=suffix[1:]
if self.dcSuffix in suffix:
symbolName += self.dcSuffix
elif self.dcSuffix == 'UD' and 'AI' in suffix: #HACK:
symbolName += 'AI'
importSymbols.append(symbolName)
self.importModule(dcImports, moduleName, importSymbols)
for i in xrange(dcFile.getNumClasses()):
dclass = dcFile.getClass(i)
number = dclass.getNumber()
className = dclass.getName() + self.dcSuffix
classDef = dcImports.get(className)
if classDef is None and self.dcSuffix == 'UD':
className = dclass.getName() + 'AI'
classDef = dcImports.get(className)
if classDef == None:
className = dclass.getName()
classDef = dcImports.get(className)
if classDef is None:
self.notify.debug("No class definition for %s." % (className))
else:
if type(classDef) == types.ModuleType:
if not hasattr(classDef, className):
self.notify.warning("Module %s does not define class %s." % (className, className))
continue
classDef = getattr(classDef, className)
if type(classDef) != types.ClassType and type(classDef) != types.TypeType:
self.notify.error("Symbol %s is not a class name." % (className))
else:
dclass.setClassDef(classDef)
self.dclassesByName[className] = dclass
if number >= 0:
self.dclassesByNumber[number] = dclass
if self.hasOwnerView():
ownerDcSuffix = self.dcSuffix + 'OV'
ownerImportSymbols = {}
for n in xrange(dcFile.getNumImportModules()):
moduleName = dcFile.getImportModule(n)
suffix = moduleName.split('/')
moduleName = suffix[0]
suffix=suffix[1:]
if ownerDcSuffix in suffix:
moduleName = moduleName + ownerDcSuffix
importSymbols = []
for i in xrange(dcFile.getNumImportSymbols(n)):
symbolName = dcFile.getImportSymbol(n, i)
suffix = symbolName.split('/')
symbolName = suffix[0]
suffix=suffix[1:]
if ownerDcSuffix in suffix:
symbolName += ownerDcSuffix
importSymbols.append(symbolName)
ownerImportSymbols[symbolName] = None
self.importModule(dcImports, moduleName, importSymbols)
for i in xrange(dcFile.getNumClasses()):
dclass = dcFile.getClass(i)
if ((dclass.getName()+ownerDcSuffix) in ownerImportSymbols):
number = dclass.getNumber()
className = dclass.getName() + ownerDcSuffix
classDef = dcImports.get(className)
if classDef is None:
self.notify.error("No class definition for %s." % className)
else:
if type(classDef) == types.ModuleType:
if not hasattr(classDef, className):
self.notify.error("Module %s does not define class %s." % (className, className))
classDef = getattr(classDef, className)
dclass.setOwnerClassDef(classDef)
self.dclassesByName[className] = dclass
ConnectionRepository.ConnectionRepository = ConnectionRepository_override
# We also need timezone stuff:
class dictloader(object):
def __init__(self, dict):
self.dict = dict
def get_data(self, key):
return self.dict.get(key.replace('\\','/'))
import pytz
pytz.__loader__ = dictloader(game_data.ZONEINFO)
# Finally, start the game:
import toontown.toonbase.ToontownStart
|
[
"sweep14@gmail.com"
] |
sweep14@gmail.com
|
a822fbeeb592b742c4ddbe11b82b3ead6703f4e6
|
26e2c68f929ecc8bb5c20c6b8cd200b66d99def5
|
/DjangoDopLEsson/products/migrations/0001_initial.py
|
0ce5c7a66193f21ead1baaf75f96d6d86c10e249
|
[] |
no_license
|
kirigaikabuto/DjangoLessonsPart
|
ad19c1da0d1da27830c6fdf1b07353632bbc097d
|
4442518ae1f0a8641e066c9a63ff4e55e04d5fe5
|
refs/heads/master
| 2022-11-28T10:29:54.428001
| 2020-08-03T09:26:42
| 2020-08-03T09:26:42
| 273,497,052
| 0
| 0
| null | 2020-08-03T09:26:43
| 2020-06-19T13:11:15
|
Python
|
UTF-8
|
Python
| false
| false
| 590
|
py
|
# Generated by Django 3.0.7 on 2020-06-24 05:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.7) initial schema: create the Product table
    with id, name, description and price columns."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('price', models.IntegerField()),
            ],
        ),
    ]
|
[
"ytleugazy@dar.kz"
] |
ytleugazy@dar.kz
|
910aba7d092e6fe88237d6d7c73f25a5638d20a8
|
c70dfc0d74b34e41f7d2dbdbd6bfaca2f79af55b
|
/cyp/models/convnet.py
|
7e74e171c0c26477317c14467986b5411a787c77
|
[
"MIT"
] |
permissive
|
jeangolay/pycrop-yield-prediction
|
a2c65fa3bd704d1d3251318a9afe39bfcd05cf10
|
1b36b673cc58b506ad4d3c8bd6b6917ac5a72d28
|
refs/heads/master
| 2021-02-18T06:58:31.844163
| 2019-11-25T13:45:55
| 2019-11-25T13:45:55
| 245,172,915
| 0
| 1
|
MIT
| 2020-03-05T13:39:21
| 2020-03-05T13:39:20
| null |
UTF-8
|
Python
| false
| false
| 7,994
|
py
|
import torch
from torch import nn
import torch.nn.functional as F
from pathlib import Path
from .base import ModelBase
class ConvModel(ModelBase):
    """
    A PyTorch replica of the CNN structured model from the original paper. Note that
    this class assumes feature_engineering was run with channels_first=True
    Parameters
    ----------
    in_channels: int, default=9
        Number of channels in the input data. Default taken from the number of bands in the
        MOD09A1 + the number of bands in the MYD11A2 datasets
    dropout: float, default=0.5
        Default taken from the original paper
    dense_features: list, or None, default=None.
        output feature size of the Linear layers. If None, default values will be taken from the paper.
        The length of the list defines how many linear layers are used.
    time: int, default=32
        The number of timesteps being used. This is necessary to pass in the initializer since it will
        affect the size of the first dense layer, which is the flattened output of the conv layers
    savedir: pathlib Path, default=Path('data/models')
        The directory into which the models should be saved.
    device: torch.device
        Device to run model on. By default, checks for a GPU. If none exists, uses
        the CPU
    """

    def __init__(self, in_channels=9, dropout=0.5, dense_features=None, time=32,
                 savedir=Path('data/models'), use_gp=True, sigma=1, r_loc=0.5, r_year=1.5,
                 sigma_e=0.01, sigma_b=0.01,
                 device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')):
        # save values for reinitialization
        self.in_channels = in_channels
        self.dropout = dropout
        self.dense_features = dense_features
        self.time = time
        model = ConvNet(in_channels=in_channels, dropout=dropout,
                        dense_features=dense_features, time=time)
        # Locate the final Linear layer's parameter names; ModelBase uses them
        # (presumably for the Gaussian Process head) — two dense layers by default.
        if dense_features is None:
            num_dense_layers = 2
        else:
            num_dense_layers = len(dense_features)
        model_weight = f'dense_layers.{num_dense_layers - 1}.weight'
        model_bias = f'dense_layers.{num_dense_layers - 1}.bias'
        super().__init__(model, model_weight, model_bias, 'cnn', savedir, use_gp, sigma, r_loc,
                         r_year, sigma_e, sigma_b, device)

    def reinitialize_model(self, time=None):
        # the only thing which changes here is the time value, since this affects the
        # size of the first dense layer.
        if time is None:
            time = self.time
        model = ConvNet(in_channels=self.in_channels, dropout=self.dropout,
                        dense_features=self.dense_features, time=time)
        if self.device.type != 'cpu':
            model = model.cuda()
        self.model = model
class ConvNet(nn.Module):
    """
    A crop yield conv net.
    For a description of the parameters, see the ConvModel class.
    Only handles strides of 1 and 2
    """

    def __init__(self, in_channels=9, dropout=0.5, dense_features=None, time=32):
        super().__init__()
        # values taken from the paper
        in_out_channels_list = [in_channels, 128, 256, 256, 512, 512, 512]
        stride_list = [None, 1, 2, 1, 2, 1, 2]
        # Figure out the size of the final flattened conv layer, which
        # is dependent on the input size
        # Each stride-2 block halves the time dimension (rounding up to even
        # first, matching "same" padding behavior). `time` becomes a float here
        # and is cast back to int when computing the dense input size below.
        num_divisors = sum([1 if i == 2 else 0 for i in stride_list])
        for i in range(num_divisors):
            if time % 2 != 0:
                time += 1
            time /= 2
        if dense_features is None:
            dense_features = [2048, 1]
        # NOTE(review): insert(0, ...) mutates a caller-supplied dense_features
        # list in place — repeated construction with the same list would grow it.
        dense_features.insert(0, int(in_out_channels_list[-1] * time * 4))
        assert len(stride_list) == len(in_out_channels_list), \
            "Stride list and out channels list must be the same length!"
        # One ConvBlock per consecutive (in, out) channel pair.
        self.convblocks = nn.ModuleList([
            ConvBlock(in_channels=in_out_channels_list[i-1],
                      out_channels=in_out_channels_list[i],
                      kernel_size=3, stride=stride_list[i],
                      dropout=dropout) for
            i in range(1, len(stride_list))
        ])
        self.dense_layers = nn.ModuleList([
            nn.Linear(in_features=dense_features[i-1],
                      out_features=dense_features[i]) for
            i in range(1, len(dense_features))
        ])
        self.initialize_weights()

    def initialize_weights(self):
        # Kaiming-uniform weights, zero biases for every conv and dense layer.
        for convblock in self.convblocks:
            nn.init.kaiming_uniform_(convblock.conv.weight.data)
            # http://cs231n.github.io/neural-networks-2/#init
            # see: Initializing the biases
            nn.init.constant_(convblock.conv.bias.data, 0)
        for dense_layer in self.dense_layers:
            nn.init.kaiming_uniform_(dense_layer.weight.data)
            nn.init.constant_(dense_layer.bias.data, 0)

    def forward(self, x, return_last_dense=False):
        """
        If return_last_dense is true, the feature vector generated by the second to last
        dense layer will also be returned. This is then used to train a Gaussian Process model.
        """
        for block in self.convblocks:
            x = block(x)
        # flatten
        x = x.view(x.shape[0], -1)
        for layer_number, dense_layer in enumerate(self.dense_layers):
            x = dense_layer(x)
            # Capture the penultimate layer's output as the GP feature vector.
            if return_last_dense and (layer_number == len(self.dense_layers) - 2):
                output = x
        if return_last_dense:
            return x, output
        return x
class ConvBlock(nn.Module):
    """One conv stage: Conv2d (TF-style "same" padding) -> BatchNorm2d ->
    ReLU -> Dropout."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, dropout):
        super().__init__()
        # Same-padding conv so spatial size only changes through the stride.
        self.conv = Conv2dSamePadding(in_channels=in_channels,
                                      out_channels=out_channels,
                                      kernel_size=kernel_size,
                                      stride=stride)
        self.batchnorm = nn.BatchNorm2d(num_features=out_channels)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        out = self.conv(x)
        out = self.batchnorm(out)
        out = self.relu(out)
        return self.dropout(out)
class Conv2dSamePadding(nn.Conv2d):
    """Represents the "Same" padding functionality from Tensorflow.
    See: https://github.com/pytorch/pytorch/issues/3867
    This solution is mostly copied from
    https://github.com/pytorch/pytorch/issues/3867#issuecomment-349279036
    Note that the padding argument in the initializer doesn't do anything now
    """

    def forward(self, input):
        # Delegate to the module-level helper, which recomputes the TF-style
        # "same" padding from the actual input size on every call.
        return conv2d_same_padding(input, self.weight, self.bias, self.stride,
                                   self.dilation, self.groups)
def conv2d_same_padding(input, weight, bias=None, stride=1, dilation=1, groups=1):
    """F.conv2d with TensorFlow-style "same" padding.

    stride and dilation are expected to be tuples (as stored on nn.Conv2d).
    When the total padding for a dimension is odd, one extra row/column of
    zeros is padded on the bottom/right before the symmetric conv padding.
    """
    half_pad = []   # symmetric padding passed to F.conv2d, per (rows, cols)
    extra = []      # whether an extra trailing pad row/col is needed
    for dim in (2, 3):
        axis = dim - 2  # index into the stride/dilation tuples
        in_size = input.size(dim)
        effective_filter = (weight.size(dim) - 1) * dilation[axis] + 1
        out_size = (in_size + stride[axis] - 1) // stride[axis]
        total_pad = max(0, (out_size - 1) * stride[axis] + effective_filter - in_size)
        half_pad.append(total_pad // 2)
        extra.append(total_pad % 2 != 0)
    rows_odd, cols_odd = extra
    if rows_odd or cols_odd:
        # F.pad order is (left, right, top, bottom): pad only right/bottom.
        input = F.pad(input, [0, int(cols_odd), 0, int(rows_odd)])
    return F.conv2d(input, weight, bias, stride,
                    padding=(half_pad[0], half_pad[1]),
                    dilation=dilation, groups=groups)
|
[
"gabriel.tseng@mail.mcgill.ca"
] |
gabriel.tseng@mail.mcgill.ca
|
d857d210c85ab7e5b44aa427f2403019ebe176a1
|
f08d137b7821d79672c91e5f06967ffa1f90e278
|
/.history/Python/Main_py_20211021101357.py
|
24180ebf2cf62ee0838fe71a2cd46e81d5e858e6
|
[] |
no_license
|
anhviet-key/hello-cac-ban
|
a39ffb1731a77dd171523ea145f5d8b62fccde7c
|
18411b51add7e3277d42869f8a50c67111337983
|
refs/heads/main
| 2023-08-23T09:02:01.074958
| 2021-10-27T07:48:47
| 2021-10-27T07:48:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
from Sub_py import emailProcess, print_Mess
def main():
    """Run the imported email processor over a fixed list of sample addresses."""
    emails = ["hello.vi@gmail.com", "Package@yahoo.com", "Test@gmail.dev"]
    for email in emails:
        # BUG FIX: was `usemailProcess(email)` — an undefined name that raised
        # NameError; the helper imported from Sub_py is `emailProcess`.
        emailProcess(email)


if __name__ == "__main__":
    main()
|
[
"92666546+anhviet-key@users.noreply.github.com"
] |
92666546+anhviet-key@users.noreply.github.com
|
9dcddbcc8d5d3f81e9b43c1b674bb99bf74081e6
|
495943f075f6a641d456d66deebb208847cb6c50
|
/bases/bases.py
|
4b971bac31d64037ece100affafdb194b8cec092
|
[] |
no_license
|
LukazDane/CS-1.3
|
377a6ef77c3db4a497f492ed73a3ba2487531b93
|
9cee1f71b9374a54a1fe336cd1f8db1a51275ef8
|
refs/heads/master
| 2022-07-04T00:26:48.498036
| 2020-05-11T02:37:00
| 2020-05-11T02:37:00
| 255,189,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,209
|
py
|
import string
import math
# ##### https://en.wikipedia.org/wiki/List_of_Unicode_characters
# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace
def decode(digits, base):
    """Decode given digits in given base to number in base 10.

    digits: str -- string representation of number (in given base)
    base: int -- base of given number
    return: int -- integer representation of number (in base 10)
    """
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # Horner's rule: each new digit shifts the accumulated value one place.
    # int(ch, base) accepts 0-9 and a-z/A-Z exactly like the original per-char
    # conversion. (Cleanup: removed the unused `decoded` list and the large
    # dead commented-out implementation.)
    value = 0
    for ch in digits:
        value = value * base + int(ch, base)
    return value
# BUG FIX: the first sample was '1110 1100' — the embedded space made the
# per-character conversion raise ValueError (int(' ', base=2)). Use the
# digits only.
print(decode('11101100', 2))
print(decode('fff', 16))
print(decode("1a2b", 32))
def encode(number, base):
    """Encode given number in base 10 to digits in given base.

    number: int -- integer representation of number (in base 10)
    base: int -- base to convert to
    return: str -- string representation of number (in given base)
    """
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)
    # Handle unsigned numbers only for now
    assert number >= 0, 'number is negative: {}'.format(number)
    # BUG FIX: the original computed the quotient with float division
    # (number / base) and re-parsed str(float), which silently corrupts
    # results once number exceeds float precision (~2**53). divmod is exact.
    # Robustness fix: zero now encodes as '0' (previously returned '').
    if number == 0:
        return '0'
    alphabet = string.digits + string.ascii_lowercase
    result = []
    while number > 0:
        number, remainder = divmod(number, base)
        result.append(alphabet[remainder])
    return ''.join(reversed(result))
def convert(digits, base1, base2):
    """Convert given digits in base1 to digits in base2.

    digits: str -- string representation of number (in base1)
    base1: int -- base of given number
    base2: int -- base to convert to
    return: str -- string representation of number (in base2)
    """
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base1 <= 36, 'base1 is out of range: {}'.format(base1)
    assert 2 <= base2 <= 36, 'base2 is out of range: {}'.format(base2)
    # Round-trip through an integer: base1 digits -> int -> base2 digits.
    return encode(decode(digits, base1), base2)
def main():
    """Read command-line arguments and convert given digits between bases."""
    import sys
    cli_args = sys.argv[1:]  # Ignore script file name
    if len(cli_args) != 3:
        print('Usage: {} digits base1 base2'.format(sys.argv[0]))
        print('Converts digits from base1 to base2')
        return
    digits = cli_args[0]
    base1 = int(cli_args[1])
    base2 = int(cli_args[2])
    # Convert given digits between bases
    result = convert(digits, base1, base2)
    print('{} in base {} is {} in base {}'.format(
        digits, base1, result, base2))


if __name__ == '__main__':
    main()
|
[
"deandrevidal@aol.com"
] |
deandrevidal@aol.com
|
4faf1c90487d459da70158af665f0ebc2c9cf364
|
d75fc0ae459066bfb15187d1c902e22000153dc4
|
/TestScript/tenderverificationONSupplierStatus.py
|
44e27edee4b8c97a036372329b6aa5c7f6dc4e37
|
[] |
no_license
|
sureshkumarkadi/Project
|
875a05a752164ff9620286ab8261c7774acc4f27
|
4652edfa6ac47d6f44bd41e03314d96753e09d92
|
refs/heads/master
| 2020-03-25T19:52:23.124215
| 2018-08-09T05:28:08
| 2018-08-09T05:28:08
| 144,104,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,308
|
py
|
#-------------------------------------------------------------------------------
# Name: module2
# Purpose:
#
# Author: mathew.jacob
#
# Created: 25/08/2016
# Copyright: (c) mathew.jacob 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
from selenium.webdriver.support.ui import WebDriverWait
import unittest
import sys
import os
import time
import traceback
dir_path = os.path.dirname(os.path.realpath(__file__))
folder_path=os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0,folder_path+"\Library")
sys.path.insert(0,folder_path+"\Syslibrary")
sys.path.insert(0,folder_path+"\Data")
sys.path.insert(0,folder_path+"\Object")
from launcheTender import LauncheTenderclass
from tenderDetails import Tenderdetails
from tenderDetails import SubmitTenderclass
from datadriven import DataDriver
from setupenviron import setupValue
from logouteTender import Userprofilemenu
##from RESTAPI import ReopentenderusingRESTAPIclass
from RESTAPIStaging import ReopentenderusingRESTAPIclass
from logdriver import logvalue
logs=logvalue.logger
logclose=logvalue()
ftime = time.mktime(time.localtime())
ptime=time.strftime("%d-%m-%Y_%H%M%S", time.localtime(ftime))
#filename = 'TestCase-100358-{0}.png'.format(ptime)
tf = 'test_TenderverificationONSupplierStatus'
filename = 'Testcase-%s.png' %(tf)
path= setupValue().screenpath
fullpath = os.path.join(path,filename)
#Test case Number = 100358
class TenderverificationONSupplierStatus(unittest.TestCase):
    """Selenium test case 100358: after a subcontractor logs in and opens a
    project, the tender's supplier status element must read 'Review pending'.

    On failure: logs the error, saves a screenshot to `fullpath`, prints the
    traceback and fails the test. The browser is always closed in `finally`.
    """

    def test_TenderverificationONSupplierStatus(self):
        try:
            # Launch the browser and navigate to the application URL.
            browserInstance = setupValue()
            browser = browserInstance.setupfunction()
            browser.implicitly_wait(5)
            time.sleep(1)
            LauncheTender1 = LauncheTenderclass()
            browser = LauncheTender1.openURL(browser)
            browser.implicitly_wait(5)
            time.sleep(1)
            tenderDetails = Tenderdetails()
            # Log in as subcontractor and open the target project.
            browser = LauncheTender1.subcontractorValidlogin(browser)
            #browser = LauncheTender1.list_Organisation(browser)
            #browser = LauncheTender1.verifyorganisationdetails(browser)
            browser = LauncheTender1.list_project(browser)
            time.sleep(1)
            browser = tenderDetails.Subcontratorproject(browser)
            time.sleep(2)
            # Read the supplier-status element's XPath from the object repository.
            tenderverifySupplierstatus1 = DataDriver()
            tenderverifySupplierstatus_path = tenderverifySupplierstatus1.readfromXML(folder_path+'\Object\TenderPage.xml','eTender','tenderverifySupplierstatus')
            time.sleep(1)
            tenderverifySupplierstatus = browser.find_element_by_xpath(tenderverifySupplierstatus_path) #Webelement for values
            time.sleep(1)
            self.assertEqual(tenderverifySupplierstatus.text,'Review pending')
            logs.info("Test Case No : 100358 Passed Successfully")
        except Exception:
            logs.error("Validation with Test Case No: 100358 failed")
            browser.save_screenshot(fullpath)
            traceback.print_exc(file=sys.stdout)
            self.fail("Test Case No: 100358 failed")
            # NOTE(review): unreachable — self.fail() raises, so this wait
            # never executes.
            browser.implicitly_wait(5)
        finally:
            LauncheTender1.closebrowser(browser)


if __name__ == '__main__':
    unittest.main()
|
[
"Suresh.Kumar@causeway.com"
] |
Suresh.Kumar@causeway.com
|
fe1157f372e8999831140b5c8835adac1ce983b2
|
bc572eca7a03aec83ee55300887a21cad3dbd160
|
/tools/Polygraphy/tests/comparator/test_postprocess.py
|
0cbba453125e5baa918d358612e41d35c9cb243d
|
[
"Apache-2.0",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause",
"MIT"
] |
permissive
|
wuqiangch/TensorRT
|
fba0029dc5c0b3b9ffa091e45f26d8d10d702393
|
d04182cd0086c70db4a8ad30e0d7675c4eb33782
|
refs/heads/master
| 2023-05-31T21:04:01.079351
| 2021-06-23T20:37:20
| 2021-06-25T19:39:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,520
|
py
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from polygraphy.comparator import PostprocessFunc, IterationResult
class TestTopK(object):
    """Tests for PostprocessFunc.topk_func: for [1..5], the expected outputs
    are the indices of the k largest values in descending order."""

    def test_basic(self):
        # Top-3 of [1..5] -> indices of 5, 4, 3.
        arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
        func = PostprocessFunc.topk_func(k=3)
        top_k = func(IterationResult({"x": arr}))
        assert np.all(top_k["x"] == [4, 3, 2])

    def test_k_can_exceed_array_len(self):
        # k larger than the array simply returns all indices, sorted by value.
        arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
        func = PostprocessFunc.topk_func(k=10)
        top_k = func(IterationResult({"x": arr}))
        assert np.all(top_k["x"] == [4, 3, 2, 1, 0])

    def test_per_output_top_k(self):
        # k given as a dict: "" is the default; per-output keys override it.
        arr = np.array([1, 2, 3, 4, 5], dtype=np.float32)
        func = PostprocessFunc.topk_func(k={"": 10, "y": 2})
        top_k = func(IterationResult({"x": arr, "y": arr}))
        assert np.all(top_k["x"] == [4, 3, 2, 1, 0])
        assert np.all(top_k["y"] == [4, 3])
|
[
"rajeevsrao@users.noreply.github.com"
] |
rajeevsrao@users.noreply.github.com
|
a6d0047071d0b232286f98b5287c49a605e6a21e
|
320a98d428bf06eff6f3f209b1eadeb366a65482
|
/common/version.py
|
c4dd9de7ffd4c1b577a51386ff7b1cc74c444cd3
|
[] |
no_license
|
Ryan--Yang/share
|
6fe8b21918206fed903bd7a315216b47e58f697e
|
4acc658f7c0a8f1b50f7b5c0b8884b96fe1e137d
|
refs/heads/master
| 2020-12-31T02:42:22.125477
| 2013-12-04T07:27:53
| 2013-12-24T01:54:38
| 14,791,494
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,644
|
py
|
from util import *
default_java_file = '/usr/lib/jvm/default-java'
gcc_file = '/usr/bin/gcc'
def handle_option():
    """Parse command-line options into the module-level `args` namespace.

    Prints the help text when the script is invoked with no arguments.
    """
    global args
    # NOTE(review): the epilog examples use a `-v` flag, but only -s/-g/-t
    # are defined below — the examples appear stale.
    parser = argparse.ArgumentParser(description = 'Set up the version of Java',
                                     formatter_class = argparse.RawTextHelpFormatter,
                                     epilog = '''
examples:
python %(prog)s -v 1.5
python %(prog)s -v 1.7.0_45
''')
    parser.add_argument('-s', '--set-version', dest='set_version', help='set version')
    parser.add_argument('-g', '--get', dest='get_version', help='get version', action='store_true')
    parser.add_argument('-t', '--target', dest='target', help='target to set version with', choices=['java', 'gcc'], default='gcc')
    args = parser.parse_args()
    if len(sys.argv) <= 1:
        parser.print_help()
def setup():
    """Intentionally a no-op; kept as a placeholder hook."""
    pass
def get_version():
    """Report the installed version of the selected target (java or gcc)."""
    if not args.get_version:
        return
    # argparse restricts args.target to 'java' or 'gcc'.
    handlers = {'java': get_version_java, 'gcc': get_version_gcc}
    handler = handlers.get(args.target)
    if handler is not None:
        handler()
def set_version():
    """Switch the selected target (java or gcc) to the requested version."""
    if not args.set_version:
        return
    # argparse restricts args.target to 'java' or 'gcc'.
    handlers = {'java': set_version_java, 'gcc': set_version_gcc}
    handler = handlers.get(args.target)
    if handler is not None:
        handler()
def get_version_java():
    """Print the active java version, the JAVA_HOME jdk suffix, and the
    jdk suffix that /usr/lib/jvm/default-java points at."""
    # Version string reported by the JVM itself.
    java_version_result = execute('java -version', silent=True, catch=True)
    match = re.match('java version "(.*)"', java_version_result)
    java_version = match.group(1)
    java_home_result = os.getenv('JAVA_HOME')
    if java_home_result:
        # NOTE(review): re.match anchors at the start of the string, so this
        # only matches when JAVA_HOME literally begins with 'jdk' -- confirm
        # that is the intended convention for this environment.
        match = re.match('jdk(.*)', java_home_result)
        if match:
            java_home = match.group(1)
        else:
            # NOTE(review): assumes error() aborts; otherwise java_home below
            # would be unbound -- confirm in util.
            error('JAVA_HOME is not expected')
    else:
        java_home = 'NULL'
    if os.path.exists(default_java_file):
        # Inspect where the default-java symlink points.
        default_java_result = execute('ls -l ' + default_java_file, silent=True, catch=True)
        match = re.match('.*jdk(.*)', default_java_result)
        if match:
            default_java = match.group(1)
        else:
            error('default-java is not expected')
    else:
        default_java = 'NULL'
    #info(java_version_result)
    #if java_home_result:
    #    info(java_home_result)
    #if default_java_result:
    #    info(default_java_result)
    info('java -v: ' + java_version)
    info('JAVA_HOME: ' + java_home)
    info('default-java: ' + default_java)
def set_version_java():
    """Point the Debian alternatives system and default-java at a JDK.

    Short versions ('1.5', '1.6', '1.7') expand to known full releases;
    anything else is used verbatim as the jdk directory suffix.
    """
    shortcuts = {'1.5': '1.5.0_22', '1.6': '1.6.0_45', '1.7': '1.7.0_45'}
    version = shortcuts.get(args.set_version, args.set_version)

    tools = ['javac', 'java', 'javaws', 'javap', 'jar', 'jarsigner']
    # Register every tool as an alternative first, then configure each one.
    for tool in tools:
        execute('sudo update-alternatives --install /usr/bin/%s %s /usr/lib/jvm/jdk%s/bin/%s 50000'
                % (tool, tool, version, tool))
    for tool in tools:
        execute('sudo update-alternatives --config ' + tool)

    # Repoint the default-java symlink at the chosen JDK.
    execute('sudo rm -f ' + default_java_file)
    execute('sudo ln -s /usr/lib/jvm/jdk' + version + ' /usr/lib/jvm/default-java')
    get_version_java()
def get_version_gcc():
    """Print the gcc version inferred from the /usr/bin/gcc symlink target."""
    gcc_version_result = execute('ls -l ' + gcc_file, silent=True, catch=True)
    # The symlink target is expected to look like '.../gcc-<version>'.
    match = re.match('.+gcc-(.+)', gcc_version_result)
    if match:
        gcc_version = match.group(1)
    else:
        # NOTE(review): assumes error() aborts; otherwise gcc_version below
        # would be unbound -- confirm in util.
        error('gcc is not expected')
    info('gcc version: ' + gcc_version)
def set_version_gcc():
    """Repoint the /usr/bin/{gcc,g++,cc} symlinks at the requested release."""
    version = args.set_version
    # (link, target) pairs, applied in order: gcc, g++, then cc -> gcc.
    links = [
        ('/usr/bin/gcc', '/usr/bin/gcc-' + version),
        ('/usr/bin/g++', '/usr/bin/g++-' + version),
        ('/usr/bin/cc', '/usr/bin/gcc'),
    ]
    for link, target in links:
        execute('sudo rm -f ' + link, silent=True)
        execute('sudo ln -s ' + target + ' ' + link, silent=True)
    get_version_gcc()
if __name__ == "__main__":
    # Command-line entry point: parse options, then run whichever of the
    # get/set actions the flags requested (each is a no-op when not asked for).
    handle_option()
    setup()
    get_version()
    set_version()
|
[
"yang.gu@intel.com"
] |
yang.gu@intel.com
|
4de8e6f3f997c044235468a34eb39dc9ca07df91
|
df458ae26f8e1b59e4fc4273701f77cc2e340a3c
|
/tests/test_viewgroups.py
|
a7c869008bd24a94bbc80a387a23758393244f2e
|
[
"BSD-3-Clause"
] |
permissive
|
radiac/django-fastview
|
64bcf3f07ed62a1863b5a402d1fedc998ed433f3
|
daf898f416c3f89efc3ef290f8158232d055af36
|
refs/heads/develop
| 2023-03-20T22:49:14.789026
| 2022-10-02T19:43:22
| 2022-10-02T19:43:22
| 230,815,383
| 13
| 1
|
NOASSERTION
| 2023-03-04T05:44:10
| 2019-12-29T23:26:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,637
|
py
|
"""
Test viewgroup
"""
from fastview import permissions
from fastview.viewgroups import ModelViewGroup
from .app.models import Entry
def test_modelviewgroup_permissions__permissions_set_on_subclass():
    """Permissions propagate to views at instantiation, not at class definition."""

    class TestPermission(permissions.Permission):
        pass

    test_permission = TestPermission()

    class Entries(ModelViewGroup):
        permission = test_permission
        model = Entry

    # Instantiating the group pushes the permission onto every view
    group = Entries()
    for view in (group.index_view, group.detail_view, group.create_view,
                 group.update_view, group.delete_view):
        assert view.get_permission() == test_permission

    # The class itself still carries the default Denied permission
    for view in (Entries.index_view, Entries.detail_view, Entries.create_view,
                 Entries.update_view, Entries.delete_view):
        assert isinstance(view.get_permission(), permissions.Denied)
def test_modelviewgroup_index__index_lists(add_url, client, user_owner):
    """The group's index view lists every Entry in the database."""

    class Entries(ModelViewGroup):
        permission = permissions.Public()
        model = Entry

    for _ in range(2):
        Entry.objects.create(author=user_owner)

    add_url("", Entries().include(namespace="entries"))
    response = client.get("/")
    assert len(response.context_data["object_list"]) == 2
|
[
"git@radiac.net"
] |
git@radiac.net
|
5415aabb59728ebc4a5c6162aa5db91bddd6490d
|
a9e3f3ad54ade49c19973707d2beb49f64490efd
|
/Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/envs/devstack_docker.py
|
2eece814deae2c31cf5456b34af9e0f386c38c4e
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] |
permissive
|
luque/better-ways-of-thinking-about-software
|
8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d
|
5809eaca7079a15ee56b0b7fcfea425337046c97
|
refs/heads/master
| 2021-11-24T15:10:09.785252
| 2021-11-22T12:14:34
| 2021-11-22T12:14:34
| 163,850,454
| 3
| 1
|
MIT
| 2021-11-22T12:12:31
| 2019-01-02T14:21:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 129
|
py
|
""" Overrides for Docker-based devstack. """
from .devstack import * # pylint: disable=wildcard-import, unused-wildcard-import
|
[
"rafael.luque@osoco.es"
] |
rafael.luque@osoco.es
|
6653c822271f595c1ee6011406a88613852cd291
|
3325f16c04ca8e641cbd58e396f983542b793091
|
/Seção 13 - Leitura e Escrita em Arquivos/Exercícios da Seção/Exercício_04.py
|
3140f4c4229bb75d6849a37c399fbae14f608b1f
|
[] |
no_license
|
romulovieira777/Programacao_em_Python_Essencial
|
ac929fbbd6a002bcc689b8d6e54d46177632c169
|
e81d219db773d562841203ea370bf4f098c4bd21
|
refs/heads/master
| 2023-06-11T16:06:36.971113
| 2021-07-06T20:57:25
| 2021-07-06T20:57:25
| 269,442,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
"""
4) Faça um programa que receba do usuário um arquivo texto e mostre na tela
quantas letras são vogais e quantas são consoantes
"""
from Exercício_03 import conta_vogais
def conta_consoantes(txt):
    """Return how many consonants (a-z, unaccented) appear in *txt*.

    Returns None when *txt* is not string-like (mirrors the original
    AttributeError fallback for non-str input).
    """
    try:
        lowered = txt.lower()
    except AttributeError:
        return None
    consoantes = 'bcdfghjklmnpqrstvwxyz'
    return sum(lowered.count(letra) for letra in consoantes)
if __name__ == '__main__':
    # Ask for a file path, appending ".txt" when the user omitted the extension.
    nome_arquivo = str(input("Digite o caminho do arquivo ou o nome do arquivo "
                             "(caso o arquivo esteja no mesmo local do programa): "))
    nome_arquivo = nome_arquivo if ".txt" in nome_arquivo else nome_arquivo + ".txt"
    try:
        # Read the whole file and report vowel/consonant counts.
        with open(nome_arquivo, 'r', encoding='utf-8') as arquivo:
            texto = arquivo.read()
        print(f"\nO arquivo texto tem {conta_vogais(texto)} vogais e {conta_consoantes(texto)} consoantes!")
    except FileNotFoundError:
        print("\nArquivo informado não encontrado!")
    except OSError:
        print("\nO SO não aceita caracteres especiais em nomes de arquivo!")
|
[
"romulo.vieira777@gmail.com"
] |
romulo.vieira777@gmail.com
|
6d7552c80362211c8655afa1523750f82b5f34b9
|
cbdbb05b91a4463639deefd44169d564773cd1fb
|
/djangoproj/forms_lab/lab/models.py
|
a49bb957d8bc48b023dce230a3be6f848e11e28a
|
[] |
no_license
|
blazprog/py3
|
e26ef36a485809334b1d5a1688777b12730ebf39
|
e15659e5d5a8ced617283f096e82135dc32a8df1
|
refs/heads/master
| 2020-03-19T20:55:22.304074
| 2018-06-11T12:25:18
| 2018-06-11T12:25:18
| 136,922,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
from django.db import models
class Nakup(models.Model):
    # One shopping trip: purchase date and store name.
    datum_nakupa = models.DateField()
    trgovina = models.CharField(max_length=30)
class NakupIzdelki(models.Model):
    # One line item of a purchase: product name, quantity and unit price.
    # NOTE(review): ForeignKey without on_delete targets Django < 2.0; newer
    # Django requires an explicit on_delete argument -- confirm before upgrading.
    nakup = models.ForeignKey(Nakup)
    izdelek = models.CharField(max_length=30)
    kolicina = models.IntegerField(default=1)
    cena = models.FloatField(default=0)
|
[
"blaz.korosec@mentis.si"
] |
blaz.korosec@mentis.si
|
a3e8f85b15362854f00e8158fedd47775ff9a1fb
|
9b5597492e57313712c0a842ef887940f92636cd
|
/judge/sessions/2018Individual/sgessow@gmail.com/PB_02.py
|
f67c2bfbbc1ddd96d57cfd996db5fcf43c0930bf
|
[] |
no_license
|
onionhoney/codesprint
|
ae02be9e3c2354bb921dc0721ad3819539a580fa
|
fcece4daf908aec41de7bba94c07b44c2aa98c67
|
refs/heads/master
| 2020-03-11T11:29:57.052236
| 2019-05-02T22:04:53
| 2019-05-02T22:04:53
| 129,971,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
# For each test case read three rows of 0/1 flags and print the column index
# of every set flag (the input is expected to contain one per row).
cases = int(input())
for _ in range(cases):
    rows = [
        [int(tok) for tok in input().split()],
        [int(tok) for tok in input().split()],
        [int(tok) for tok in input().split()],
    ]
    positions = []
    for row in rows:
        for column, flag in enumerate(row):
            if flag == 1:
                positions.append(column)
    print(positions[0], positions[1], positions[2])
|
[
"root@codesprintla.com"
] |
root@codesprintla.com
|
618d26a1de085c3b232b50f8a719c096a1a4c389
|
b5ca0a2ce47fdb4306bbdffcb995eb7e6eac1b23
|
/Python/Regex and Parsing/Validating phone numbers/attempt2.py
|
2b2d832379b0eb3a6171a2ff4bfd378358e9b641
|
[] |
no_license
|
rsoemardja/HackerRank
|
ac257a66c3649534197b223b8ab55011d84fb9e1
|
97d28d648a85a16fbe6a5d6ae72ff6503a063ffc
|
refs/heads/master
| 2022-04-14T22:46:03.412359
| 2020-04-03T07:44:04
| 2020-04-03T07:44:04
| 217,687,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
import re
n = int(input())
for _ in range(n):
    candidate = input()
    # Valid mobile numbers: exactly 10 digits starting with 7, 8 or 9.
    print('YES' if re.fullmatch(r'[789]\d{9}', candidate) is not None else 'NO')
|
[
"rsoemardja@gmail.com"
] |
rsoemardja@gmail.com
|
6698de943e743d11300f391dd839dad9369a9914
|
c2f4afee3ec4faef7231da2e48c8fef3d309b3e3
|
/AppendFile.py
|
7fcd0d08932bb7aacd4bdcc2a8461d8776ca7cac
|
[] |
no_license
|
tanu312000/pyChapter
|
a723f99754ff2b21e694a9da3cb2c6ca0cd10fce
|
2fd28aefcbfaf0f6c34db90fdf0d77f9aea142ce
|
refs/heads/master
| 2020-05-03T15:51:34.334806
| 2019-03-31T16:17:45
| 2019-03-31T16:17:45
| 178,712,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
"""Read lines from the keyboard, collecting them until the user answers "No"
to the continue prompt, then append everything entered to AppendFile.txt."""
out_file = open("/home/tanu/programs/pythonFiles/AppendFile.txt", 'a')
collected = []
while True:
    collected.append(input("Enter a line") + "\n")
    if input("Do you want to Continue") == "No":
        print("Success")
        break
out_file.writelines(collected)
out_file.close()
|
[
"tanurocks90@gmail.com"
] |
tanurocks90@gmail.com
|
011d3b37d7cb2a349a9f335003c370504e1fc868
|
26fb93b2df4b6226e708027beccb2f0d442a4522
|
/MWTracker/GUI_Qt4/SWTrackerViewer/SWTrackerViewer_GUI.py
|
fbf28e5f0a9297acd767443f273c16285271614c
|
[] |
no_license
|
KezhiLi/Multiworm_Tracking
|
bb4fd1d1beeab26f4402f5aa5a3f159700fa0009
|
cd91e968a557957e920d61db8bc10957666b6bc2
|
refs/heads/master
| 2021-01-22T16:10:23.591064
| 2016-04-13T15:51:18
| 2016-04-13T15:51:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,201
|
py
|
import sys
from PyQt4.QtGui import QApplication, QMainWindow, QFileDialog, QMessageBox, QFrame
from PyQt4.QtCore import QDir, QTimer, Qt, QPointF
from PyQt4.QtGui import QPixmap, QImage, QPainter, QColor, QFont, QPolygonF, QPen
from MWTracker.GUI_Qt4.SWTrackerViewer.SWTrackerViewer_ui import Ui_ImageViewer
from MWTracker.GUI_Qt4.MWTrackerViewerSingle.MWTrackerViewerSingle_GUI import MWTrackerViewerSingle_GUI
from MWTracker.trackWorms.getSkeletonsTables import getWormMask, binaryMask2Contour
from MWTracker.intensityAnalysis.correctHeadTailIntensity import createBlocks, _fuseOverlapingGroups
import tables, os
import numpy as np
import pandas as pd
import cv2
import json
class SWTrackerViewer_GUI(MWTrackerViewerSingle_GUI):
    # Single-worm tracker viewer: extends the single-video viewer with
    # skeleton-block navigation and stage-movement highlighting.
    def __init__(self, ui = ''):
        if not ui:
            super().__init__(Ui_ImageViewer())
        else:
            super().__init__(ui)
        self.skel_block = []        # list of (start, end) frame ranges with skeletons
        self.skel_block_n = 0       # index of the currently selected block
        self.is_stage_move = []     # per-frame flag; truthy where the stage moved
        self.ui.spinBox_skelBlock.valueChanged.connect(self.changeSkelBlock)
    def updateSkelFile(self):
        """Reload skeleton metadata: skeleton blocks and stage-movement flags."""
        super().updateSkelFile()
        with tables.File(self.skel_file, 'r') as fid:
            if '/provenance_tracking/INT_SKE_ORIENT' in fid:
                # Recover the gap_size used during intensity-based skeleton
                # orientation from the provenance record (JSON inside JSON).
                prov_str = fid.get_node('/provenance_tracking/INT_SKE_ORIENT').read()
                func_arg_str = json.loads(prov_str.decode("utf-8"))['func_arguments']
                gap_size = json.loads(func_arg_str)['gap_size']
                good = (self.trajectories_data['int_map_id']>0).values
                has_skel_group = createBlocks(good, min_block_size = 0)
                self.skel_block = _fuseOverlapingGroups(has_skel_group, gap_size = gap_size)
            else:
                self.skel_block = []
        self.ui.spinBox_skelBlock.setMaximum(max(len(self.skel_block)-1,0))
        self.ui.spinBox_skelBlock.setMinimum(0)
        # Reset the selection; setValue(0) fires changeSkelBlock via the
        # signal, so call it directly only when the value is already 0.
        if self.skel_block_n != 0:
            self.skel_block_n = 0
            self.ui.spinBox_skelBlock.setValue(0)
        else:
            self.changeSkelBlock(0)
        with tables.File(self.skel_file, 'r') as fid:
            if '/stage_movement/stage_vec' in fid:
                # NaN stage vectors mark frames recorded while the stage moved.
                self.is_stage_move = np.isnan(fid.get_node('/stage_movement/stage_vec')[:,0])
            else:
                self.is_stage_move = []
    def updateImage(self):
        """Draw the current frame; outline it in red during stage motion."""
        self.readImage()
        self.drawSkelResult()
        if len(self.is_stage_move) > 0 and self.is_stage_move[self.frame_number]:
            painter = QPainter()
            painter.begin(self.frame_qimg)
            pen = QPen()
            pen_width = 3
            pen.setWidth(pen_width)
            pen.setColor(Qt.red)
            painter.setPen(pen)
            painter.drawRect(1, 1, self.frame_qimg.width()-pen_width, self.frame_qimg.height()-pen_width);
            painter.end()
            # NOTE(review): looks like a leftover debug print -- consider removing.
            print(1)
        self.pixmap = QPixmap.fromImage(self.frame_qimg)
        self.ui.imageCanvas.setPixmap(self.pixmap);
    def changeSkelBlock(self, val):
        """Select skeleton block *val* and jump to its first frame."""
        self.skel_block_n = val
        if len(self.skel_block) > 0:
            self.ui.label_skelBlock.setText('Block limits: %i-%i' % (self.skel_block[self.skel_block_n]))
            #move to the frame where the block starts
            self.ui.spinBox_frame.setValue(self.skel_block[self.skel_block_n][0])
        else:
            self.ui.label_skelBlock.setText('')
    #change frame number using the keys
    def keyPressEvent(self, event):
        """'[' / ']' step between skeleton blocks; ';' toggles the label overlay."""
        #go the previous block ('[' key, code 91)
        if event.key() == 91:
            self.ui.spinBox_skelBlock.setValue(self.skel_block_n-1)
        #go to the next block (']' key, code 93)
        elif event.key() == 93:
            self.ui.spinBox_skelBlock.setValue(self.skel_block_n+1)
        # ';' (code 59) toggles the show-label checkbox
        elif event.key() == 59:
            if self.ui.checkBox_showLabel.isChecked():
                self.ui.checkBox_showLabel.setChecked(0)
            else:
                self.ui.checkBox_showLabel.setChecked(1)
        super().keyPressEvent(event)
if __name__ == '__main__':
    # Launch the viewer as a standalone Qt application.
    app = QApplication(sys.argv)
    ui = SWTrackerViewer_GUI()
    ui.show()
    sys.exit(app.exec_())
|
[
"ver228@gmail.com"
] |
ver228@gmail.com
|
25e72161c8d4276d21d755c960750c74d408ce34
|
8f75dae40363144b7ea0eccb1b2fab804ee60711
|
/tests/integration/goldens/credentials/samples/generated_samples/iamcredentials_v1_generated_iam_credentials_sign_blob_async.py
|
fffa6de4bc73a43e8c4de2347fdbc936e2ed972e
|
[
"Apache-2.0"
] |
permissive
|
software-dov/gapic-generator-python
|
a2298c13b02bff87888c2949f4909880c3fa2408
|
304b30d3b4ec9ccb730251154b10896146a52900
|
refs/heads/master
| 2022-06-04T00:14:28.559534
| 2022-02-28T18:13:26
| 2022-02-28T18:13:26
| 191,990,527
| 0
| 1
|
Apache-2.0
| 2022-01-27T19:35:04
| 2019-06-14T18:41:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,491
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for SignBlob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-iam-credentials
# [START iamcredentials_v1_generated_IAMCredentials_SignBlob_async]
from google.iam import credentials_v1
async def sample_sign_blob():
    """Demonstrate signing a blob with the async IAM Credentials client.

    Generated sample (see the DO NOT EDIT header); values are placeholders.
    """
    # Create a client
    client = credentials_v1.IAMCredentialsAsyncClient()
    # Initialize request argument(s)
    request = credentials_v1.SignBlobRequest(
        name="name_value",
        payload=b'payload_blob',
    )
    # Make the request
    response = await client.sign_blob(request=request)
    # Handle the response
    print(response)
|
[
"noreply@github.com"
] |
software-dov.noreply@github.com
|
33f06e48105dd16509b58527c0eed07ca7ed05a6
|
bc441bb06b8948288f110af63feda4e798f30225
|
/cmdb_sdk/model/notify/subscriber_pb2.pyi
|
f37ead231f841e671f9d1f218fbe6e05d86a7244
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,884
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from cmdb_sdk.model.notify.subscribe_info_pb2 import (
SubscribeInfo as cmdb_sdk___model___notify___subscribe_info_pb2___SubscribeInfo,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class Subscriber(google___protobuf___message___Message):
    # Auto-generated mypy type stub for the notify Subscriber protobuf message.
    # Regenerate with generate_proto_mypy_stubs.py rather than editing by hand.
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    name = ... # type: typing___Text
    admin = ... # type: typing___Text
    callback = ... # type: typing___Text
    ensName = ... # type: typing___Text
    procNum = ... # type: builtin___int
    msgType = ... # type: builtin___int
    retry = ... # type: builtin___int
    @property
    def subscribeInfo(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[cmdb_sdk___model___notify___subscribe_info_pb2___SubscribeInfo]: ...
    def __init__(self,
        *,
        name : typing___Optional[typing___Text] = None,
        admin : typing___Optional[typing___Text] = None,
        callback : typing___Optional[typing___Text] = None,
        ensName : typing___Optional[typing___Text] = None,
        procNum : typing___Optional[builtin___int] = None,
        msgType : typing___Optional[builtin___int] = None,
        retry : typing___Optional[builtin___int] = None,
        subscribeInfo : typing___Optional[typing___Iterable[cmdb_sdk___model___notify___subscribe_info_pb2___SubscribeInfo]] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> Subscriber: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Subscriber: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"admin",b"admin",u"callback",b"callback",u"ensName",b"ensName",u"msgType",b"msgType",u"name",b"name",u"procNum",b"procNum",u"retry",b"retry",u"subscribeInfo",b"subscribeInfo"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
dbf025d7bcfc7df0a48718eccc0b0cb14810a02c
|
c2f35e5d3cfbbb73188a0cd6c43d161738e63bd1
|
/12-Django框架学习/bj18/test2/booktest/admin.py
|
e10072641c8350576c99bd572fcb82581b21d2f6
|
[] |
no_license
|
yangh-zzf-itcast/Python_heima_Study
|
2a7cd0d801d9d6f49548905d373bb409efc4b559
|
7d753c1cdd5c46a0e78032e12b1d2f5d9be0bf68
|
refs/heads/master
| 2020-04-30T06:59:04.000451
| 2019-04-19T12:15:30
| 2019-04-19T12:15:30
| 176,670,172
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
from django.contrib import admin
from booktest.models import BookInfo, HeroInfo
# Register your models here.
# Register the model classes so they can be managed through the admin site.
admin.site.register(BookInfo)
admin.site.register(HeroInfo)
|
[
"2459846416@qq.com"
] |
2459846416@qq.com
|
8eb3b583ba00c21e0e51f30d62670c1da9f518e3
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/87/usersdata/171/57992/submittedfiles/contido.py
|
9b0e89a4443abb2d93f0a13c24567c49b26e2254
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
# -*- coding: utf-8 -*-
def quantidade(lista, lista2):
    """Return how many positions hold equal values in both lists.

    Iterates over ``lista``'s indices, so ``lista2`` must be at least as
    long as ``lista`` (otherwise an IndexError propagates, as before).
    """
    return sum(1 for posicao in range(len(lista))
               if lista[posicao] == lista2[posicao])
# Read two integer lists from the keyboard and print how many positions match.
n = int(input('digite o numero de elemento:'))
lista1 = []
for i in range(1, n + 1, 1):
    valor1 = int(input('digite o numero á ser colocado na lista 1:'))
    lista1.append(valor1)
n = int(input('digite o numero de elemento:'))
lista2 = []
for i in range(1, n + 1, 1):
    valor2 = int(input('digite o numero á ser colocado na lista 2:'))
    lista2.append(valor2)
# BUG FIX: the original did `if quantidade(lista1, lista2): print(cont)`,
# which printed the stale loop helper `cont` (always 0) and printed nothing
# at all when the count was zero. Print the actual result instead.
print(quantidade(lista1, lista2))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
744d50f28a2c94ad5282605b6d3bb4517f7916ea
|
0466559817d3a1be9409da2c83db99c4db3bacfe
|
/hubcheck/conf/config.py
|
b68b5bd69b995419b02cbc759340b2a456a15ce1
|
[
"MIT"
] |
permissive
|
ken2190/hubcheck
|
955cf9b75a1ee77e28256dfd3a780cfbc17de961
|
2ff506eb56ba00f035300862f8848e4168452a17
|
refs/heads/master
| 2023-03-20T15:17:12.949715
| 2015-09-29T16:11:18
| 2015-09-29T16:11:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
import os
from pkg_resources import resource_filename
class Config(object):
    """Global hubcheck configuration holder; see the ``settings`` singleton."""
    # Data/profile directories shipped inside the installed hubcheck package.
    data_dir = resource_filename('hubcheck','data')
    profiles_dir = resource_filename('hubcheck','profiles')
    # user configuration variables (populated at runtime from the config file)
    screenshot_dir = None              # where failure screenshots are written
    video_dir = None                   # where recorded videos are written
    config_filename = None
    tdfname = ''
    tdpass = ''
    highlight_web_elements = False     # visually highlight elements during runs
    scroll_to_web_elements = False     # scroll elements into view before use
    log_locator_updates = False
    log_widget_attachments = False
    proxy = None                       # optional proxy configuration
    hub_hostname = None
    hub_version = None
    tool_container_version = None
    default_workspace_toolname = None
    apps_workspace_toolname = None
    # full path of the hub config file, used by toolcheck
    configpath = None
# Shared singleton consumed throughout hubcheck.
settings = Config()
|
[
"telldsk@gmail.com"
] |
telldsk@gmail.com
|
8bcc7d5dab776217c266b88d9884fc3a7e5a583d
|
1bcb966740f47c0edc23e9b05afec55f2bcae36a
|
/app/game/appinterface/packageInfo.py
|
92697a828967b4ecf9a95dbc7d38fdf70e8c3d66
|
[] |
no_license
|
East196/diabloworld
|
0d2e9dbf650aa86fcc7b9fc1ef49912e79adb954
|
d7a83a21287ed66aea690ecb6b73588569478be6
|
refs/heads/master
| 2021-05-09T12:15:31.640065
| 2018-02-04T15:16:54
| 2018-02-04T15:16:54
| 119,007,609
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,226
|
py
|
#coding:utf8
'''
Created on 2011-4-13
@author: sean_lan
'''
from app.game.core.PlayersManager import PlayersManager
def getItemsInEquipSlotNew(dynamicId,characterId):
    '''Get the character's equipment-slot information.
    @param dynamicId: int client id
    @param characterId: int character id
    @return: dict with result flag, message, and (on success) the equipped
             items plus attack/defense/agility stats
    '''
    player = PlayersManager().getPlayerByID(characterId)
    # Reject requests from clients that do not own this character.
    if not player or not player.CheckClient(dynamicId):
        return {'result':False,'message':""}
    equipmentList = player.pack.getEquipmentSlotItemList()
    keys_copy = dict(equipmentList)
    equipmentList_copy = []
    # Walk the six fixed equipment positions; skip empty slots.
    for position in range(1,7):
        item = keys_copy.get(position,None)
        if item:
            _item = {}
            _item['itemid'] = item.baseInfo.id
            _item['icon'] = item.baseInfo.getItemTemplateInfo().get('icon',0)
            _item['tempid'] = item.baseInfo.getItemTemplateId()
            _item['exp'] = item.exp
            iteminfo = {'pos':position,'item':_item}
            equipmentList_copy.append(iteminfo)
    playerInfo = player.formatInfoForWeiXin()
    data = {}
    data['equip'] = equipmentList_copy
    data['attack'] = playerInfo['attack']
    data['fangyu'] = playerInfo['fangyu']
    data['minjie'] = playerInfo['minjie']
    return {'result':True,'message':u'','data':data}
def UserItemNew(dynamicId,characterId,tempid):
    '''Use (equip) the item identified by *tempid* for the character.
    '''
    player = PlayersManager().getPlayerByID(characterId)
    # Reject requests from clients that do not own this character.
    if not player or not player.CheckClient(dynamicId):
        return {'result':False,'message':""}
    data = player.pack.equipEquipmentByItemId(tempid)
    return data
def GetPackageInfo(dynamicId,characterId):
    '''Get the character's package (inventory) information.
    '''
    player = PlayersManager().getPlayerByID(characterId)
    # Reject requests from clients that do not own this character.
    if not player or not player.CheckClient(dynamicId):
        return {'result':False,'message':""}
    data = player.pack.getPackageItemList()
    return data
def unloadedEquipment_new(dynamicId, characterId, itemId):
    '''Unequip (take off) the equipment identified by *itemId*.
    '''
    player = PlayersManager().getPlayerByID(characterId)
    # Reject requests from clients that do not own this character.
    if not player or not player.CheckClient(dynamicId):
        return {'result':False,'message':""}
    data = player.pack.unloaded(itemId)
    return data
|
[
"2901180515@qq.com"
] |
2901180515@qq.com
|
bb6a13c53d923939882c90a3722dcb0ee6f65008
|
ecee6e84ba18100b621c7e06f493ae48e44a34fe
|
/build/navigation/nav_core/catkin_generated/pkg.installspace.context.pc.py
|
7154b8261ec9e1dfe4b7533a24c5c418fed5f7a6
|
[] |
no_license
|
theleastinterestingcoder/Thesis
|
6d59e06b16cbe1588a6454689248c88867de2094
|
3f6945f03a58f0eff105fe879401a7f1df6f0166
|
refs/heads/master
| 2016-09-05T15:30:26.501946
| 2015-05-11T14:34:15
| 2015-05-11T14:34:15
| 31,631,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated pkg-config context for the nav_core catkin package;
# regenerate via the catkin build rather than editing by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/alfred/quan_ws/install/include".split(';') if "/home/alfred/quan_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "std_msgs;geometry_msgs;tf;costmap_2d".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "nav_core"
PROJECT_SPACE_DIR = "/home/alfred/quan_ws/install"
PROJECT_VERSION = "1.13.0"
|
[
"quanzhou64@gmail.com"
] |
quanzhou64@gmail.com
|
aa893d19b93fa4e46eae5303e87793c9a2afed4f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/1093.py
|
fa244fab36bc58d7529d2c7242771c1da73f5714
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
def isPalindrome(num):
    """Return True when *num* has no fractional part and its decimal digits
    read the same forwards and backwards (Python 2 source: uses xrange)."""
    if num - int(num) != 0:
        return False
    digits = str(int(num))
    length = len(digits)
    return all(digits[k] == digits[length - k - 1] for k in xrange(length))
def allFairAndSquare(a, b):
    """List every n in [a, b] where both n and sqrt(n) are palindromes."""
    return [n for n in xrange(a, b + 1)
            if isPalindrome(n) and isPalindrome(n ** (0.5))]
# Driver: for each query range [a, b] in the input file, count how many
# precomputed fair-and-square candidates fall inside it. Python 2 script.
f = open('C-small-attempt1.in', 'r')
g = open('output.txt', 'w')
n = int(f.readline())
count = 1
# NOTE(review): 'all' shadows the builtin of the same name.
all = allFairAndSquare(1, 1000)
while count <= n:
    rng = f.readline().split()
    a, b = int(rng[0]), int(rng[1])
    # 1000 acts as a "not found" sentinel (the candidate list is far shorter).
    x, y = 1000, 1000
    # x: index of the first candidate >= a
    for i in xrange(len(all)):
        if all[i] >= a:
            x = i
            break
    # y: index of the first candidate > b
    for i in xrange(x, len(all)):
        if all[i] > b:
            y = i
            break
    total = 0
    if x == 1000:
        # no candidate reaches a -> empty range
        total = 0
    elif y == 1000:
        # every candidate from x onward fits below b
        y = len(all)
        total = y-x
    else:
        total = y-x
    g.write("Case #" + str(count) + ": " + str(total) + '\n')
    count += 1
f.close()
g.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
9a9e8b7d9284574442ab7b8a10207055a4e065fd
|
fa07f9ff0c833746a4195a9092f5831e1126b684
|
/03逻辑回归/tool/Read_Minist_Tool.py
|
20e00b911aaddbd62cbe3738177e435b941c794e
|
[] |
no_license
|
shiqiuwang/ML_basic_model
|
76c3b755dda772031bfba22860ee61bb2ea286fc
|
b6d7350332f3ef32ccc5dc69f81b629c5bcdd349
|
refs/heads/master
| 2023-03-23T10:23:08.130357
| 2021-03-20T16:43:30
| 2021-03-20T16:43:30
| 348,405,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,341
|
py
|
# coding=utf-8
import numpy as np
import struct
import matplotlib.pyplot as plt
# 训练集文件
train_images_idx3_ubyte_file = 'data/minist/train-images.idx3-ubyte'
# 训练集标签文件
train_labels_idx1_ubyte_file = 'data/minist/train-labels.idx1-ubyte'
# 测试集文件
test_images_idx3_ubyte_file = 'data/minist/t10k-images.idx3-ubyte'
# 测试集标签文件
test_labels_idx1_ubyte_file = 'data/minist/t10k-labels.idx1-ubyte'
def decode_idx3_ubyte(idx3_ubyte_file):
    """
    Generic parser for an idx3 (image) file.
    :param idx3_ubyte_file: path to the idx3 file
    :return: images as an (n, rows, cols) ndarray
    """
    # Read the whole file as raw binary data.
    bin_data = open(idx3_ubyte_file, 'rb').read()
    # Parse the header: magic number, image count, rows and columns.
    offset = 0
    # Four big-endian 32-bit ints ('>iiii'): the label file only needs '>ii'.
    fmt_header = '>iiii'
    magic_number, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, offset)
    print('魔数:%d, 图片数量: %d张, 图片大小: %d*%d' % (magic_number, num_images, num_rows, num_cols))
    # Parse the image data itself.
    image_size = num_rows * num_cols
    # Advance past the 16-byte header so the offset points at the first pixel.
    offset += struct.calcsize(fmt_header)
    print(offset)
    # 784 unsigned bytes ('B') per image; without the count prefix only a
    # single pixel value would be unpacked per call.
    fmt_image = '>' + str(image_size) + 'B'
    print(fmt_image,offset,struct.calcsize(fmt_image))
    images = np.empty((num_images, num_rows, num_cols))
    #plt.figure()
    for i in range(num_images):
        if (i + 1) % 10000 == 0:
            # Progress report every 10000 images.
            print('已解析 %d' % (i + 1) + '张')
            print(offset)
        images[i] = np.array(struct.unpack_from(fmt_image, bin_data, offset)).reshape((num_rows, num_cols))
        #print(images[i])
        offset += struct.calcsize(fmt_image)
        # plt.imshow(images[i],'gray')
        # plt.pause(0.00001)
        # plt.show()
    #plt.show()
    return images
def decode_idx1_ubyte(idx1_ubyte_file):
    """
    Generic parser for an idx1 (label) file.
    :param idx1_ubyte_file: path to the idx1 file
    :return: labels as an (n,) ndarray
    """
    # Read the whole file as raw binary data.
    bin_data = open(idx1_ubyte_file, 'rb').read()
    # Parse the header: magic number and label count (two big-endian int32s).
    offset = 0
    fmt_header = '>ii'
    magic_number, num_images = struct.unpack_from(fmt_header, bin_data, offset)
    print('魔数:%d, 图片数量: %d张' % (magic_number, num_images))
    # Parse the labels: one unsigned byte each.
    offset += struct.calcsize(fmt_header)
    fmt_image = '>B'
    labels = np.empty(num_images)
    for i in range(num_images):
        if (i + 1) % 10000 == 0:
            # Progress report every 10000 labels.
            print ('已解析 %d' % (i + 1) + '张')
        labels[i] = struct.unpack_from(fmt_image, bin_data, offset)[0]
        offset += struct.calcsize(fmt_image)
    return labels
def load_train_images(idx_ubyte_file=train_images_idx3_ubyte_file):
    """Load the MNIST training images.

    The idx3 header is four big-endian int32s (magic 2051, image count,
    rows, cols) followed by row-major uint8 pixels, 0 (white background)
    to 255 (black foreground).

    :param idx_ubyte_file: path to the idx3 file
    :return: np.array of shape (n, rows, cols), n = number of images
    """
    return decode_idx3_ubyte(idx_ubyte_file)
def load_train_labels(idx_ubyte_file=train_labels_idx1_ubyte_file):
    """Load the MNIST training labels.

    Thin convenience wrapper around decode_idx1_ubyte.  The idx1 layout
    is: magic number (2049) and item count as big-endian int32, then one
    unsigned byte per label; label values range from 0 to 9.

    :param idx_ubyte_file: path to the idx1 label file
    :return: n*1 numpy array, n being the number of labels
    """
    labels = decode_idx1_ubyte(idx_ubyte_file)
    return labels
def load_test_images(idx_ubyte_file=test_images_idx3_ubyte_file):
    """Load the MNIST test images.

    Thin convenience wrapper around decode_idx3_ubyte.  The idx3 layout
    is: magic number (2051), image count, row count, column count (all
    big-endian int32), then one unsigned byte per pixel, row-wise;
    0 means background (white) and 255 foreground (black).

    :param idx_ubyte_file: path to the idx3 image file
    :return: n*row*col numpy array, n being the number of images
    """
    images = decode_idx3_ubyte(idx_ubyte_file)
    return images
def load_test_labels(idx_ubyte_file=test_labels_idx1_ubyte_file):
    """Load the MNIST test labels.

    Thin convenience wrapper around decode_idx1_ubyte.  The idx1 layout
    is: magic number (2049) and item count as big-endian int32, then one
    unsigned byte per label; label values range from 0 to 9.

    :param idx_ubyte_file: path to the idx1 label file
    :return: n*1 numpy array, n being the number of labels
    """
    labels = decode_idx1_ubyte(idx_ubyte_file)
    return labels
|
[
"2714049089@qq.com"
] |
2714049089@qq.com
|
c12227791c9532c511adc49b611291c60354dc51
|
948d84d2e3fc04e353a11384d8570308174242f5
|
/5-Pythonda Koşul İfadeleri/if-else-demo-2.py
|
ee6cf34554eeaa7723e51f9eedd857630f2067ee
|
[] |
no_license
|
omerfarukcelenk/PythonMaster
|
a0084a800b8a41cd2ad538a7ca3687c26dc679ec
|
0db8f8b0ea2e1c2d810c542068cfcf1a3615f581
|
refs/heads/main
| 2023-04-16T17:42:05.501904
| 2021-04-26T21:19:27
| 2021-04-26T21:19:27
| 361,896,109
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,485
|
py
|
'''
1- Girilen bir sayının 0-100 arasında olup olmadığını kontrol ediniz.
sayi = float(input('sayı: '))
if (sayi > 0) and (sayi<=100):
print('sayı 0-100 arasında.')
else:
print('sayı 0-100 arasında değildir.')
'''
'''
2- Girilen bir sayının pozitif çift sayı olup olmadığını kontrol ediniz.
sayi = int(input('sayı: '))
if (sayi > 0):
if (sayi % 2 ==0):
print('girilen sayı pozitif çift sayıdır.')
else:
print('girilen sayı pozitif ancak sayı tek.')
else:
print('girilen sayı negatif sayı.')
'''
'''
3- Email ve parola bilgileri ile giriş kontrolü yapınız.
email = 'email@sadikturan.com'
password = 'abc123'
girilenEmail = input('email: ')
girilenPassword = input('password: ')
if (girilenEmail == email):
if (girilenPassword == password):
print('uygulamaya giriş başarılı.')
else:
print('parolanız yanlış')
else:
print('email bilginiz yanlış')
'''
'''
4- Girilen 3 sayıyı büyüklük olarak karşılaştırınız.
a = int(input('a: '))
b = int(input('b: '))
c = int(input('c: '))
if (a > b) and (a > c):
print(f'a en büyük sayıdır.')
elif (b > a) and (b > c):
print(f'b en büyük sayıdır.')
elif (c > a) and (c > b):
print(f'c en büyük sayıdır.')
'''
'''
5- Kullanıcıdan 2 vize (%60) ve final (%40) notunu alıp ortalama hesaplayınız.
Eğer ortalama 50 ve üstündeyse geçti değilse kaldı yazdırın.
    a-) Ortalama 50 olsa bile final notu en az 50 olmalıdır.
b-) Finalden 70 alındığında ortalamanın önemi olmasın.
vize1 = float(input('vize 1: '))
vize2 = float(input('vize 2: '))
final = float(input('final : '))
ortalama = ((vize1+vize2)/2)*0.6 + (final * 0.4)
result = (ortalama>=50) and (final>=50)
result = (ortalama >=50) or (final>=70)
** durum-1
if (ortalama>=50):
if (final>=50):
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarılı')
else:
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarısız. Finalden en az 50 almalısınız.')
else:
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarısız')
** durum-2
if (ortalama >=50):
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarılı')
else:
if (final>=70):
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarılı. Finalden en az 70 aldığınız için geçtiniz.')
else:
print(f'öğrencinin ortalaması: {ortalama} ve geçme durumu: başarısız')
'''
'''
6- Kişinin ad, kilo ve boy bilgilerini alıp kilo indekslerini hesaplayınız.
Formül: (Kilo / boy uzunluğunun karesi)
Aşağıdaki tabloya göre kişi hangi gruba girmektedir.
0-18.4 => Zayıf
18.5-24.9 => Normal
25.0-29.9 => Fazla Kilolu
30.0-34.9 => Şişman (Obez)
name = input('adınız: ')
kg = float(input('kilonuz: '))
hg = float(input('boyunuz: '))
index = (kg) / (hg ** 2)
if (index >= 0) and (index<=18.4):
print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen zayıf.')
elif (index>18.4) and (index<=24.9):
print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen normal.')
elif (index>24.9) and (index<=29.9):
print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen kilolu.')
elif (index>=29.9) and (index<=45.9):
print(f'{name} kilo indeksin: {index} ve kilo değerlendirmen obez.')
else:
print('bilgileriniz yanlış.')
'''
|
[
"omerfar0133@gmail.com"
] |
omerfar0133@gmail.com
|
51d2268bc441dc7e0809af59b51d58def2641769
|
5f10ca2439551040b0af336fd7e07dcc935fc77d
|
/Binary tree/二叉树性质相关题目/求每层的宽度.py
|
dcef6bda78001250e1f9f1a433d3a54382bd13b5
|
[] |
no_license
|
catdog001/leetcode2.0
|
2715797a303907188943bf735320e976d574f11f
|
d7c96cd9a1baa543f9dab28750be96c3ac4dc731
|
refs/heads/master
| 2021-06-02T10:33:41.552786
| 2020-04-08T04:18:04
| 2020-04-08T04:18:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,494
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/2/13 13:06
# @Author : LI Dongdong
# @FileName: 求每层的宽度.py
''''''
'''
题目分析
1.要求:求每层中,左边第一个到右边第一个的宽度
2.理解:
3.类型:
4.确认输入输出及边界条件:
4.方法及方法分析:
time complexity order:
space complexity order:
'''
'''
思路:bfs + [node, index]
方法:
deque record node and index (2* index (+1))
traversal all nodes, calculate index dif of every level node by for loop
time complex:
space complex:
易错点:
'''
class TreeNode:
    # Plain binary-tree node: a value plus optional left/right children.
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
from collections import deque
class Solution:
    def widthOfBinaryTree(self, root: TreeNode) -> int:
        """Level-order sweep that records, for every level, the span
        between the leftmost and rightmost node using heap-style indices
        (left child = 2*i, right child = 2*i + 1).

        Returns the list of per-level spans (0 for an empty tree).
        """
        if not root:
            return 0
        widths = []
        pending = deque([(root, 0)])
        while pending:
            level_count = len(pending)
            first_idx = pending[0][1]
            last_idx = pending[-1][1]
            widths.append(last_idx - first_idx + 1)
            # Drain exactly one level, queueing children with their indices.
            for _ in range(level_count):
                node, idx = pending.popleft()
                if node.left:
                    pending.append((node.left, idx * 2))
                if node.right:
                    pending.append((node.right, idx * 2 + 1))
        return widths
'''
思路:dfs + dic[level:index]
方法:
main: set level, index, dic
helper:
DFS scan every node, renew level and index = index * 2 (+ 1)
save dic[level] = [first node index, other node index]
time complex:
space complex:
易错点:dic[level] = max(index + 1, dic[level])
'''
# 求tree的每层的节点数,求哪一层具有最多节点数,节点数是多少
# input: root
# output: dic:key:每层序数,value:每层的node个数
class TreeNode:
    """Minimal binary-tree node: a payload plus left/right child links."""
    def __init__(self, x):
        self.val, self.left, self.right = x, None, None
class Solution:
    def widthOfBinaryTree(self, root: TreeNode) -> int:
        """Pre-order walk that records, per level, the heap-style index of
        the first node seen and of the most recently visited node.

        Returns a dict mapping level -> [first_index, last_visited_index]
        (0 for an empty tree).  Note: despite the name and annotation,
        the dict itself is returned.
        """
        if not root:
            return 0
        dic = {}
        self.res = 0  # kept for compatibility with the original; unused
        self.dfs(root, 0, 0, dic)
        return dic

    def dfs(self, root, level, index, dic):
        """Recursive pre-order helper that fills *dic* in place."""
        if not root:
            return
        if level not in dic:
            dic[level] = [index, index]
        else:
            dic[level][1] = index
        self.dfs(root.left, level + 1, index * 2, dic)
        self.dfs(root.right, level + 1, index * 2 + 1, dic)
from collections import deque
def constructTree(nodeList): # input: list using bfs, output: root
    """Build a binary tree from a level-order (BFS) list of values.

    ``None`` placeholders are kept in ``new_node`` and are also pushed
    onto the build queue, so inputs where a ``None`` slot would itself
    need children are not supported — presumably callers only pass
    lists like [1,2,3,None,5,6]; TODO confirm.
    """
    new_node = []
    for elem in nodeList: # transfer list val to tree node
        # NOTE(review): `if elem` is falsy for 0 as well as None, so a
        # node value of 0 would silently become a missing node.
        if elem:
            new_node.append(TreeNode(elem))
        else:
            new_node.append(None)
    queue = deque()
    queue.append(new_node[0])
    resHead = queue[0]  # keep a handle on the root to return at the end
    i = 1
    while i <= len(new_node) - 1: # bfs method building
        head = queue.popleft()
        head.left = new_node[i] # build left and push
        queue.append(head.left)
        if i + 1 == len(new_node): # if no i + 1 in new_node
            break
        head.right = new_node[i + 1] # build right and push
        queue.append(head.right)
        i = i + 2
    return resHead
# Demo: build the sample tree and run the width computation on it.
root = constructTree([1,2,3,None,5,6])
x = Solution()
x.widthOfBinaryTree(root)
|
[
"lidongdongbuaa@gmail.com"
] |
lidongdongbuaa@gmail.com
|
f1df3479147b367dfc6cc0d007b4386d3a0e7fa8
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/express_route_circuit_authorization_py3.py
|
b4bb3df4dd52a8366e8cb7b9b1d60fa77e023bf0
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,559
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class ExpressRouteCircuitAuthorization(SubResource):
    """Authorization in an ExpressRouteCircuit resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param authorization_key: The authorization key.
    :type authorization_key: str
    :param authorization_use_status: AuthorizationUseStatus. Possible values
     are: 'Available' and 'InUse'. Possible values include: 'Available',
     'InUse'
    :type authorization_use_status: str or
     ~azure.mgmt.network.v2017_03_01.models.AuthorizationUseStatus
    :param provisioning_state: Gets the provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: Gets name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource
     is updated.
    :vartype etag: str
    """

    # Fields populated by the service only; rejected if sent by the client.
    _validation = {
        'etag': {'readonly': True},
    }

    # Wire-format mapping: attribute name -> JSON key (dotted keys are
    # nested under 'properties') and msrest serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
        'authorization_use_status': {'key': 'properties.authorizationUseStatus', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, *, id: str=None, authorization_key: str=None, authorization_use_status=None, provisioning_state: str=None, name: str=None, **kwargs) -> None:
        super(ExpressRouteCircuitAuthorization, self).__init__(id=id, **kwargs)
        self.authorization_key = authorization_key
        self.authorization_use_status = authorization_use_status
        self.provisioning_state = provisioning_state
        self.name = name
        # etag is server-generated; always starts as None on the client.
        self.etag = None
|
[
"noreply@github.com"
] |
xiafu-msft.noreply@github.com
|
20c78b60b815c01583da61d8d071a7b4e1735589
|
dbf8768bb3818b4003f2e34ff561afb235a3734a
|
/Python/Templates/Django/ProjectTemplates/Python/Web/PollsDjango/app-admin.py
|
898eb59ef40b51c7c1a179375f53430f2d5f5b8c
|
[
"Apache-2.0"
] |
permissive
|
wjk/PTVS
|
bf3880198ba35ae34b12872a86fe2b03d2a82180
|
184b6711a8700a7f9d78f6d6ac3b225f81a8b8b8
|
refs/heads/master
| 2020-12-14T16:11:40.486645
| 2020-01-17T20:45:15
| 2020-01-17T20:45:15
| 234,801,602
| 1
| 0
|
Apache-2.0
| 2020-01-18T21:41:27
| 2020-01-18T21:41:26
| null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
"""
Customizations for the Django administration interface.
"""
from django.contrib import admin
from app.models import Choice, Poll
class ChoiceInline(admin.TabularInline):
    """Choice objects can be edited inline in the Poll editor."""
    model = Choice
    # Number of extra blank Choice forms shown in the inline editor.
    extra = 3
class PollAdmin(admin.ModelAdmin):
    """Definition of the Poll editor."""
    # Edit-form layout: question text first, then a dated section.
    fieldsets = [
        (None, {'fields': ['text']}),
        ('Date information', {'fields': ['pub_date']}),
    ]
    # Related Choice rows are edited inline on the same page.
    inlines = [ChoiceInline]
    # Change-list configuration: columns, sidebar filter, search box,
    # and date-based drill-down navigation.
    list_display = ('text', 'pub_date')
    list_filter = ['pub_date']
    search_fields = ['text']
    date_hierarchy = 'pub_date'
admin.site.register(Poll, PollAdmin)
|
[
"steve.dower@microsoft.com"
] |
steve.dower@microsoft.com
|
b7f93333d8f7f87274baefcfac762f864f7617c2
|
f385e93fb799629318b6f5bbae1a3b29d62d8359
|
/database/citations/asuncion2012a.py
|
ea381f3189a4abf25a7944dc6845c8cf0f359501
|
[] |
no_license
|
JoaoFelipe/ipaw-index
|
bf113649de497d2008922eb80f8ea3bf2cd6aba5
|
f8fe329f0c35b11c84bd76e7b7da7a465d380a02
|
refs/heads/master
| 2020-03-17T19:51:13.892958
| 2018-05-18T00:54:08
| 2018-05-18T00:54:08
| 133,880,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
# coding: utf-8
from snowballing.models import *
from snowballing import dbindex
# Point the snowballing index at this citation file.
dbindex.last_citation_file = dbindex.this_file(__file__)

from ..work.y2012 import asuncion2012a
from ..work.y2016 import reddish2016a

# Record that reddish2016a cites asuncion2012a; no reference string or
# quoted contexts captured yet.
DB(Citation(
    reddish2016a, asuncion2012a, ref="",
    contexts=[
    ],
))
|
[
"joaofelipenp@gmail.com"
] |
joaofelipenp@gmail.com
|
c5af1b1c066c1a49573ef543d9894567d5909df2
|
9e482a31dd8c1661ad1953d7fbd24a532306f58c
|
/Plays/Play10/medium_batch_normalization.py
|
2d0166d6dce71a2e1ec99d39a56e366341b49c99
|
[] |
no_license
|
margaridav27/feup-fpro
|
49a66f6643c83adb948ff110f522948f43508519
|
e805e08d0fdd273db272300e3e9676c585030f23
|
refs/heads/master
| 2023-01-23T01:16:11.534704
| 2020-11-25T10:48:00
| 2020-11-25T10:48:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 10:14:35 2019
@author: Margarida Viera
"""
#solution using generator
def batch_norm(alist, batch_size):
    """Yield successive batches of *alist*, each shifted by its own median.

    Matches the original generator's behaviour: *alist* is consumed
    destructively (emptied as batches are yielded) and a final short
    batch is yielded when len(alist) is not a multiple of batch_size.

    :param alist: list of numbers; emptied in place as a side effect
    :param batch_size: desired batch length (the last batch may be shorter)
    """
    # Hoisted: the original re-ran this import on every loop iteration.
    from statistics import median
    while len(alist) > 0:
        # Take the next batch from the front in one slice instead of the
        # original copy-then-list.remove() dance (which was O(n^2)).
        size = min(batch_size, len(alist))
        batch = alist[:size]
        del alist[:size]
        med = median(batch)
        for n in range(len(batch)):
            batch[n] -= med
        yield batch
#solution using normal function return
#solution using normal function return
def batch_norm(alist, batch_size):
    """Split *alist* into batches of *batch_size* (last one may be
    shorter) and subtract each batch's median from its elements in
    place, returning the list of batches."""
    from statistics import median
    chunks = []
    remaining = alist
    # Peel full batches off the front; whatever is left is the tail batch.
    while len(remaining) > batch_size:
        chunks.append(remaining[:batch_size])
        remaining = remaining[batch_size:]
    if len(remaining) != 0:
        chunks.append(remaining)
    # Centre every batch on its own median.
    for chunk in chunks:
        mid = median(chunk)
        for idx in range(len(chunk)):
            chunk[idx] -= mid
    return chunks

print(batch_norm([10, 1, -12, 5, 1, 3, 7, 3, 3], 5))
|
[
"up201907907@fe.up.pt"
] |
up201907907@fe.up.pt
|
79800f0401cbb5ee3dfdafd55b2f0532cd565719
|
ab37cdd76b8d4da54ff1ce30b0fa2e3dfadd207f
|
/1001-1099/1008/1008.py
|
e2d869f2abde08e4f8eb119987332f9815576b5f
|
[] |
no_license
|
hay86/timus
|
b163d94052d3dedd51c82f5c10874402f805c6e1
|
0d06073228c23538ca785938c862d2b5e08bda63
|
refs/heads/master
| 2023-03-08T06:34:28.707612
| 2021-02-20T14:38:48
| 2021-02-20T14:38:48
| 100,444,783
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
import sys
img = [[False for i in range(12)] for j in range(12)]
def one():
 """Encode mode: read n points, mark them in the global grid ``img``,
 then BFS from the first point, printing each step's expansion
 directions (R/T/L/B) followed by ',' — or '.' for the final cell.
 Python 2 (print statement); relies on globals ``n`` and ``img``."""
 for i in range(n):
  x, y = [int(j) for j in sys.stdin.readline().split()]
  img[x][y] = True
  if i == 0:
   x0, y0 = x, y  # BFS starts from the first point read
 v = [[False for i in range(12)] for j in range(12)]  # visited grid
 q = [(x0, y0)]
 v[x0][y0] = True
 print '%d %d' % (x0, y0)
 while len(q) > 0:
  x, y = q.pop(0)
  o = ''  # directions of the not-yet-visited neighbours of (x, y)
  if img[x+1][y] and not v[x+1][y]:
   o += 'R'
   q.append((x+1, y))
   v[x+1][y] = True
  if img[x][y+1] and not v[x][y+1]:
   o += 'T'
   q.append((x, y+1))
   v[x][y+1] = True
  if img[x-1][y] and not v[x-1][y]:
   o += 'L'
   q.append((x-1, y))
   v[x-1][y] = True
  if img[x][y-1] and not v[x][y-1]:
   o += 'B'
   q.append((x, y-1))
   v[x][y-1] = True
  if len(q) == 0:
   print '.'  # queue drained: this was the last cell of the walk
  else:
   print o+','
def two():
 """Decode mode: replay an encoded BFS walk from stdin, rebuilding the
 grid in ``img``, then print the point count and the coordinates of
 every marked cell inside the tracked bounding box.
 Python 2 (print statement); relies on globals ``img``, ``x0``, ``y0``."""
 xn, yn = x0, y0  # bounding box minimum corner
 xm, ym = x0, y0  # bounding box maximum corner
 count = 1
 q=[(x0, y0)]
 img[x0][y0] = True
 for line in sys.stdin:
  if '.' in line:
   break  # terminator line: walk is complete
  x, y = q.pop(0)
  if 'R' in line:
   q.append((x+1, y))
   count += 1
   img[x+1][y] = True
   xm = max(xm, x+1)
  if 'T' in line:
   q.append((x, y+1))
   count += 1
   img[x][y+1] = True
   ym = max(ym, y+1)
  if 'L' in line:
   q.append((x-1, y))
   count += 1
   img[x-1][y] = True
   xn = min(xn, x-1)
  if 'B' in line:
   q.append((x, y-1))
   count += 1
   img[x][y-1] = True
   yn = min(yn, y-1)
 print count
 for x in range(xn, xm+1):
  for y in range(yn, ym+1):
   if img[x][y]:
    print x, y
# Mode dispatch on the first stdin line: a single number (no space) is a
# point count -> encode mode; a coordinate pair -> decode mode.
line = sys.stdin.readline()
if not ' ' in line:
 n = int(line)
 one()
else:
 x0, y0 = [int(y) for y in line.split()]
 two()
|
[
"xukaifeng1986@gmail.com"
] |
xukaifeng1986@gmail.com
|
81a081122381d85928f0ad8fb3aab7f699135e78
|
aca971629c16f16a4b0360669579d0751fd5da67
|
/src/indelPlot.py
|
f896ef722c39a732f5391609743e33d9627abe56
|
[
"MIT"
] |
permissive
|
ngannguyen/referenceViz
|
43769ded8cb3c77445391e26233352a61ed72744
|
6990a00739a712ccd1371e996229882252fa8f91
|
refs/heads/master
| 2021-01-01T06:26:33.975514
| 2012-03-22T21:34:54
| 2012-03-22T21:34:54
| 1,750,339
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,150
|
py
|
#!/usr/bin/env python
"""
Create Snp plots
nknguyen at soe dot ucsc dot edu
Jun 15 2011
"""
import os, sys
from optparse import OptionParser
import xml.etree.ElementTree as ET
#from numpy import *
import libPlotting as libplot
import matplotlib.pyplot as pyplot
import matplotlib.pylab as pylab
from matplotlib.ticker import *
from matplotlib.font_manager import FontProperties
class Sample():
    """Per-sample indel statistics read from a <statsForSample> XML node.

    Rates are per aligned base; events counted as both an insertion and
    a deletion are folded into both individual totals.
    """
    def __init__( self, xmlnode ):
        attrs = xmlnode.attrib
        self.name = attrs['sampleName']
        self.refname = attrs['referenceName']
        self.indel = float(attrs['totalInsertionAndDeletionPerAlignedBase'])
        # Fold combined indel events into the per-type totals.
        self.dels = float(attrs['totalDeletionPerAlignedBase']) + self.indel
        self.ins = float(attrs['totalInsertionPerAlignedBase']) + self.indel
def readfiles( options ):
    """Parse every XML file in options.files into lists of Sample.

    Samples named 'aggregate', 'ROOT', '' or listed in
    options.filteredSamples are skipped.

    :return: one list of Sample per input file, in options.files order
    """
    statsList = []
    for f in options.files:
        samples = []
        xmltree = ET.parse( f )
        root = xmltree.getroot()
        for s in root.findall( 'statsForSample' ):
            name = s.attrib[ 'sampleName' ]
            if name != 'aggregate' and name != 'ROOT' and name != '' and name not in options.filteredSamples:
                samples.append( Sample( s ) )
        statsList.append( samples )
    return statsList
def initOptions( parser ):
    """Register this tool's command-line options on *parser*.

    Adds --outdir (defaults to the current directory) and
    --filteredSamples (hyphen-separated sample names to exclude).
    """
    parser.add_option(
        '--outdir', dest='outdir', default='.', help='Output directory' )
    parser.add_option(
        '--filteredSamples', dest='filteredSamples',
        help='Hyphen separated list of samples that were filtered out (not to include in the plot)')
def checkOptions( args, options, parser ):
    """Validate positional args and normalize parsed options.

    Requires at least two existing xml stats files, which are stored in
    options.files; options.filteredSamples is split on '-' (empty list
    when unset).  Exits via parser.error() on bad input.
    """
    if len( args ) < 2:
        parser.error( 'Please provide two snpStats xml files.\n' )
    options.files = []
    for f in args:
        if not os.path.exists(f):
            parser.error( 'File %s does not exist.\n' % f )
        options.files.append( f )
    if options.filteredSamples:
        options.filteredSamples = options.filteredSamples.split('-')
    else:
        options.filteredSamples = []
def setAxes(fig, range1, range2):
    """Stack two axes vertically on *fig*, splitting the plot area in
    proportion to range1 (top axes) and range2 (bottom axes).

    :return: (top_axes, bottom_axes)
    """
    left, right = 0.12, 0.95
    bottom, top = 0.15, 0.95
    gap = 0.015
    width = right - left
    height = top - bottom
    # Divide the usable height (minus the gap) proportionally to range1.
    top_h = (height - gap) * (range1 / (range1 + range2))
    bottom_h = height - gap - top_h
    bottom_ax = fig.add_axes( [left, bottom, width, bottom_h] )
    top_ax = fig.add_axes( [left, bottom + bottom_h + gap, width, top_h] )
    return top_ax, bottom_ax
def drawPlot( options, samples1, samples2, type ):
    """Draw one scatter plot comparing per-sample indel rates between two
    reference assemblies.

    :param options: parsed command-line options (outdir, files, ...)
    :param samples1: list of Sample for the first reference
    :param samples2: list of Sample for the second reference
    :param type: 'insertion' or 'deletion' (NOTE: shadows builtin `type`)

    Python 2 module (uses the print statement); image written via libplot.
    """
    #Sorted in decreasing order of errorPerSite in samples1
    if type == 'insertion':
        samples1 = sorted( samples1, key=lambda s:s.ins, reverse=True )
    else:
        samples1 = sorted( samples1, key=lambda s:s.dels, reverse=True )
    if len( samples1 ) < 1:
        return
    #remove chimpSample:
    # panTro3 is held out here and re-appended after the 'average' column.
    chimpSample = None
    for i, s in enumerate(samples1):
        if s.name == 'panTro3':
            chimpSample = samples1.pop(i)
            break
    refname1 = samples1[0].refname
    refname2 = samples2[0].refname
    # Series 1: rate of every sample w.r.t. reference 1, plus 0 for the
    # reference itself.
    y1data = [ s.ins for s in samples1 ]
    if type == 'deletion':
        y1data = [ s.dels for s in samples1 ]
    xticklabels = [ s.name for s in samples1 ]
    #indel of refname1 w.r.t itself (0)
    y1data.append(0)
    xticklabels.append(refname1)
    # Series 2: same samples, same order, rates w.r.t. reference 2.
    y2data = []
    for name in xticklabels:
        if name == refname2:#indel of refname2 w.r.t itself (0)
            y2data.append(0)
        # NOTE(review): no `continue` above — if samples2 also contains a
        # Sample named refname2 its rate would be appended as well.
        for s in samples2:
            if s.name == name:
                if type == 'insertion':
                    y2data.append(s.ins)
                else:
                    y2data.append(s.dels)
                break
    if len(xticklabels) != len(y2data):
        sys.stderr.write("Input file 1 and 2 do not have the same set of samples\n")
        sys.exit( 1 )
    #add the average column:
    num = 1
    # len-1 excludes the reference's own 0 entry from the mean.
    y1avr = sum(y1data)/float(len(y1data) - 1)
    y1data.append(y1avr)
    xticklabels.append('average')
    y2avr = sum(y2data)/float(len(y2data) - 1)
    y2data.append(y2avr)
    print "%s Average: %s %f, %s %f" %(type, refname1, y1avr, refname2, y2avr)
    #Add chimp:
    samples1.append(chimpSample)
    if type == 'insertion':
        y1data.append( chimpSample.ins )
    else:
        y1data.append( chimpSample.dels )
    for s in samples2:
        if s.name == 'panTro3':
            if type == 'insertion':
                y2data.append(s.ins)
            else:
                y2data.append(s.dels)
    xticklabels.append("panTro3")
    # Shared y-range with a small margin on both sides.
    minMajority = min( [min(y2data), min(y1data)] ) - 0.0001
    maxMajority = max( [max(y2data), max(y1data)] ) + 0.0001
    basename = os.path.basename(options.files[0])
    options.out = os.path.join( options.outdir, '%s_%s' %( type, basename.lstrip('pathStats').lstrip('_').rstrip('.xml') ) )
    fig, pdf = libplot.initImage( 11.2, 10.0, options )
    #ax, ax2 = setAxes(fig, maxOutlier - minOutlier, maxMajority - minMajority)
    ax2 = fig.add_axes( [0.15, 0.15, 0.8, 0.8] )
    l2 = ax2.plot( y2data, marker='.', markersize=14.0, linestyle='none', color="#E31A1C" )
    l1 = ax2.plot( y1data, marker='.', markersize=14.0, linestyle='none', color="#1F78B4" )
    #Legend
    fontP = FontProperties()
    fontP.set_size("x-small")
    legend = ax2.legend([l1, l2], [libplot.properName(refname1), libplot.properName(refname2)], 'upper right', numpoints=1, prop=fontP)
    legend._drawFrame = False
    ax2.set_ylim( minMajority, maxMajority )
    ax2.set_xlim( -0.5, len(xticklabels) -0.5 )
    # Hide top/right spines; ticks only on the bottom and left.
    ax2.spines['top'].set_visible(False)
    ax2.spines['right'].set_visible(False)
    ax2.xaxis.tick_bottom()
    ax2.yaxis.set_ticks_position( 'left' )
    ax2.set_xticks( range( 0, len(xticklabels) ) )
    properxticklabels = [ libplot.properName(l) for l in xticklabels ]
    ax2.set_xticklabels( properxticklabels )
    for label in ax2.xaxis.get_ticklabels():
        label.set_rotation( 90 )
    ax2.yaxis.grid(b=True, color="#CCCCCC", linestyle='-', linewidth=0.005)
    ax2.xaxis.grid(b=True, color="#CCCCCC", linestyle='-', linewidth=0.005)
    ax2.set_xlabel( 'Samples' )
    title = 'Deletions'
    #if type == 'insertion':
    if type == 'insertion':
        ax2.set_ylabel( 'Insertions per site' )
        title = 'Insertions'
    else:
        ax2.set_ylabel( 'Deletions per site' )
    ax2.set_title( title )
    libplot.writeImage( fig, pdf, options )
def main():
    """Parse options, read the two stats XML files and draw both plots."""
    usage = ('Usage: %prog [options] file1.xml file2.xml\n\n')
    parser = OptionParser( usage = usage )
    initOptions( parser )
    libplot.initOptions( parser )
    options, args = parser.parse_args()
    checkOptions( args, options, parser )
    libplot.checkOptions( options, parser )
    statsList = readfiles( options )
    # One figure per event type, comparing the same two sample sets.
    drawPlot( options, statsList[0], statsList[1], 'insertion' )
    drawPlot( options, statsList[0], statsList[1], 'deletion' )

if __name__ == "__main__":
    main()
|
[
"nknguyen@soe.ucsc.edu"
] |
nknguyen@soe.ucsc.edu
|
d82ed1cfeb3e9cf9432826e65574bb198fceddb4
|
01dad4d1d2ffaf2fa070e99fe828d42f59a9f9d1
|
/src/pycrop2ml_ui/packages/SQ_Energy_Balance/src/openalea/Penman.py
|
ab023da150792ac8a482b0e9d2ed39cd94ea4ed8
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
AgriculturalModelExchangeInitiative/Pycrop2ml_ui
|
5e210facf9689348bb57c16060967118b7c5f49a
|
3d5d2b87a74f0be306056b71808286922fef2945
|
refs/heads/master
| 2023-06-24T13:52:39.933728
| 2023-06-17T00:17:26
| 2023-06-17T00:17:26
| 193,912,881
| 0
| 4
|
MIT
| 2023-02-25T13:26:57
| 2019-06-26T13:44:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,614
|
py
|
# coding: utf8
import numpy
from math import *
def model_penman(evapoTranspirationPriestlyTaylor = 449.367,
         hslope = 0.584,
         VPDair = 2.19,
         psychrometricConstant = 0.66,
         Alpha = 1.5,
         lambdaV = 2.454,
         rhoDensityAir = 1.225,
         specificHeatCapacityAir = 0.00101,
         conductance = 598.685):
    """
     - Description:
                 * Title: Penman Model
                 * Author: Pierre Martre
                 * Reference: Modelling energy balance in the wheat crop model SiriusQuality2:
             Evapotranspiration and canopy and soil temperature calculations
                 * Institution: INRA/LEPSE Montpellier
                 * Abstract: This method is used when wind and vapor pressure daily data are available
     - inputs:
                 * name: evapoTranspirationPriestlyTaylor
                               ** min : 0
                               ** default : 449.367
                               ** max : 10000
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** variablecategory : rate
                               ** datatype : DOUBLE
                               ** inputtype : variable
                               ** unit : g m-2 d-1
                               ** description : evapoTranspiration of Priestly Taylor
                 * name: hslope
                               ** min : 0
                               ** default : 0.584
                               ** max : 1000
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** variablecategory : auxiliary
                               ** datatype : DOUBLE
                               ** inputtype : variable
                               ** unit : hPa °C-1
                               ** description : the slope of saturated vapor pressure temperature curve at a given temperature
                 * name: VPDair
                               ** min : 0
                               ** default : 2.19
                               ** max : 1000
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** variablecategory : auxiliary
                               ** datatype : DOUBLE
                               ** inputtype : variable
                               ** unit : hPa
                               ** description : vapour pressure density
                 * name: psychrometricConstant
                               ** parametercategory : constant
                               ** min : 0
                               ** datatype : DOUBLE
                               ** max : 1
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** default : 0.66
                               ** inputtype : parameter
                               ** unit :
                               ** description : psychrometric constant
                 * name: Alpha
                               ** parametercategory : constant
                               ** min : 0
                               ** datatype : DOUBLE
                               ** max : 100
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** default : 1.5
                               ** inputtype : parameter
                               ** unit :
                               ** description : Priestley-Taylor evapotranspiration proportionality constant
                 * name: lambdaV
                               ** parametercategory : constant
                               ** min : 0
                               ** datatype : DOUBLE
                               ** max : 10
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** default : 2.454
                               ** inputtype : parameter
                               ** unit :
                               ** description : latent heat of vaporization of water
                 * name: rhoDensityAir
                               ** parametercategory : constant
                               ** datatype : DOUBLE
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** default : 1.225
                               ** inputtype : parameter
                               ** unit :
                               ** description : Density of air
                 * name: specificHeatCapacityAir
                               ** parametercategory : constant
                               ** min : 0
                               ** datatype : DOUBLE
                               ** max : 1
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** default : 0.00101
                               ** inputtype : parameter
                               ** unit :
                               ** description : Specific heat capacity of dry air
                 * name: conductance
                               ** min : 0
                               ** default : 598.685
                               ** max : 10000
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** variablecategory : state
                               ** datatype : DOUBLE
                               ** inputtype : variable
                               ** unit : m d-1
                               ** description : conductance
     - outputs:
                 * name: evapoTranspirationPenman
                               ** min : 0
                               ** variablecategory : rate
                               ** max : 5000
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** datatype : DOUBLE
                               ** unit : g m-2 d-1
                               ** description : evapoTranspiration of Penman Monteith
    """
    # NOTE: the docstring above is structured metadata parsed by the
    # pycrop2ml tooling — do not reformat it.
    # Combination equation: radiative term (Priestley-Taylor estimate
    # scaled back by Alpha) plus an aerodynamic term driven by air VPD
    # and canopy conductance (the 1000 factor is presumably a kg -> g
    # unit conversion — TODO confirm against the SiriusQuality docs).
    evapoTranspirationPenman = evapoTranspirationPriestlyTaylor / Alpha + (1000.0 * (rhoDensityAir * specificHeatCapacityAir * VPDair * conductance / (lambdaV * (hslope + psychrometricConstant))))
    return evapoTranspirationPenman
|
[
"ahmedmidingoyi@yahoo.fr"
] |
ahmedmidingoyi@yahoo.fr
|
07fd478a0c99e3470575c12f1bb74ad945580d0c
|
e9a083fb04bf9061a2c49871cfbec9b37ff8f71b
|
/docs/source/conf.py
|
c609679b4ac4c8100904a33ce92c16726bc46c12
|
[] |
no_license
|
olaurino/rama
|
7f86223d66f42c639672da6b8979eacaf56b28ed
|
2c88ca2263ccbf6d0737fea0ac5dc0341d71c53a
|
refs/heads/master
| 2021-01-25T14:49:32.330753
| 2018-06-04T14:25:27
| 2018-06-04T14:25:27
| 123,731,355
| 0
| 2
| null | 2018-05-08T13:25:28
| 2018-03-03T21:05:53
|
Python
|
UTF-8
|
Python
| false
| false
| 5,608
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from rama.utils import Singleton
from sphinx.ext.autodoc import ClassDocumenter
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'Rama'
copyright = '2018, Omar Laurino'
author = 'Omar Laurino'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# html_theme_options = {}
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ramadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Rama.tex', 'Rama Documentation',
'Omar Laurino', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rama', 'Rama Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Rama', 'Rama Documentation',
author, 'Rama', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
class SingletonDocumenter(ClassDocumenter):
objtype = 'Singleton'
directivetype = 'class'
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return isinstance(member, Singleton)
def setup(app):
app.add_autodocumenter(SingletonDocumenter)
|
[
"olaurino@cfa.harvard.edu"
] |
olaurino@cfa.harvard.edu
|
8ed992846cecdd828e575dfa6c66da38336b9797
|
acf7457d3a799cb9bff12686d2d616688bcd4b5b
|
/packages/python/plotly/plotly/validators/scatter/legendgrouptitle/font/_family.py
|
630d0965793bea6d3cb6b80383b246586092f423
|
[
"MIT"
] |
permissive
|
plotly/plotly.py
|
f4f61639f08160f16195efc95b5901dc5a937346
|
975a704074f01c078e0fdfa32bdf17130bf89e69
|
refs/heads/master
| 2023-09-06T06:15:08.340035
| 2023-08-24T12:28:14
| 2023-08-24T12:28:14
| 14,579,099
| 14,751
| 2,989
|
MIT
| 2023-09-08T19:55:32
| 2013-11-21T05:53:08
|
Python
|
UTF-8
|
Python
| false
| false
| 554
|
py
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="family",
parent_name="scatter.legendgrouptitle.font",
**kwargs,
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs,
)
|
[
"nicolas@plot.ly"
] |
nicolas@plot.ly
|
12c48d5ff16675e2a8f6a2d902504de1ea724719
|
3540272eb4522c637fb293a924a6ad8d7b365718
|
/tribune/news/models.py
|
33b78f25b5e2b10758918424db9589c053a886ce
|
[] |
no_license
|
nevooronni/tribune
|
b0e80a4613758690702fa88eb99f44f4e8e66a30
|
7218d3514277ce408128b4e8c66da5639cf7dec4
|
refs/heads/master
| 2021-08-14T16:55:46.280863
| 2017-11-16T08:41:18
| 2017-11-16T08:41:18
| 110,561,868
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,167
|
py
|
from django.db import models#import models class configured to allow us to communicate with the db
import datetime as dt
class Editor(models.Model):#created editor class inherits from model class
first_name = models.CharField(max_length = 30)#charfield is the sql equivalent to varchar a string field for small to large size strings
last_name = models.CharField(max_length = 30)
email = models.EmailField()
phone_number = models.CharField(max_length = 10,blank = True)
def save_editor(self):
self.save()
def delete_editor(self):
self.delete()
#def display_all(self):
#self.objects.all()
#this updates our models so we can easily read it in the shell
def __str__(self):#string representation of our model
return self.first_name
class Meta:
ordering = ['first_name']
class tags(models.Model):
name = models.CharField(max_length = 30)
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField(max_length = 60)
post = models.TextField()#textarea tag in html
editor = models.ForeignKey(Editor)#foreign key column defines one to many relationship to editor
tags = models.ManyToManyField(tags)#many to many relationship with the tags class
pub_date = models.DateTimeField(auto_now_add=True)#timestamp to establish when the articles were published
article_image = models.ImageField(upload_to = 'articles/')#image field takes upload_to argument defines where the image will be stored in the file system.
#def save_article(self):
#self.save()
def __str__(self):
return self.title
@classmethod
def todays_news(cls):
today = dt.date.today()#module to get todays date
news = cls.objects.filter(pub_date__date = today)#qeury db to filter articles by current date
return news
@classmethod
def day_news(cls,date):#takes date object as an argument
news = cls.objects.filter(pub_date__date = date)
return news
@classmethod
def search_by_title(cls,search_term):
news = cls.objects.filter(title__icontains=search_term)#will filter our model data using the __icontains filter will check if any word in the title field of our articles matches the search_term
return news
|
[
"nevooronni@gmail.com"
] |
nevooronni@gmail.com
|
902d047c818eadc130281316b90cfde772634bd0
|
6b1aaded6a6d7ad8133eb93f5570d087b9ecefc0
|
/57.py
|
a778a4be509d89c1f7027528296bead44d2f43f7
|
[] |
no_license
|
huangyingw/Leetcode-Python
|
53a772e1ecf298c829c0f058faa54c420420b002
|
9513e215d40145a5f2f40095b459693c79c4b560
|
refs/heads/master
| 2021-07-16T10:10:02.457765
| 2020-07-01T05:35:21
| 2020-07-01T05:35:21
| 192,150,219
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
# Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
def insert(self, intervals, newInterval):
"""
:type intervals: List[Interval]
:type newInterval: Interval
:rtype: List[Interval]
"""
res = []
i = 0
while i < len(intervals) and intervals[i].end < newInterval.start:
res.append(intervals[i])
i += 1
while i < len(intervals) and intervals[i].start <= newInterval.end:
newInterval.start = min(intervals[i].start, newInterval.start)
newInterval.end = max(intervals[i].end, newInterval.end)
i += 1
res.append(newInterval)
res.extend(intervals[i:])
return res
|
[
"tiant@qualtrics.com"
] |
tiant@qualtrics.com
|
50d9115c118e9ba2d5ebc6b7d58c98818ecd010c
|
bd4734d50501e145bc850426c8ed595d1be862fb
|
/7Kyu - Growth of a Populationdef nb_year-p0- percent- aug- p- count - 0 while-p0-p- p0 - p0 - p0-percent/7Kyu - Growth of a Population.py
|
1259b444757fdd602a08df17a598257ae2dcc7d2
|
[] |
no_license
|
OrdinaryCoder00/CODE-WARS-PROBLEMS-SOLUTIONS
|
f61ff9e5268305519ffeed4964589289f4148cfd
|
5711114ddcc6a5f22f143d431b2b2e4e4e8ac9fb
|
refs/heads/master
| 2021-10-23T09:09:45.670850
| 2019-03-16T13:24:17
| 2019-03-16T13:24:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
def nb_year(p0, percent, aug, p):
count = 0
while(p0<p):
p0 = p0 + p0*(percent/100) + aug
count = count + 1
return count
|
[
"noreply@github.com"
] |
OrdinaryCoder00.noreply@github.com
|
57b906c83a2d61619b54bb2afe90bb43616f21ce
|
4111ca5a73a22174f189361bef654c3f91c3b7ed
|
/Lintcode/Ladder_47_BigData/128. Hash Function.py
|
6d7607ff24020011e66b6d9c8c4b6774644f2261
|
[
"MIT"
] |
permissive
|
ctc316/algorithm-python
|
58b541b654509ecf4e9eb8deebfcbdf785699cc4
|
ac4580d55e05e93e407c6156c9bb801808027d60
|
refs/heads/master
| 2020-03-16T06:09:50.130146
| 2019-08-02T02:50:49
| 2019-08-02T02:50:49
| 132,548,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
class Solution:
"""
@param key: A string you should hash
@param HASH_SIZE: An integer
@return: An integer
"""
def hashCode(self, key, HASH_SIZE):
code = 0
for ch in key:
code = (code * 33 + ord(ch)) % HASH_SIZE
return code
|
[
"mike.tc.chen101@gmail.com"
] |
mike.tc.chen101@gmail.com
|
3f7c1ecec5f580016a5068bd48f1de2040b2bf6b
|
0a801544da5ad2f1969348512f7def8fa9176c7d
|
/backend/simplicite_23801/urls.py
|
25d91cd59bdf8a1ad01544b6a94abcbe42abf6ab
|
[] |
no_license
|
crowdbotics-apps/simplicite-23801
|
75477b76531c2c53992f74183e9e2e80aefd22e4
|
e6a6f569c61449e50988201cf58cc5203d23e039
|
refs/heads/master
| 2023-02-11T12:27:04.807579
| 2021-01-13T03:01:57
| 2021-01-13T03:01:57
| 329,176,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,668
|
py
|
"""simplicite_23801 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("api/v1/", include("task.api.v1.urls")),
path("task/", include("task.urls")),
path("api/v1/", include("task_profile.api.v1.urls")),
path("task_profile/", include("task_profile.urls")),
path("api/v1/", include("tasker_business.api.v1.urls")),
path("tasker_business/", include("tasker_business.urls")),
path("api/v1/", include("location.api.v1.urls")),
path("location/", include("location.urls")),
path("api/v1/", include("wallet.api.v1.urls")),
path("wallet/", include("wallet.urls")),
path("api/v1/", include("task_category.api.v1.urls")),
path("task_category/", include("task_category.urls")),
path("home/", include("home.urls")),
]
admin.site.site_header = "Simplicite"
admin.site.site_title = "Simplicite Admin Portal"
admin.site.index_title = "Simplicite Admin"
# swagger
api_info = openapi.Info(
title="Simplicite API",
default_version="v1",
description="API documentation for Simplicite App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
c826ef5b94146713cbfb40ea8d2456a72ea50850
|
11137bde91389c04a95df6f6fdaf64f7f49f5f80
|
/introduction_MIT/16_2图表会骗人.py
|
582608abe22748eee617a7fb4bb7c90df1af46fb
|
[] |
no_license
|
starschen/learning
|
cf3c5a76c867567bce73e9cacb2cf0979ba053d9
|
34decb8f9990117a5f40b8db6dba076a7f115671
|
refs/heads/master
| 2020-04-06T07:02:56.444233
| 2016-08-24T08:11:49
| 2016-08-24T08:11:49
| 39,417,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
#encoding:utf8
#16_2图表会骗人.py
#绘制房价
import pylab
def plotHousing(impression):
'''假设impression是一个字符串,必须是‘flat’, ‘volatile’或者是‘fair’
生成房价随时间变化的图表'''
f=open('midWestHousingPrices.txt','r')
#文件的每一行是年季度价格
#数据来自美国中部区域
labels,prices=([],[])
for line in f:
year,quarter,price=line.split(' ')
label=year[2:4]+'\n Q'+quarter[1]
labels.append(label)
prices.append(float(price)/1000)
quarters=pylab.arange(len(labels))
width=0.8
if impression=='flat':
pylab.semilogy()
pylab.bar(quarters,prices,width)
pylab.xticks(quarters+width/2.0,labels)
pylab.title('Housing Prices in U.S. Midwest')
pylab.xlabel('Quarter')
pylab.ylabel('Average Price($1,000\'s)')
if impression=='flat':
pylab.ylim(10,10**3)
elif impression =='volatile':
pylab.ylim(180,220)
elif impression=='fair':
pylab.ylim(150,250)
else:
raise ValueError
plotHousing('flat')
pylab.figure()
plotHousing('volatile')
pylab.figure()
plotHousing('fair')
pylab.show()
|
[
"stars_chenjiao@163.com"
] |
stars_chenjiao@163.com
|
1b2a6c3181548e466eacfa3b040cbc883242e73b
|
35f1a21affd266e0069bfc5a1c83218847f13802
|
/pastie-5073437.py
|
9cc5c027ce00562a66e8461c02cfa8768964c92c
|
[] |
no_license
|
KarenWest/pythonClassProjects
|
ff1e1116788174a2affaa96bfcb0e97df3ee92da
|
5aa496a71d36ffb9892ee6e377bd9f5d0d8e03a0
|
refs/heads/master
| 2016-09-16T15:20:26.882688
| 2014-02-21T20:07:57
| 2014-02-21T20:07:57
| 17,055,355
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
#balance = 5000
#annualInterestRate = 0.18
#monthlyPaymentRate = 0.02
months = range(1, 13) # 1, 2, ... , 11, 12
owe = balance # It made sense to me to call this total amount oweing
totalPaid = 0
for month in months:
minPay = owe * monthlyPaymentRate # calculate our minimum payment
interest = (owe - minPay) * (annualInterestRate / 12) # same for interest
owe = owe - minPay + interest # calculate our new balance
totalPaid += minPay # Sum up how much we've paid so far
print('Month: %d' % month) # %d will be replaced by month
print('Minimum monthly payment: %.2f' % minPay) # %.2f replaced by minPay, with 2 decimal places
print('Remaining balance: %.2f' % owe)
print('Total paid %.2f' % totalPaid)
print('Remaining balance: %.2f' % owe)
|
[
"KarenWest15@gmail.com"
] |
KarenWest15@gmail.com
|
4d909ba4892e6c9c564466ba0ea7fe903b3857ab
|
8bc7ba8eb10e30b38f2bcf00971bfe540c9d26b7
|
/paxes_cinder/k2aclient/v1/virtualswitch_manager.py
|
6d956eb5bdf9ebcdab6aba2f1378e4c6151dbbdc
|
[
"Apache-2.0"
] |
permissive
|
windskyer/k_cinder
|
f8f003b2d1f9ca55c423ea0356f35a97b5294f69
|
000ee539ee4842a158071d26ee99d12c7c0a87da
|
refs/heads/master
| 2021-01-10T02:19:51.072078
| 2015-12-08T15:24:33
| 2015-12-08T15:24:33
| 47,629,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
#
#
# =================================================================
# =================================================================
"""VirtualSwitch interface."""
from paxes_cinder.k2aclient import base
from paxes_cinder.k2aclient.v1 import k2uom
class VirtualSwitchManager(base.ManagerWithFind):
"""Manage :class:`ClientNetworkAdapter` resources."""
resource_class = k2uom.VirtualSwitch
def new(self):
return self.resource_class(self, None)
def list(self, managedsystem, xa=None):
"""Get a list of all VirtualSwitch for a particular
ManagedSystem accessed through a particular hmc.
:rtype: list of :class:`ClientNetworkAdapter`.
"""
return self._list("/rest/api/uom/ManagedSystem/%s/VirtualSwitch"
% managedsystem, xa=xa)
def get(self, managedsystem, virtualswitch, xa=None):
"""Given managedsystem, get a specific VirtualSwitch.
:param virtualswitch: The ID of the :class:`VirtualSwitch`.
:rtype: :class:`VirtualSwitch`
"""
return self._get("/rest/api/uom/ManagedSystem/%s/VirtualSwitch/%s"
% (managedsystem, virtualswitch,),
xa=xa)
def delete(self, managedsystem, virtualswitch, xa=None):
"""Delete the specified instance
"""
return self._delete("uom",
managedsystem,
child=virtualswitch,
xa=xa)
def deletebyid(self, managedsystem_id, virtualswitch_id, xa=None):
"""Delete the specified instance
"""
return self._deletebyid("uom",
"ManagedSystem",
managedsystem_id,
child_type=k2uom.VirtualSwitch,
child_id=virtualswitch_id,
xa=xa)
|
[
"leidong@localhost"
] |
leidong@localhost
|
e463337960661352eec76273356b1323176686ca
|
04ae1836b9bc9d73d244f91b8f7fbf1bbc58ff29
|
/170/Solution.py
|
1ece9965f93617191e3d6e484aeefd64c54f0c67
|
[] |
no_license
|
zhangruochi/leetcode
|
6f739fde222c298bae1c68236d980bd29c33b1c6
|
cefa2f08667de4d2973274de3ff29a31a7d25eda
|
refs/heads/master
| 2022-07-16T23:40:20.458105
| 2022-06-02T18:25:35
| 2022-06-02T18:25:35
| 78,989,941
| 14
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,272
|
py
|
"""
Design and implement a TwoSum class. It should support the following operations: add and find.
add - Add the number to an internal data structure.
find - Find if there exists any pair of numbers which sum is equal to the value.
Example 1:
add(1); add(3); add(5);
find(4) -> true
find(7) -> false
Example 2:
add(3); add(1); add(2);
find(3) -> true
find(6) -> false
"""
from collections import defaultdict
class TwoSum:
def __init__(self):
"""
Initialize your data structure here.
"""
self.count = 0
self.numbers = defaultdict(list)
def add(self, number):
"""
Add the number to an internal data structure..
:type number: int
:rtype: void
"""
self.numbers[number].append(self.count)
self.count += 1
def find(self, value):
"""
Find if there exists any pair of numbers which sum is equal to the value.
:type value: int
:rtype: bool
"""
for num, indexs in self.numbers.items():
tmp = value - num
if tmp in self.numbers:
if tmp == num and len(indexs) > 1:
return True
if tmp != num:
return True
return False
class TwoSum(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.nums = {}
def add(self, number):
"""
Add the number to an internal data structure..
:type number: int
:rtype: None
"""
if number not in self.nums:
self.nums[number] = 1
else:
self.nums[number] += 1
def find(self, value):
"""
Find if there exists any pair of numbers which sum is equal to the value.
:type value: int
:rtype: bool
"""
for key in self.nums:
second = value - key
if ((key != second) and second in self.nums) or (key == second and self.nums[key] > 1):
return True
return False
# Your TwoSum object will be instantiated and called as such:
# obj = TwoSum()
# obj.add(number)
# param_2 = obj.find(value)
|
[
"zrc720@gmail.com"
] |
zrc720@gmail.com
|
51bacc265849e99d5638fe2aa84fb25204c57781
|
a0dda8be5892a390836e19bf04ea1d098e92cf58
|
/叶常春视频例题/chap05/5-2-9-生词清单.py
|
6c9697103cde9015e7e96499929c29627c18643d
|
[] |
no_license
|
wmm98/homework1
|
d9eb67c7491affd8c7e77458ceadaf0357ea5e6b
|
cd1f7f78e8dbd03ad72c7a0fdc4a8dc8404f5fe2
|
refs/heads/master
| 2020-04-14T19:22:21.733111
| 2019-01-08T14:09:58
| 2019-01-08T14:09:58
| 164,055,018
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
# 例5-2-9 生词清单
new_words = []
for i in range(1, 101):
word = input("输入生词:")
if word == "*":
break # break语句的作用是跳出循环,执行循环后面的语句。
if word not in new_words:
new_words.append(word)
print("生词清单:", new_words)
|
[
"792545884@qq.com"
] |
792545884@qq.com
|
d4e630393d97b23b24b61c540310af9eced66716
|
4d4fcde3efaa334f7aa56beabd2aa26fbcc43650
|
/server/src/uds/core/managers/userservice/comms.py
|
e2b06381d46b8a78ab640c8bf0c609b7fff00dda
|
[] |
no_license
|
xezpeleta/openuds
|
a8b11cb34eb0ef7bb2da80f67586a81b2de229ef
|
840a7a02bd7c9894e8863a8a50874cdfdbf30fcd
|
refs/heads/master
| 2023-08-21T17:55:48.914631
| 2021-10-06T10:39:06
| 2021-10-06T10:39:06
| 414,489,331
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,264
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019-2021 Virtual Cable S.L.U.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L.U. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
.. moduleauthor:: Adolfo Gómez, dkmaster at dkmon dot com
"""
import os
import json
import base64
import tempfile
import logging
import typing
import requests
if typing.TYPE_CHECKING:
from uds.models import UserService
logger = logging.getLogger(__name__)
TIMEOUT = 2
class NoActorComms(Exception):
pass
class OldActorVersion(NoActorComms):
pass
def _requestActor(
userService: 'UserService',
method: str,
data: typing.Optional[typing.MutableMapping[str, typing.Any]] = None,
minVersion: typing.Optional[str] = None,
) -> typing.Any:
"""
Makes a request to actor using "method"
if data is None, request is done using GET, else POST
if no communications url is provided or no min version, raises a "NoActorComms" exception (or OldActorVersion, derived from NoActorComms)
Returns request response value interpreted as json
"""
url = userService.getCommsUrl()
if not url:
# logger.warning('No notification is made because agent does not supports notifications: %s', userService.friendly_name)
raise NoActorComms(
'No notification urls for {}'.format(userService.friendly_name)
)
minVersion = minVersion or '2.0.0'
version = userService.getProperty('actor_version') or '0.0.0'
if '-' in version or version < minVersion:
logger.warning(
'Pool %s has old actors (%s)', userService.deployed_service.name, version
)
raise OldActorVersion(
'Old actor version {} for {}'.format(version, userService.friendly_name)
)
url += '/' + method
proxy = userService.deployed_service.proxy
try:
if proxy:
r = proxy.doProxyRequest(url=url, data=data, timeout=TIMEOUT)
else:
verify: typing.Union[bool, str]
cert = userService.getProperty('cert')
# cert = '' # Untils more tests, keep as previous.... TODO: Fix this when fully tested
if cert:
# Generate temp file, and delete it after
verify = tempfile.mktemp('udscrt')
with open(verify, 'wb') as f:
f.write(cert.encode()) # Save cert
else:
verify = False
if data is None:
r = requests.get(url, verify=verify, timeout=TIMEOUT)
else:
r = requests.post(
url,
data=json.dumps(data),
headers={'content-type': 'application/json'},
verify=verify,
timeout=TIMEOUT,
)
if verify:
try:
os.remove(typing.cast(str, verify))
except Exception:
logger.exception('removing verify')
js = r.json()
if version >= '3.0.0':
js = js['result']
logger.debug('Requested %s to actor. Url=%s', method, url)
except Exception as e:
logger.warning(
'Request %s failed: %s. Check connection on destination machine: %s',
method,
e,
url,
)
js = None
return js
def notifyPreconnect(userService: 'UserService', userName: str, protocol: str) -> None:
"""
Notifies a preconnect to an user service
"""
ip, hostname = userService.getConnectionSource()
try:
_requestActor(
userService,
'preConnect',
{'user': userName, 'protocol': protocol, 'ip': ip, 'hostname': hostname},
)
except NoActorComms:
pass # If no preconnect, warning will appear on UDS log
def checkUuid(userService: 'UserService') -> bool:
"""
Checks if the uuid of the service is the same of our known uuid on DB
"""
try:
uuid = _requestActor(userService, 'uuid')
if (
uuid and uuid != userService.uuid
): # Empty UUID means "no check this, fixed pool machine"
logger.info(
'Machine %s do not have expected uuid %s, instead has %s',
userService.friendly_name,
userService.uuid,
uuid,
)
return False
except NoActorComms:
pass
return True # Actor does not supports checking
def requestScreenshot(userService: 'UserService') -> bytes:
"""
Returns an screenshot in PNG format (bytes) or empty png if not supported
"""
emptyPng = 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=='
try:
png = _requestActor(
userService, 'screenshot', minVersion='3.0.0'
) # First valid version with screenshot is 3.0
except NoActorComms:
png = None
return base64.b64decode(png or emptyPng)
def sendScript(userService: 'UserService', script: str, forUser: bool = False) -> None:
"""
If allowed, send script to user service
"""
try:
data: typing.MutableMapping[str, typing.Any] = {'script': script}
if forUser:
data['user'] = forUser
_requestActor(userService, 'script', data=data)
except NoActorComms:
pass
def requestLogoff(userService: 'UserService') -> None:
"""
Ask client to logoff user
"""
try:
_requestActor(userService, 'logout', data={})
except NoActorComms:
pass
def sendMessage(userService: 'UserService', message: str) -> None:
"""
Sends an screen message to client
"""
try:
_requestActor(userService, 'message', data={'message': message})
except NoActorComms:
pass
|
[
"dkmaster@dkmon.com"
] |
dkmaster@dkmon.com
|
c886c20cf7de550debdfb0a88d9edcbafb45a992
|
0726db2d56b29f02a884885718deeddbf86df628
|
/lienp/visualize.py
|
af217e55f8ec1524e381eb022bcd47bbb2f01101
|
[] |
no_license
|
makora9143/EquivCNP
|
515dfd95557d8d3a21d3fc0f295ce885a9deb913
|
a78dea12ab672e796c86427823c9f1b2fdd8df8d
|
refs/heads/master
| 2023-03-17T04:34:26.320055
| 2021-03-05T18:02:18
| 2021-03-05T18:02:18
| 254,292,834
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,239
|
py
|
import io
import PIL.Image
import matplotlib.pyplot as plt
import torch
from torchvision.transforms import ToTensor, ToPILImage
from torchvision.utils import make_grid
def mnist_plot_function(target_x, target_y, context_x, context_y):
img = torch.zeros((28, 28, 3))
img[:, :, 2] = torch.ones((28, 28))
idx = (context_x + 14).clamp(0, 27).long()
img[idx[:, 0], idx[:, 1]] = context_y
print(f'num context:{context_x.shape[0]}')
plt.figure(figsize=(8, 4))
plt.subplot(121)
plt.imshow(img.numpy())
plt.gray()
plt.subplot(122)
plt.imshow(target_y.reshape(28, 28).numpy())
plt.show()
def plot_and_save_image(ctxs, tgts, preds, epoch=None):
ctx_img = []
tgt_img = []
pred_img = []
for ctx, tgt, tgt_y_dist in zip(ctxs, tgts, preds):
ctx_coords, ctx_values = ctx
tgt_coords, tgt_values = tgt
img = torch.zeros((28, 28, 3))
img[:, :, 2] = torch.ones((28, 28))
idx = (ctx_coords[0] + 14).clamp(0, 27).long()
img[idx[:, 0], idx[:, 1]] = ctx_values[0]
ctx_img.append(img.unsqueeze(0))
tgt_img.append(tgt_values.reshape(1, 1, 28, 28).repeat(1, 3, 1, 1))
pred_img.append(tgt_y_dist.mean.reshape(1, 1, 28, 28).repeat(1, 3, 1, 1))
ctx_img = torch.cat(ctx_img, 0).permute(0, 3, 1, 2).unsqueeze(1).to(torch.device('cpu'))
tgt_img = torch.cat(tgt_img, 0).unsqueeze(1).to(torch.device('cpu'))
pred_img = torch.cat(pred_img, 0).unsqueeze(1).to(torch.device('cpu'))
img = torch.cat([ctx_img, tgt_img, pred_img], 1).reshape(-1, 3, 28, 28)
img = make_grid(img, nrow=6).permute(1, 2, 0).clamp(0, 1)
plt.imsave("epoch_{}.png".format(epoch if epoch is not None else "test"), img.numpy())
def plot_and_save_image2(ctxs, tgts, preds, img_shape, epoch=None):
ctx_img = []
tgt_img = []
pred_img = []
C, W, H = img_shape
for ctx_mask, tgt, tgt_y_dist in zip(ctxs, tgts, preds):
img = torch.zeros((W, H, 3))
img[:, :, 2] = torch.ones((W, H))
img[ctx_mask[0, 0] == 1] = tgt[0, 0][ctx_mask[0, 0] == 1].unsqueeze(-1)
ctx_img.append(img)
tgt_img.append(tgt.repeat(1, 3//C, 1, 1))
pred_img.append(tgt_y_dist.mean.reshape(1, W, H, C).repeat(1, 1, 1, 3//C))
ctx_img = torch.stack(ctx_img, 0).permute(0, 3, 1, 2).unsqueeze(1).to(torch.device('cpu'))
tgt_img = torch.cat(tgt_img, 0).unsqueeze(1).to(torch.device('cpu'))
pred_img = torch.cat(pred_img, 0).unsqueeze(1).to(torch.device('cpu')).permute(0, 1, 4, 2, 3)
img = torch.cat([ctx_img, tgt_img, pred_img], 1).reshape(-1, 3, W, H)
img = make_grid(img, nrow=6).permute(1, 2, 0).clamp(0, 1)
plt.imsave("epoch_{}.png".format(epoch if epoch is not None else "test"), img.numpy())
def plot_and_save_graph(ctxs, tgts, preds, gp_preds, epoch=None):
graphs = []
for ctx, tgt, tgt_y_dist, gp_dist in zip(ctxs, tgts, preds, gp_preds):
ctx_coords, ctx_values = ctx
tgt_coords, tgt_values = tgt
mean = tgt_y_dist.mean.cpu()
lower, upper = tgt_y_dist.confidence_region()
gp_mean = gp_dist.mean.cpu()
gp_lower, gp_upper = gp_dist.confidence_region()
plt.plot(tgt_coords.reshape(-1).cpu(), gp_mean.detach().cpu().reshape(-1), color='green')
plt.fill_between(tgt_coords.cpu().reshape(-1), gp_lower.detach().cpu().reshape(-1), gp_upper.detach().cpu().reshape(-1), alpha=0.2, color='green')
plt.plot(tgt_coords.reshape(-1).cpu(), mean.detach().cpu().reshape(-1), color='blue')
plt.fill_between(tgt_coords.cpu().reshape(-1), lower.detach().cpu().reshape(-1), upper.detach().cpu().reshape(-1), alpha=0.2, color='blue')
plt.plot(tgt_coords.reshape(-1).cpu(), tgt_values.reshape(-1), '--', color='gray')
plt.plot(ctx_coords.reshape(-1).cpu(), ctx_values.reshape(-1).cpu(), 'o', color='black')
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
plt.clf()
plt.close()
img = PIL.Image.open(buf)
img = ToTensor()(img)
buf.close()
graphs.append(img)
img = ToPILImage()(make_grid(torch.stack(graphs, 0), nrow=2))
img.save("epoch_{}.png".format(epoch if epoch is not None else "test"))
|
[
"makoto.kawano@gmail.com"
] |
makoto.kawano@gmail.com
|
d019acbd04f6f92c44b1c6b5ef4f6c1d988e6d74
|
c50c22c8f814c8d9b697337891904aa0be0edf56
|
/shortest_string.py
|
b26e0a4d78a888e7c2d4a6252bf9b0d4e510728a
|
[] |
no_license
|
mhiloca/Codewars
|
a6dc6e8ea5e5c1e97fb4a3d01a059b3120b556b7
|
3155e4b20fbd96c8e7fbe6564014a136d095c079
|
refs/heads/master
| 2020-07-11T12:41:19.997254
| 2019-11-01T12:44:38
| 2019-11-01T12:44:38
| 204,541,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
"""
Simple, given a string of words, return the length of the shortest word(s).
String will never be empty and you do not need to account for different data types.
"""
def find_short(s):
return min(len(x) for x in s)
|
[
"mhiloca@gmail.com"
] |
mhiloca@gmail.com
|
c5d4d2f46c24a51bf6824c2c8735e80bc1f67f80
|
3f4464c932403615c1fbbaf82eaec096426b1ef5
|
/StartOutPy4/CH6 Files and Exceptions/write_sales.py
|
ecb7c7a8611babac6e9db4c42a7bbdc92ed31f8e
|
[] |
no_license
|
arcstarusa/prime
|
99af6e3fed275982bf11ada7bf1297294d527e91
|
5f1102aa7b6eaba18f97eb388525d48ab4cac563
|
refs/heads/master
| 2020-03-22T14:07:08.079963
| 2019-05-09T11:45:21
| 2019-05-09T11:45:21
| 140,154,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
# This program prompts the user for sales amounts
# and writes those amounts to the sales.txt file. 6-8
def main():
# Get the numbers of days.
num_days = int(input('For how many days do ' + 'you have sales? '))
# Open a nuew file named sales.txt.
sales_file = open('sales.txt', 'w')
# Get the amount of sales for each day and write
# it to the file.
for count in range (1, num_days + 1):
# Get the sales for a day.
sales = float(input('Enter the sales for day #' + str(count) + ': '))
# Write the sales amount to the file.
sales_file.write(str(sales) + '\n')
# Close the file.
sales_file.close()
print('Data written to sales.txt')
# Call the main function.
main()
|
[
"40938410+edwardigarashi@users.noreply.github.com"
] |
40938410+edwardigarashi@users.noreply.github.com
|
ee3ca22e9c0f5e05bbf59b552966f070d8a674d9
|
8613ec7f381a6683ae24b54fb2fb2ac24556ad0b
|
/20~29/ABC021/honest.py
|
9c7f3d43c9ab13670129a435bb52b6b7b42038ab
|
[] |
no_license
|
Forest-Y/AtCoder
|
787aa3c7dc4d999a71661465349428ba60eb2f16
|
f97209da3743026920fb4a89fc0e4d42b3d5e277
|
refs/heads/master
| 2023-08-25T13:31:46.062197
| 2021-10-29T12:54:24
| 2021-10-29T12:54:24
| 301,642,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
n = int(input())
a, b = map(int, input().split())
m = int(input())
x, y = [0] * m, [0] * m
data = [[] for _ in range(n)]
for i in range(m):
x, y = map(int, input().split())
data[x - 1].append(y - 1)
data[y - 1].append(x - 1)
dist = [[-1] * n for i in range(n)]
dist[a - 1][b - 1] = 0
|
[
"yuuya15009@gmail.com"
] |
yuuya15009@gmail.com
|
4bbad83e050e46e0bd882f7147d3faa597ef6614
|
26d6c34df00a229dc85ad7326de6cb5672be7acc
|
/msgraph-cli-extensions/beta/files_beta/setup.py
|
917c1b376b39507335d14d388323f62f011a8a2c
|
[
"MIT"
] |
permissive
|
BrianTJackett/msgraph-cli
|
87f92471f68f85e44872939d876b9ff5f0ae6b2c
|
78a4b1c73a23b85c070fed2fbca93758733f620e
|
refs/heads/main
| 2023-06-23T21:31:53.306655
| 2021-07-09T07:58:56
| 2021-07-09T07:58:56
| 386,993,555
| 0
| 0
|
NOASSERTION
| 2021-07-17T16:56:05
| 2021-07-17T16:56:05
| null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages
# HISTORY.rst entry.
VERSION = '0.1.0'
try:
from azext_files_beta.manual.version import VERSION
except ImportError:
pass
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = []
try:
from azext_files_beta.manual.dependency import DEPENDENCIES
except ImportError:
pass
with open('README.md', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='files_beta',
version=VERSION,
description='Microsoft Azure Command-Line Tools Files Extension',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli-extensions/tree/master/files_beta',
long_description=README + '\n\n' + HISTORY,
license='MIT',
classifiers=CLASSIFIERS,
packages=find_packages(),
install_requires=DEPENDENCIES,
package_data={'azext_files_beta': ['azext_metadata.json']},
)
|
[
"japhethobalak@gmail.com"
] |
japhethobalak@gmail.com
|
d005e74749c88692012dd32e899d20852ffbc130
|
c223e858c9ebf1b734221e4db4b3d594993a5536
|
/thespian/system/timing.py
|
ec5f22afa79083a0f4d2f9c23f08117403194959
|
[
"MIT"
] |
permissive
|
jfasenfest/Thespian
|
17f9738aff648328a40f94d3225427d82fe27e39
|
5979a2c9791b774fb620253bb62253c95cf7d4b5
|
refs/heads/master
| 2020-12-26T00:27:09.446001
| 2016-11-28T21:48:20
| 2016-11-28T21:48:20
| 48,067,029
| 0
| 0
| null | 2016-11-21T23:17:52
| 2015-12-15T20:24:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,566
|
py
|
from datetime import datetime, timedelta
###
### Time Management
###
def timePeriodSeconds(basis, other=None):
if isinstance(basis, datetime):
if isinstance(other, datetime):
return timePeriodSeconds(other - basis)
if isinstance(basis, timedelta):
try:
return basis.total_seconds()
except AttributeError:
# Must be Python 2.6... which doesn't have total_seconds yet
return (basis.days * 24.0 * 60 * 60) + basis.seconds + (basis.microseconds / 1000.0 / 1000)
raise TypeError('Cannot determine time from a %s argument'%str(type(basis)))
def toTimeDeltaOrNone(timespec):
if timespec is None: return None
if isinstance(timespec, timedelta): return timespec
if isinstance(timespec, int): return timedelta(seconds=timespec)
if isinstance(timespec, float):
return timedelta(seconds=int(timespec),
microseconds = int((timespec - int(timespec)) * 1000 * 1000))
raise TypeError('Unknown type for timespec: %s'%type(timespec))
class ExpiryTime(object):
def __init__(self, duration):
self._time_to_quit = None if duration is None else (datetime.now() + duration)
def expired(self):
return False if self._time_to_quit is None else (datetime.now() >= self._time_to_quit)
def remaining(self, forever=None):
return forever if self._time_to_quit is None else \
(timedelta(seconds=0) if datetime.now() > self._time_to_quit else \
(self._time_to_quit - datetime.now()))
def remainingSeconds(self, forever=None):
return forever if self._time_to_quit is None else \
(0 if datetime.now() > self._time_to_quit else \
timePeriodSeconds(self._time_to_quit - datetime.now()))
def __str__(self):
if self._time_to_quit is None: return 'Forever'
if self.expired():
return 'Expired_for_%s'%(datetime.now() - self._time_to_quit)
return 'Expires_in_' + str(self.remaining())
def __eq__(self, o):
if isinstance(o, timedelta):
o = ExpiryTime(o)
if self._time_to_quit == o._time_to_quit: return True
if self._time_to_quit == None or o._time_to_quit == None: return False
if self.expired() and o.expired(): return True
return abs(self._time_to_quit - o._time_to_quit) < timedelta(microseconds=1)
def __lt__(self, o):
try:
if self._time_to_quit is None and o._time_to_quit is None: return False
except Exception: pass
if self._time_to_quit is None: return False
if isinstance(o, timedelta):
o = ExpiryTime(o)
if o._time_to_quit is None: return True
return self._time_to_quit < o._time_to_quit
def __gt__(self, o):
try:
if self._time_to_quit is None and o._time_to_quit is None: return False
except Exception: pass
return not self.__lt__(o)
def __le__(self, o): return self.__eq__(o) or self.__lt__(o)
def __ge__(self, o): return self.__eq__(o) or self.__gt__(o)
def __ne__(self, o): return not self.__eq__(o)
def __bool__(self): return self.expired()
def __nonzero__(self): return self.expired()
class ExpirationTimer(object):
"""Keeps track of a duration relative to an original time and
indicates whether that duration has expired or how much time is
left before it expires. As an optimization, this object will
not call datetime.now() itself and must be updated via the
`update_time_now()` method to accurately measure elapsed
time.
May also be initialized with a duration of None, indicating
that it should never timeout and that `remaining()` should
return the forever value (defaulting to None).
"""
def __init__(self, duration, timenow=None):
self._time_now = timenow or datetime.now()
self._time_to_quit = None if duration is None else (self._time_now + duration)
def update_time_now(self, timenow):
"Call this to update the elapsed time."
self._time_now = timenow
def expired(self):
"Returns true if the indicated duration has passed since this was created."
return False if self._time_to_quit is None else (self._time_now >= self._time_to_quit)
def remaining(self, forever=None):
"""Returns a timedelta of remaining time until expiration, or 0 if the
duration has already expired. Returns forever if no timeout."""
return forever if self._time_to_quit is None else \
(timedelta(seconds=0) if self._time_now > self._time_to_quit else \
(self._time_to_quit - self._time_now))
def remainingSeconds(self, forever=None):
"""Similar to `remaining()`, but returns an floating point value of the
number of remaining seconds instead of returning a
timedelta object.
"""
return forever if self._time_to_quit is None else \
(0 if self._time_now > self._time_to_quit else \
timePeriodSeconds(self._time_to_quit - self._time_now))
def __str__(self):
if self._time_to_quit is None: return 'Forever'
if self.expired():
return 'Expired_for_%s'%(self._time_now - self._time_to_quit)
return 'Expires_in_' + str(self.remaining())
def __eq__(self, o):
if isinstance(o, timedelta):
o = ExpiryTime(o)
if self._time_to_quit == o._time_to_quit: return True
if self._time_to_quit == None or o._time_to_quit == None: return False
if self.expired() and o.expired(): return True
return abs(self._time_to_quit - o._time_to_quit) < timedelta(microseconds=1)
def __lt__(self, o):
try:
if self._time_to_quit is None and o._time_to_quit is None: return False
except Exception: pass
if self._time_to_quit is None: return False
if isinstance(o, timedelta):
o = ExpiryTime(o)
if o._time_to_quit is None: return True
return self._time_to_quit < o._time_to_quit
def __gt__(self, o):
try:
if self._time_to_quit is None and o._time_to_quit is None: return False
except Exception: pass
return not self.__lt__(o)
def __le__(self, o): return self.__eq__(o) or self.__lt__(o)
def __ge__(self, o): return self.__eq__(o) or self.__gt__(o)
def __ne__(self, o): return not self.__eq__(o)
def __bool__(self): return self.expired()
def __nonzero__(self): return self.expired()
|
[
"kquick@godaddy.com"
] |
kquick@godaddy.com
|
65a4c9be5fe8054649cd3ece5dbe367bc18a3e9e
|
2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5
|
/archive/55JumpGame.py
|
f503b433be7298c6acd75a571e1e0b2c43dd3322
|
[] |
no_license
|
doraemon1293/Leetcode
|
924b19f840085a80a9e8c0092d340b69aba7a764
|
48ba21799f63225c104f649c3871444a29ab978a
|
refs/heads/master
| 2022-10-01T16:20:07.588092
| 2022-09-08T02:44:56
| 2022-09-08T02:44:56
| 122,086,222
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 398
|
py
|
# coding=utf-8
'''
Created on 2017�3�7�
@author: Administrator
'''
class Solution(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
mini_true = len(nums) - 1
for i in xrange(len(nums) - 1, -1, -1):
if nums[i] >= (mini_true - i):
mini_true = i
return mini_true == 0
|
[
"yanhuang1293@gmail.com"
] |
yanhuang1293@gmail.com
|
db7d00585385b6589b6b11d0e3b16814d349cc17
|
a61ebd1507eeaa334aff44800b022ef0a258752a
|
/Code/CodeChef/remainder.py
|
37c511c002d48cc5390d47f937a2ec559c35e257
|
[
"MIT"
] |
permissive
|
Jimut123/competitive_programming
|
14ce0ab65414e6086763519f95487cddc91205a9
|
b4cdebaceee719c1a256921829ebafda11c515f5
|
refs/heads/master
| 2023-03-05T15:42:57.194176
| 2022-04-08T08:53:26
| 2022-04-08T08:53:26
| 156,541,142
| 1
| 0
| null | 2019-05-29T17:10:28
| 2018-11-07T12:09:55
|
C++
|
UTF-8
|
Python
| false
| false
| 152
|
py
|
#jimutbahanpal@yahoo.com
t = int(input())
l = []
for i in range(t):
m,n = map(int,input().split())
l.append(m%n)
for item in l:
print(item)
|
[
"jimutbahanpal@yahoo.com"
] |
jimutbahanpal@yahoo.com
|
aa9c2bf1b305cc6403a880948c9ce34f01af5268
|
2d19317ab9af09be9e6c8f0a25d4a43d4632b680
|
/first_project/urls.py
|
44eee9ee4a82b0a41d31cece1c0325b3b2218316
|
[] |
no_license
|
rudiq4/first_project
|
73837d297b21ccd7c706fc08373473e9e4cd8b29
|
ba0e987f863f599da9700c355875af76158b76f0
|
refs/heads/master
| 2021-01-25T13:41:56.102967
| 2018-03-27T14:15:26
| 2018-03-27T14:15:26
| 123,607,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
"""first_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('Auto.urls')),
url(r'^', include('Post.urls')),
url(r'user/', include('User.urls')),
] \
+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"rudikvovan@gmail.com"
] |
rudikvovan@gmail.com
|
8d4588530f69c619168a4cc1e6f9fb07ba1e6326
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_RelativeDifference/trend_Lag1Trend/cycle_12/ar_12/test_artificial_128_RelativeDifference_Lag1Trend_12_12_20.py
|
b92931e266c853cfe294b5ace5bc7d11ca7edc8c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 280
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 12);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
89474e153defaaa9938f24de4429e92defcd0542
|
d723b9c2dcfc9e3366928fd0ea18ee5ee19c2b3c
|
/backend/apps/detections/upload_sets.py
|
140c2d9bcfbee0781d15cc18a2aab00c879d9188
|
[] |
no_license
|
skarzi/yb_hackathon_2019
|
ff8266e89ae6fa74d57c61e4117d6fc176dba825
|
83c3d96795f6b14f97683ad5c998579adb3faaf4
|
refs/heads/master
| 2020-09-11T01:34:55.206979
| 2020-07-19T07:50:16
| 2020-07-19T07:50:16
| 221,895,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
from flask import current_app
from flask_uploads import (
IMAGES,
UploadSet,
configure_uploads,
)
detections = UploadSet(name='detections', extensions=IMAGES)
configure_uploads(current_app, (detections,))
|
[
"skarzynski_lukasz@protonmail.com"
] |
skarzynski_lukasz@protonmail.com
|
71402662a43efd9f3ece9bfc6b5fb824add27987
|
c676bf5e77ba43639faa6f17646245f9d55d8687
|
/tests/ut/python/ops/test_tuple_slice.py
|
ea5112995c06203210d7c6ca569e2949187c6f26
|
[
"Apache-2.0",
"BSD-3-Clause-Open-MPI",
"MPL-2.0-no-copyleft-exception",
"LGPL-2.1-only",
"BSD-3-Clause",
"MPL-2.0",
"MPL-1.0",
"Libpng",
"AGPL-3.0-only",
"MPL-1.1",
"LicenseRef-scancode-proprietary-license",
"MIT",
"IJG",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"GPL-2.0-only",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
zhengnengjin/mindspore
|
1e2644e311f54a8bd17010180198a46499e9c88f
|
544b859bb5f46611882749088b44c5aebae0fba1
|
refs/heads/master
| 2022-05-13T05:34:21.658335
| 2020-04-28T06:39:53
| 2020-04-28T06:39:53
| 259,522,589
| 2
| 0
|
Apache-2.0
| 2020-04-28T03:35:33
| 2020-04-28T03:35:33
| null |
UTF-8
|
Python
| false
| false
| 4,665
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_tuple_slice """
import numpy as np
import pytest
from mindspore import Tensor
from mindspore.nn import Cell
import mindspore.ops.operations as P
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.verify_exception \
import pipeline_for_verify_exception_for_case_by_case_config
class NetWork_1(Cell):
""" NetWork_1 definition """
def __init__(self):
super(NetWork_1, self).__init__()
self.addN = P.AddN()
def construct(self, tensor_tuple):
tensor_tuple_slice0 = tensor_tuple[:]
tensor_tuple_slice1 = tensor_tuple[:3]
tensor_tuple_slice2 = tensor_tuple[1:]
tensor_tuple_slice3 = tensor_tuple[2:5:1]
sum0 = self.addN(tensor_tuple_slice0)
sum1 = self.addN(tensor_tuple_slice1)
sum2 = self.addN(tensor_tuple_slice2)
sum3 = self.addN(tensor_tuple_slice3)
ret = sum0 + sum1 + sum2 + sum3
return ret
class NetWork_2(Cell):
""" NetWork_2 definition """
def __init__(self):
super(NetWork_2, self).__init__()
self.addN = P.AddN()
def construct(self, tensor_tuple):
tensor_tuple_slice0 = tensor_tuple[::-1]
tensor_tuple_slice1 = tensor_tuple[-1::-1]
tensor_tuple_slice2 = tensor_tuple[:-4:-1]
tensor_tuple_slice3 = tensor_tuple[-6:3]
tensor_tuple_slice4 = tensor_tuple[-1:-6:-2]
sum0 = self.addN(tensor_tuple_slice0)
sum1 = self.addN(tensor_tuple_slice1)
sum2 = self.addN(tensor_tuple_slice2)
sum3 = self.addN(tensor_tuple_slice3)
sum4 = self.addN(tensor_tuple_slice4)
ret = sum0 + sum1 + sum2 + sum3 + sum4
return ret
class NetWork_3(Cell):
""" NetWork_3 definition """
def __init__(self):
super(NetWork_3, self).__init__()
self.addN = P.AddN()
def construct(self, tensor_tuple, start, stop, step=1):
tensor_tuple_slice0 = tensor_tuple[start:stop:step]
res = self.addN(tensor_tuple_slice0)
return res
test_cases = [
('SlicePositive', {
'block': NetWork_1(),
'desc_inputs': [(Tensor(np.ones([2, 3, 4], np.int32)),
Tensor(np.zeros([2, 3, 4], np.int32)),
Tensor(np.ones([2, 3, 4], np.int32)),
Tensor(np.ones([2, 3, 4], np.int32)),
Tensor(np.zeros([2, 3, 4], np.int32)),
Tensor(np.ones([2, 3, 4], np.int32)))],
}),
('SliceNegative', {
'block': NetWork_2(),
'desc_inputs': [(Tensor(np.ones([2, 3, 4], np.int32)),
Tensor(np.zeros([2, 3, 4], np.int32)),
Tensor(np.ones([2, 3, 4], np.int32)),
Tensor(np.ones([2, 3, 4], np.int32)),
Tensor(np.zeros([2, 3, 4], np.int32)),
Tensor(np.ones([2, 3, 4], np.int32)))],
}),
]
test_cases_for_verify_exception = [
('SliceStartCross', {
'block': (NetWork_3(), {'exception': RuntimeError}),
'desc_inputs': [*(Tensor(np.ones([2, 3, 4], np.int32)),
Tensor(np.zeros([2, 3, 4], np.int32)),
Tensor(np.ones([2, 3, 4], np.int32)))],
}),
('SliceStepZero', {
'block': (NetWork_3(), {'exception': RuntimeError}),
'desc_inputs': [*(Tensor(np.ones([2, 3, 4], np.int32)),
Tensor(np.zeros([2, 3, 4], np.int32)),
Tensor(np.ones([2, 3, 4], np.int32)))],
}),
]
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_compile():
return test_cases
@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
return test_cases_for_verify_exception
|
[
"leon.wanghui@huawei.com"
] |
leon.wanghui@huawei.com
|
4b4a097a95f1da6da6dfa3927d7c83d66941ecdf
|
d8f0761acc94f9f1c0365e5a1716c9e17c6e4e16
|
/scrapers/bs4_selectors/selector.py
|
cabcd3ea0bed6889945755aac7fe5cf0cdf9cd8c
|
[] |
no_license
|
lesleyfon/one-time-scrapers
|
75ca851107d59b4f2b7cd816b2ae46ecd11d6bc0
|
6ee5443497c9e05924abf5704c16112beb740064
|
refs/heads/master
| 2023-05-02T12:58:21.693133
| 2021-05-21T13:09:57
| 2021-05-21T13:09:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,990
|
py
|
##############################
#
# Beautiful Soup cheat sheet
#
# by
#
# Code Monkey King
#
##############################
# Step 1: import packages
import requests
from bs4 import BeautifulSoup
# Step 2: define target URL
url = 'https://podsearch.com/listing/car-talk.html'
# Step 3: make HTTP request to the target URL
response = requests.get(url)
# Step 4: parse entire HTML document
content = BeautifulSoup(response.text, 'lxml')
# Step 5: parse PARENT element conteining needed data
parent = content.find('div', {'class': 'col-md-8 col-sm-12 col-xs-12 pdl0'})
# Step 6: parse CHILD element containing the exact data we need
child = parent.find('span').text
# Step 7: split the target string if needed
data = child.split(': ')[-1]
# Step 8: print data to console
print(data)
#####################################
#
# Useful data extraction techniques
#
#####################################
# extract FIRST data occurence by unique class
description = content.find('p', {'class': 'pre-line'}).text
print('\n', description)
# extract ALL data occurences by unique class
text = [
item.text
for item in
content.find_all('p', {'class': 'pre-line'})
]
print('\n', text)
# reference similar data occurences by index
print('\n', text[0])
print('\n', text[1])
# join list elements into one single string by whatever character
print('\n', '\n joined by new line \n'.join(text))
# reference element by whatever attribute (ID in this case)
button = content.find('button', {'id': 'headerSearchButton'}).text
print(button)
# extract FIRST other but textual node data element, e.g. HREF attribute or whatever
link = content.find('a')['href']
print(link)
# extract ALL other but textual node data elements, e.g. HREF attribute or whatever
links = [
link['href']
for link in
content.find_all('a')
# filter on condition if needed
#if link['href'] == 'https://podsearch.com/listing/rethinking-weight-loss.html'
]
print(links)
|
[
"freesoft.for.people@gmail.com"
] |
freesoft.for.people@gmail.com
|
ec63ed048f6211cd69e8bc2bc40d3e6f418eaf0d
|
336cd9225281befde93e01858ede15f70d3e5b47
|
/params/cartpole_obs/shm_default copy.py
|
2ab5c02726c7c6586d11fff9f5736ea8bffe8c5f
|
[] |
no_license
|
GuancongLuo/mpc-mpnet-py
|
7d6ba9f0c954185a724421091b1b098ec6d148e6
|
3d8d8ef743fd467fd2ffe177021edc6e852fd094
|
refs/heads/master
| 2023-02-06T03:49:06.072105
| 2020-12-07T11:01:08
| 2020-12-07T11:01:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,333
|
py
|
import numpy as np
def get_params():
params = {
'solver_type': "cem",
'n_problem': 1,
'n_sample': 32,
'n_elite': 8,
'n_t': 1,
'max_it': 5,
'converge_r': 0.1,
'dt': 2e-3,
'mu_u': [0],
'sigma_u': [400],
'mu_t': 0.5,
'sigma_t': 0.5,
't_max': 1,
'verbose': False, # True,#
'step_size': 1,
"goal_radius": 1.5,
"sst_delta_near": .3,
"sst_delta_drain": 0.1,
"goal_bias": 0.05,
"width": 4,
"hybrid": False,
"hybrid_p": 0.0,
"cost_samples": 1,
"mpnet_weight_path":"mpnet/exported/output/cartpole_obs/mpnet_10k_external_small_model.pt",
#"mpnet_weight_path":"mpnet/exported/output/cartpole_obs/mpnet_10k_external_v2_deep.pt",
# "mpnet_weight_path":"mpnet/exported/output/cartpole_obs/mpnet_10k.pt",
# "mpnet_weight_path":"mpnet/exported/output/cartpole_obs/mpnet_10k_nonorm.pt",
# "mpnet_weight_path":"mpnet/exported/output/cartpole_obs/mpnet_subsample0.5_10k.pt",
# "cost_predictor_weight_path": "mpnet/exported/output/cartpole_obs/cost_10k.pt",
"cost_predictor_weight_path": "mpnet/exported/output/cartpole_obs/cost_10k.pt",
"cost_to_go_predictor_weight_path": "mpnet/exported/output/cartpole_obs/cost_to_go_obs.pt",
"refine": False,
"using_one_step_cost": False,
"refine_lr": 0,
"refine_threshold": 0,
"device_id": "cuda:3",
"cost_reselection": False,
"number_of_iterations": 100000,
"weights_array": [1, 1, 1, 0.5],
'max_planning_time': 50,
'shm_max_steps': 40
}
cuda_batch_params = {
'solver_type' : "cem",
'n_problem' : 1,
'n_sample': 32,
'n_elite': 2,
'n_t': 1,
'max_it': 5,
'converge_r': 1e-1,
'dt': 2e-3,
'mu_u': [0],
'sigma_u': [400],
'mu_t': 0.4,
'sigma_t': 0.5,
't_max': 1,
'verbose': False,#True,#
'step_size': 1,
"goal_radius": 1.5,
"sst_delta_near": .6,
"sst_delta_drain": .3,
"goal_bias": 0.05,
"width": 4,
"hybrid": False,
"hybrid_p": 0.0,
"cost_samples": 5,
"mpnet_weight_path":"mpnet/exported/output/cartpole_obs/mpnet_10k_external_small_model.pt",
#"mpnet_weight_path":"mpnet/exported/output/cartpole_obs/mpnet_10k_external_v2_deep.pt",
# "mpnet_weight_path":"mpnet/exported/output/cartpole_obs/mpnet_10k.pt",
# "mpnet_weight_path":"mpnet/exported/output/cartpole_obs/mpnet_10k_nonorm.pt",
# "mpnet_weight_path":"mpnet/exported/output/cartpole_obs/mpnet_subsample0.5_10k.pt",
"cost_predictor_weight_path": "mpnet/exported/output/cartpole_obs/cost_10k.pt",
"cost_to_go_predictor_weight_path": "mpnet/exported/output/cartpole_obs/cost_to_go_obs.pt",
"refine": False,
"using_one_step_cost": False,
"refine_lr": 0.0,
"refine_threshold": 0.0,
"device_id": "cuda:0",
"cost_reselection": False,
"number_of_iterations": 40000,
"weights_array": [1, 1, 1, .5],
'max_planning_time': 50,
'shm_max_steps': 40
}
return cuda_batch_params
|
[
"you@example.com"
] |
you@example.com
|
5d8565f123ea80979f9cd6a4454521fd2ddff15c
|
b0de612c2f7d03399c0d02c5aaf858a72c9ad818
|
/armi/nuclearDataIO/cccc/tests/test_rzflux.py
|
93771c4e863ba214363f58516bdda65027c1eb5c
|
[
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
wangcj05/armi
|
2007e7abf4b422caca0157fc4405b7f45fc6c118
|
8919afdfce75451b291e45ca1bc2e03c044c2090
|
refs/heads/master
| 2022-12-22T00:05:47.561722
| 2022-12-13T16:46:57
| 2022-12-13T16:46:57
| 277,868,987
| 0
| 0
|
Apache-2.0
| 2020-07-07T16:32:40
| 2020-07-07T16:32:39
| null |
UTF-8
|
Python
| false
| false
| 2,673
|
py
|
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test rzflux reading and writing.
"""
# pylint: disable=missing-function-docstring,missing-class-docstring,protected-access,invalid-name,no-self-use,no-method-argument,import-outside-toplevel
import os
import unittest
from armi.nuclearDataIO.cccc import rzflux
from armi.utils.directoryChangers import TemporaryDirectoryChanger
THIS_DIR = os.path.dirname(__file__)
# This RZFLUX was made by DIF3D 11 in a Cartesian test case.
SIMPLE_RZFLUX = os.path.join(THIS_DIR, "fixtures", "simple_cartesian.rzflux")
class TestRzflux(unittest.TestCase):
"""Tests the rzflux class"""
def test_readRzflux(self):
"""Ensure we can read a RZFLUX file."""
flux = rzflux.readBinary(SIMPLE_RZFLUX)
self.assertEqual(
flux.groupFluxes.shape, (flux.metadata["NGROUP"], flux.metadata["NZONE"])
)
def test_writeRzflux(self):
"""Ensure that we can write a modified RZFLUX file."""
with TemporaryDirectoryChanger():
flux = rzflux.readBinary(SIMPLE_RZFLUX)
rzflux.writeBinary(flux, "RZFLUX2")
self.assertTrue(binaryFilesEqual(SIMPLE_RZFLUX, "RZFLUX2"))
# perturb off-diag item to check row/col ordering
flux.groupFluxes[2, 10] *= 1.1
flux.groupFluxes[12, 1] *= 1.2
rzflux.writeBinary(flux, "RZFLUX3")
flux2 = rzflux.readBinary("RZFLUX3")
self.assertAlmostEqual(flux2.groupFluxes[12, 1], flux.groupFluxes[12, 1])
def test_rwAscii(self):
"""Ensure that we can read/write in ascii format."""
with TemporaryDirectoryChanger():
flux = rzflux.readBinary(SIMPLE_RZFLUX)
rzflux.writeAscii(flux, "RZFLUX.ascii")
flux2 = rzflux.readAscii("RZFLUX.ascii")
self.assertTrue((flux2.groupFluxes == flux.groupFluxes).all())
def binaryFilesEqual(fn1, fn2):
"""True if two files are bytewise identical."""
with open(fn1, "rb") as f1, open(fn2, "rb") as f2:
for byte1, byte2 in zip(f1, f2):
if byte1 != byte2:
return False
return True
|
[
"noreply@github.com"
] |
wangcj05.noreply@github.com
|
70dd6b6891e4793418f9b327dcf8ddb1de563ef7
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/clouds_20200703183549.py
|
deecd69a4f52fe13e3dd7c9a278d545d91b636a2
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
def jumpingClouds(c):
i = 0
jumps = 0
while i < len(c)-2:
if c[i] == 0 and c[i+2] == 0:
print('here')
print('c---->',c[i],'i-->',i)
jumps +=1
i +=2
elif c[i] == 0 and c[i+1] == 0:
print('here2')
print('c---->',c[i],'i-->',i)
jumps +=1
i +=1
print(jumps)
jumpingClouds([0 ,0, 1, 0, 0, 1, 0])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
2dda85a9ba04d01eb6f79efbf26e1aa3f5fe73a8
|
b23f9b54f622032e71a80a497ca2d7dbd48469ad
|
/setup.py
|
d7560750ca56a0500f4ae92ea7dba81932711c29
|
[] |
no_license
|
h4ck3rm1k3/pycparserext
|
15cf0a02429f3fd6bad977cd612e74ca7b20b891
|
489fd9c4804e7b3f17760b0800cf81a930a2ec7e
|
refs/heads/master
| 2021-01-21T08:32:39.114178
| 2016-04-03T05:22:38
| 2016-04-03T05:22:38
| 55,293,358
| 0
| 0
| null | 2016-04-02T12:29:40
| 2016-04-02T12:29:40
| null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
#!/usr/bin/env python
# -*- coding: latin1 -*-
from setuptools import setup
setup(name="pycparserext",
version="2016.1",
description="Extensions for pycparser",
long_description=open("README.rst", "r").read(),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Utilities',
],
install_requires=[
"ply>=3.4",
"pycparser>=2.14",
],
author="Andreas Kloeckner",
url="http://pypi.python.org/pypi/pycparserext",
author_email="inform@tiker.net",
license="MIT",
packages=["pycparserext"])
|
[
"inform@tiker.net"
] |
inform@tiker.net
|
4bbf1ff6013d7a48ce63d817454fb8940a26487f
|
7d23fff61314842d6d7d8ca106382d163a04f139
|
/watch/models.py
|
3a423333ca2230cec766903c543e4a2e444032de
|
[
"MIT"
] |
permissive
|
GeGe-K/Neighbourhood
|
8b71bc789a72d34769436a5a912ffde87b3c014b
|
366667dff147141558732e5c6f5004fe4cff221e
|
refs/heads/master
| 2022-12-09T18:26:27.536704
| 2019-01-16T14:13:37
| 2019-01-16T14:13:37
| 165,236,886
| 0
| 0
|
MIT
| 2022-12-08T01:32:31
| 2019-01-11T12:00:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,462
|
py
|
from django.db import models
from django.contrib.auth.models import User
import datetime as dt
# Create your models here.
class Location(models.Model):
name = models.CharField(max_length=40)
def __str__(self):
return self.name
class Neighbourhood(models.Model):
'''
Neighbourhood class has the following properties
'''
neighbourhood_name = models.CharField(max_length = 30)
neighborhood_location = models.ForeignKey('Location', on_delete = models.CASCADE, null = True, blank =True)
occupants = models.IntegerField(null = True)
admin = models.ForeignKey(User, on_delete = models.CASCADE)
def create_neighbourhood(self):
self.save
def delete_neighbourhood(self):
self.delete()
def __str__(self):
return self.neighbourhood_name
@classmethod
def find_neighbourhood(cls, neighbourhood_id):
neighbourhood = cls.objects.get(id = neighbourhood_id)
return neighbourhood
def update_nighbourhood(self):
self.save()
def update_occupants(self):
self.occupants +=1
self.save()
class UserProfile(models.Model):
'''
UserProfile class has the following properties
'''
first_name = models.CharField(max_length=20, blank=True)
last_name = models.CharField(max_length=20,blank=True)
email = models.EmailField()
user = models.ForeignKey(User,on_delete=models.CASCADE)
neighborhood = models.ForeignKey('Neighbourhood', on_delete=models.CASCADE, null=True, blank=True)
def assign_neighbourhood(self, neighbourhood):
self.neighbourhood = neighborhood
self.save()
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
def __str__(self):
return f'{self.user.username}'
class Business(models.Model):
'''
Business class has the following properties
'''
business_name = models.CharField(max_length = 50)
owner = models.ForeignKey(User, on_delete = models.CASCADE)
business_neighbourhood = models.ForeignKey(
'Neighbourhood', on_delete = models.CASCADE)
email = models.EmailField()
def create_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def find_business(cls, business_id):
business = cls.objects.get(id = business_id)
return business
def update_business(self, business_name):
self.name = business_name
self.save()
def __str__(self):
return self.business_name
class EmergencyContacts(models.Model):
'''
Emergency contact class has the following properties
'''
name = models.CharField(max_length = 30)
contacts = models.CharField(max_length = 20)
email = models.EmailField()
neighbourhood_contact = models.ForeignKey(
'Neighbourhood', on_delete = models.CASCADE)
def __str__(self):
return f'{self.name},{self.email}'
class Post(models.Model):
'''
Post class has the following properties
'''
title = models.CharField(max_length=40)
post_description = models.TextField(blank = True)
posted_by = models.ForeignKey(User, on_delete = models.CASCADE)
post_hood = models.ForeignKey('Neighbourhood', on_delete = models.CASCADE)
posted_on = models.DateTimeField(auto_now_add = True)
def __str__(self):
return f'{self.title},{self.post_hood.neighbourhood_name}'
|
[
"gloriagivondo@gmail.com"
] |
gloriagivondo@gmail.com
|
3ef2026eb83017aa5c24665674b8d15767fb2008
|
51d8f003828d6ee6e6611f0e133b1e35cf400601
|
/ipaxi/ixbr_api/core/tests/use_cases_tests/test_service_use_case.py
|
830992ce3f08fe190f0264ea26fd19099f6e8a39
|
[
"Apache-2.0"
] |
permissive
|
tatubola/xpto
|
23b5f7a42c13c7d39eb321e52b9b4b2d1ef76c4c
|
6ed8cec23b06bccb1edf57e6b67af017f9a162d3
|
refs/heads/master
| 2020-04-02T11:05:24.560009
| 2018-10-23T17:41:10
| 2018-10-23T17:41:10
| 154,370,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
from unittest.mock import patch
from django.core.exceptions import ValidationError
from django.test import TestCase
from model_mommy import mommy
from ...models import ContactsMap, MLPAv4, Tag
from ...use_cases.service_use_case import delete_service_use_case
from ..login import DefaultLogin
class ServiceUseCaseTest(TestCase):
    """Tests for ``delete_service_use_case``."""

    # Model-layer hooks patched out for every test: validation plus the
    # IP/tag side effects triggered on save.
    PATCH_TARGETS = (
        'ixbr_api.core.models.HistoricalTimeStampedModel.full_clean',
        'ixbr_api.core.models.create_all_ips',
        'ixbr_api.core.models.create_tag_by_channel_port',
    )

    def setUp(self):
        DefaultLogin.__init__(self)
        # De-duplicated: the original repeated the patch/start/addCleanup
        # boilerplate three times.
        for target in self.PATCH_TARGETS:
            patcher = patch(target)
            patcher.start()
            self.addCleanup(patcher.stop)

    def _make_service(self):
        """Create a persisted MLPAv4 service on a PRODUCTION tag.

        Returns the ``(tag, service)`` pair used by both tests (fixture
        construction was previously duplicated in each test).
        """
        tag = mommy.make(Tag, status='PRODUCTION')
        contacts_map = mommy.make(ContactsMap)
        service = mommy.make(MLPAv4, tag=tag, make_m2m=True)
        service.asn.contactsmap_set.add(contacts_map)
        return tag, service

    def test_delete_service_use_case(self):
        # A valid service pk is deleted from the database.
        _, service = self._make_service()
        self.assertEqual(MLPAv4.objects.filter(pk=service.pk).count(), 1)
        delete_service_use_case(pk=service.pk)
        self.assertEqual(MLPAv4.objects.filter(pk=service.pk).count(), 0)

    def test_fail_delete_service_use_case(self):
        # Passing a pk that is not a service raises and leaves the
        # service untouched.
        tag, service = self._make_service()
        self.assertEqual(MLPAv4.objects.filter(pk=service.pk).count(), 1)
        with self.assertRaisesMessage(ValidationError,
                                      "Invalid service primary key"):
            delete_service_use_case(pk=tag.pk)
        self.assertEqual(MLPAv4.objects.filter(pk=service.pk).count(), 1)
|
[
"dmoniz@nic.br"
] |
dmoniz@nic.br
|
dca84f844680918ece78d15a17c804d7d4f4dc67
|
b7683c108e68ee2d28573edf55923eb34cc2f5ee
|
/3_Image_Processing/9_Contours/1_Intro/1_Contours_on_binary.py
|
9be454d9d82d0531c5524d093ed11ff8b9fa6b0f
|
[] |
no_license
|
aCuissot/openVC_win_py_tutorial
|
cc42ab1a1fb6eaefe5a91c7e1bb1926a776b0e01
|
7186b629747cb16f2bf42a03d2339d3dc3ea77bd
|
refs/heads/master
| 2020-05-18T12:17:04.619047
| 2019-07-10T13:45:00
| 2019-07-10T13:45:00
| 184,403,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
import numpy as np
import cv2 as cv

# Load the image and fail fast: cv.imread returns None (no exception)
# on a missing/unreadable file, which would otherwise surface as a
# confusing error inside cvtColor.
im = cv.imread('../../../Data/in/a.jpg')
if im is None:
    raise FileNotFoundError('Could not read ../../../Data/in/a.jpg')

imgray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
# Binarize: pixels > 127 become 255. cv.THRESH_BINARY replaces the
# original magic number 0 (same value, named).
_, thresh = cv.threshold(imgray, 127, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# Draw every contour (index -1) in green, thickness 3.
cv.drawContours(im, contours, -1, (0, 255, 0), 3)
"""
To draw only one contour, e.g. the 3rd one:
cv.drawContours(im, contours, 2, (0, 255, 0), 3)
or
cnt = contours[3]
cv.drawContours(img, [cnt], 0, (0,255,0), 3)
"""
cv.imshow('', im)
cv.waitKey(0)
cv.destroyAllWindows()
|
[
"harrypotter9752@gmail.com"
] |
harrypotter9752@gmail.com
|
56c2dc305f24ba5731f349d4284d1ede0e056579
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/ec2_write_1/ebs-default-kms-key-id_modify.py
|
9590bdb3b63ab3aa9cd39ae2bf409a3fec3eef4f
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,870
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Thin CLI wrapper: prompts the user for a single parameter and invokes
# ``aws ec2 modify-ebs-default-kms-key-id`` through the shared helper.
import os
import sys
# Make the repository root importable so that ``common`` resolves when
# this script is run directly from its subdirectory.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-ebs-default-kms-key-id.html
if __name__ == '__main__':
    """
    get-ebs-default-kms-key-id : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/get-ebs-default-kms-key-id.html
    reset-ebs-default-kms-key-id : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/reset-ebs-default-kms-key-id.html
    """
    # Help text shown to the user describing the accepted forms of the
    # --kms-key-id value (runtime string — kept verbatim).
    parameter_display_string = """
    # kms-key-id : The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true .
    You can specify the CMK using any of the following:
    Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
    Key alias. For example, alias/ExampleAlias.
    Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.
    Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
    AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.
    Amazon EBS does not support asymmetric CMKs.
    """
    add_option_dict = {}
    #######################################################################
    # parameter display string
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    # Delegate to the shared helper: service, subcommand, parameter name,
    # and the option dict assembled above.
    write_one_parameter("ec2", "modify-ebs-default-kms-key-id", "kms-key-id", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.