blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6a87a8e5d278ede9d444df333d662804bf68b370
|
fbd347498b4ec04440dd91da0f62d3bc8aa85bff
|
/ex.031.precoPassagemOnibus.py
|
a323c227e902c1c41edaa64acec1370c78d468cd
|
[
"MIT"
] |
permissive
|
romulorm/cev-python
|
254ae208b468aa4e23bf59838de389d045f7d8ef
|
b5c6844956c131a9e4e02355459c218739ebf8c5
|
refs/heads/master
| 2021-05-18T22:27:31.179430
| 2020-04-17T01:39:04
| 2020-04-17T01:39:04
| 251,455,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
# Python Exercise 031: ask for the trip distance in km and compute the bus
# ticket price: R$ 0.50 per km for trips up to 200 km, R$ 0.45 per km beyond.
distancia = float(input("Qual a distância da viagem, em quilômetros? "))
if distancia > 200:
    preco = distancia * 0.45
else:
    preco = distancia * 0.50
print("Você vai pagar R$ {:.2f} por uma viagem de {} quilômetros.".format(preco, int(distancia)))
|
[
"62728349+romulorm@users.noreply.github.com"
] |
62728349+romulorm@users.noreply.github.com
|
0d0379c91606561fd1684c3b56b5a59c7ac79ac6
|
2147b03faa984c3f82b452bfa2e44738762c0620
|
/users/models.py
|
0e5dba17c8ef60df08f56723c7b7cee4655f5822
|
[] |
no_license
|
crowdbotics-apps/pawn-shop-30678
|
44d485d1e4bf5540320518921750293c8649ea53
|
844572b9e385948fdfbe1c3113481bf0961e810e
|
refs/heads/master
| 2023-07-30T16:02:19.844017
| 2021-09-19T11:07:57
| 2021-09-19T11:07:57
| 408,103,844
| 2
| 0
| null | 2021-10-06T00:15:01
| 2021-09-19T11:05:16
|
Python
|
UTF-8
|
Python
| false
| false
| 890
|
py
|
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
    # WARNING!
    """
    Some officially supported features of Crowdbotics Dashboard depend on the initial
    state of this User model (Such as the creation of superusers using the CLI
    or password reset in the dashboard). Changing, extending, or modifying this model
    may lead to unexpected bugs and or behaviors in the automated flows provided
    by Crowdbotics. Change it at your own risk.
    This model represents the User instance of the system, login system and
    everything that relates with an `User` is represented by this model.
    """

    # Optional human-readable display name, in addition to the username/
    # first_name/last_name fields inherited from AbstractUser.
    name = models.CharField(
        null=True,
        blank=True,
        max_length=255,
    )
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
9327c7c353f57edc531a78952f182e4b45b0c405
|
a46e3ab5260c819e2b1a20343205b248a76314f3
|
/pycharm_dict_str_split_unexpected.py
|
9c4c8332fb5b3185d40c302f5e19bc170359ecf9
|
[] |
no_license
|
albertz/playground
|
97ea882eb077e341c69f9e593918d38f89f8bc64
|
f30c6330d855056f1756eeb558aa51fe72040c4e
|
refs/heads/master
| 2023-08-16T16:33:01.780047
| 2023-07-31T11:46:58
| 2023-07-31T11:46:58
| 3,687,829
| 10
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
"""
https://youtrack.jetbrains.com/issue/PY-43916
"""
s = "a=b,c=d"
opts = dict([opt.split("=", 1) for opt in s.split(",")])
print(opts)
|
[
"albzey@gmail.com"
] |
albzey@gmail.com
|
74dfd93a93cab894593cc99b17f0005ace2dc769
|
3e71f4d64b63e74a61447994a68f497f66c5e905
|
/nnutil/model/adversarial_transformer.py
|
b216c878941bcd5cfa1c15722b704591dca182ec
|
[
"BSD-3-Clause"
] |
permissive
|
aroig/nnutil
|
40a648ec56214dbad8610ec8d9c1bdc642f136e9
|
88df41ee89f592a28c1661ee8837dd8e8ca42cf3
|
refs/heads/master
| 2020-03-25T18:58:01.708160
| 2019-06-18T22:00:54
| 2019-06-18T22:00:54
| 144,058,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,274
|
py
|
import os
import numpy as np
import tensorflow as tf
import nnutil as nn
from .base_model import BaseModel
class AdversarialTransformer(BaseModel):
    """Image-to-image transformer model driven through tf.estimator.

    Subclasses provide the concrete architectures by overriding
    `transformer_network` and `discriminator_network`; each must return the
    layer list that `nn.layers.Segment` wraps.

    NOTE(review): despite the name, no adversarial loss term is computed yet —
    the discriminator segment is built but never used by `loss_function`.
    """

    def __init__(self, name, shape):
        """shape: shape of a single image, without the batch dimension."""
        super(AdversarialTransformer, self).__init__(name)
        self._shape = shape
        self._transformer = None
        self._discriminator = None

    @property
    def input_shape(self):
        return self._shape

    @property
    def output_shape(self):
        # The transformer maps an image onto an image of the same shape.
        return self._shape

    @property
    def layers(self):
        return self._transformer.layers

    def transformer_network(self, params):
        """Subclass hook: return the transformer's layer list."""
        raise NotImplementedError

    def discriminator_network(self, params):
        """Subclass hook: return the discriminator's layer list."""
        raise NotImplementedError

    def features_placeholder(self, batch_size=1):
        """Placeholders for feeding source/target image batches."""
        return {
            'source': tf.placeholder(dtype=tf.float32,
                                     shape=(batch_size,) + self._shape,
                                     name='source'),
            'target': tf.placeholder(dtype=tf.float32,
                                     shape=(batch_size,) + self._shape,
                                     name='target')
        }

    def loss_function(self, tgt_image, synth_image, params):
        """L2 reconstruction loss plus sigmoid-ramped regularization losses."""
        step = tf.train.get_global_step()

        # Sample weights, so that easy samples weight less.
        # NOTE(review): read but not used yet — see the importance-sampling TODO.
        sample_bias = params.get('sample_bias', 0.0)
        sample_bias_step = params.get('sample_bias_step', 0)

        # Regularizer weight and the global step around which it ramps in.
        regularizer = params.get('regularizer', 0.0)
        regularizer_step = params.get('regularizer_step', 0)

        # Calculate total loss function
        with tf.name_scope('losses'):
            # Per-sample L2 distance between synthesized and target images.
            sample_loss = tf.norm(nn.util.flatten(synth_image - tgt_image), ord=2, axis=1)

            # TODO: perform importance sampling here
            model_loss = tf.reduce_mean(sample_loss)

            regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            # Sigmoid ramp: regularization fades in near regularizer_step.
            regularization_dampening = tf.sigmoid(tf.cast(step - regularizer_step, dtype=tf.float32) / 10.0)
            total_loss = model_loss + regularizer * regularization_dampening * sum([l for l in regularization_losses])

        tf.summary.scalar("model_loss", model_loss)
        return total_loss

    def model_fn(self, features, labels, mode, params, config):
        """tf.estimator model_fn dispatching to the train/eval/predict specs."""
        src_image = features['source']
        tgt_image = features['target']

        training = (mode == tf.estimator.ModeKeys.TRAIN)

        self._transformer = nn.layers.Segment(self.transformer_network(params), name="transformer")
        # BUG FIX: the discriminator segment was previously built from
        # transformer_network(params), so discriminator_network() was never
        # called; construct it from the correct subclass hook.
        self._discriminator = nn.layers.Segment(self.discriminator_network(params), name="discriminator")

        synth_image = self._transformer.apply(src_image, training=training)

        if mode == tf.estimator.ModeKeys.PREDICT:
            return self.prediction_estimator_spec(src_image, synth_image, params, config)

        loss = self.loss_function(tgt_image, synth_image, params)

        # Configure the training and eval phases
        if mode == tf.estimator.ModeKeys.TRAIN:
            return self.training_estimator_spec(loss, src_image, synth_image, tgt_image, params, config)
        else:
            return self.evaluation_estimator_spec(loss, src_image, synth_image, tgt_image, params, config)

    def training_estimator_spec(self, loss, src_image, synth_image, tgt_image, params, config):
        """Build the TRAIN spec: Adam step plus image/layer summaries."""
        step = tf.train.get_global_step()

        learning_rate = params.get('learning_rate', 0.0001)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                           beta1=0, beta2=0.9)

        # Manually apply gradients. We want the gradients for summaries. We need
        # to apply them manually in order to avoid having duplicate gradient ops.
        gradients = optimizer.compute_gradients(loss)

        # Make sure we update averages on each training step
        extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(extra_ops):
            train_op = optimizer.apply_gradients(gradients, global_step=step)

        nn.summary.image_transformation(
            "transformation",
            src_image[0, :],
            synth_image[0, :])

        nn.summary.image_transformation(
            "truth",
            tgt_image[0, :],
            synth_image[0, :])

        nn.summary.layers("layer_summary_{}".format(self._transformer.name),
                          layers=self._transformer.layers,
                          gradients=gradients,
                          activations=self._transformer.layer_activations)

        nn.summary.layers("layer_summary_{}".format(self._discriminator.name),
                          layers=self._discriminator.layers,
                          gradients=gradients,
                          activations=self._discriminator.layer_activations)

        training_hooks = []

        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.TRAIN,
            loss=loss,
            training_hooks=training_hooks,
            train_op=train_op)

    def evaluation_estimator_spec(self, loss, src_image, synth_image, tgt_image, params, config):
        """Build the EVAL spec with a summary-saving hook."""
        eval_metric_ops = {}
        evaluation_hooks = []

        # Make sure we run update averages on each training step
        extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(extra_ops):
            loss = tf.identity(loss)

        eval_dir = os.path.join(config.model_dir, "eval")
        evaluation_hooks.append(
            nn.train.EvalSummarySaverHook(
                output_dir=eval_dir,
                summary_op=tf.summary.merge_all()
            )
        )

        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL,
            loss=loss,
            evaluation_hooks=evaluation_hooks,
            eval_metric_ops=eval_metric_ops)

    def prediction_estimator_spec(self, src_image, synth_image, params, config):
        """Build the PREDICT spec returning the synthesized image."""
        predictions = {
            "synth": synth_image
        }

        exports = {}

        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs=exports)
|
[
"abdo.roig@gmail.com"
] |
abdo.roig@gmail.com
|
f55c8a4be2c1181299895c4fe33e44f6c2de40c5
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/ia95ckhN5ztgfJHe4_7.py
|
3ba1f214826ddee32eec6ed44940399db61237ca
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
"""
In JavaScript, there are two types of comments:
1. Single-line comments start with `//`
2. Multi-line or inline comments start with `/*` and end with `*/`
The input will be a sequence of `//`, `/*` and `*/`. **Every`/*` must have a
`*/` that immediately follows it**. To add, there can be **no single-line
comments in between multi-line comments** in between the `/*` and `*/`.
Create a function that returns `True` if comments are properly formatted, and
`False` otherwise.
### Examples
comments_correct("//////") ➞ True
# 3 single-line comments: ["//", "//", "//"]
comments_correct("/**//**////**/") ➞ True
# 3 multi-line comments + 1 single-line comment:
# ["/*", "*/", "/*", "*/", "//", "/*", "*/"]
comments_correct("///*/**/") ➞ False
# The first /* is missing a */
comments_correct("/////") ➞ False
# The 5th / is single, not a double //
### Notes
N/A
"""
def comments_correct(txt):
    """Return True if txt — a sequence of '//', '/*' and '*/' tokens — is
    properly formatted: every '/*' must be immediately followed by '*/'.

    Fix over the original: the chunking loop stopped at len(txt)-2 and so
    dropped the final two-character token, which made a trailing unmatched
    '/*' (e.g. "///*") incorrectly return True.
    """
    # An odd-length string cannot split into two-character tokens.
    if len(txt) % 2 != 0:
        return False
    chunks = [txt[n:n + 2] for n in range(0, len(txt), 2)]
    for i, chunk in enumerate(chunks):
        # Each '/*' must be immediately closed by a '*/' token.
        if chunk == '/*' and (i + 1 == len(chunks) or chunks[i + 1] != '*/'):
            return False
    return True
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
6f0da9774e428291d826ce32f0b2b035b3d95848
|
adb6fe118613d60af9abfa73055599d205cf2108
|
/视觉/XLwork/XL4/XL4_2.py
|
5566d364d670d79d8f81b8ab2cda2c8a9d120eab
|
[] |
no_license
|
lijianmin01/Third_college_grade
|
18845f666a7fc1ece24d2ee45ee287e1efc0ca11
|
5e5b1f64375506de79ed94c8b2fc266fe1af4d6a
|
refs/heads/master
| 2022-12-31T15:26:05.521297
| 2020-10-17T01:34:32
| 2020-10-17T01:34:32
| 295,317,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,738
|
py
|
import cv2
import numpy as np
# 全局变量
# 第几张图片 0 第一张 1 第二张
img_flag = 0
# 第一张图片
def on_EVENT_LBUTTONDOWN1(event, x, y,flags, param):
    """Mouse callback for image 1: record each clicked point and mark it.

    Appends x/y to the module-level lists `a`/`b` and draws a red dot plus
    the coordinate text onto the shared `img1`.
    """
    # Each left click stores one coordinate pair and annotates the image.
    if event == cv2.EVENT_LBUTTONDOWN:
        xy = "%d,%d" % (x, y)
        a.append(x)
        b.append(y)
        cv2.circle(img1, (x, y), 1, ( 0, 0,255), thickness=4)
        cv2.putText(img1, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,1.0, (0, 0, 0), thickness=1)
        cv2.imshow("image1", img1)
# 第二张图片
def on_EVENT_LBUTTONDOWN2(event, x, y,flags, param):
    """Mouse callback for image 2: record each clicked point and mark it.

    Appends x/y to the module-level lists `a1`/`b1` and draws a blue dot plus
    the coordinate text onto the shared `img2`.
    """
    # Each left click stores one coordinate pair and annotates the image.
    if event == cv2.EVENT_LBUTTONDOWN:
        xy = "%d,%d" % (x, y)
        a1.append(x)
        b1.append(y)
        cv2.circle(img2, (x, y), 1, (255, 0, 0), thickness=4)
        cv2.putText(img2, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,1.0, (0, 0, 0), thickness=1)
        cv2.imshow("image2", img2)
# 获取同名点对
def get_same_point(img_flag):
    """Interactively collect matched point pairs from both images.

    Shows each image in turn; left clicks are recorded by the mouse
    callbacks into the module-level lists a/b (image 1) and a1/b1
    (image 2). Press any key to close a window and move on.

    Returns (img_sq_1, img_sq_2): Nx3 homogeneous [x, y, 1] coordinates
    for image 1 and Nx2 [x, y] coordinates for image 2.

    NOTE(review): the img_flag parameter is unused.
    """
    # First image: collect clicks until a key is pressed.
    cv2.namedWindow("image1")
    cv2.setMouseCallback("image1", on_EVENT_LBUTTONDOWN1)
    cv2.imshow("image1", img1)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Second image: same procedure.
    cv2.namedWindow("image2")
    cv2.setMouseCallback("image2", on_EVENT_LBUTTONDOWN2)
    cv2.imshow("image2", img2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # print(a)
    # print(b)
    # print(a1)
    # print(b1)
    len_1 = len(a)
    # img_sq_1 keeps a trailing 1 per row (homogeneous coordinates).
    img_sq_1 = np.ones((len_1,3),dtype='int')
    img_sq_2 = np.ones((len_1,2), dtype='int')
    img_sq_1[:,0] = a[:]
    img_sq_1[:,1] = b[:]
    img_sq_2[:,0] = a1[:]
    img_sq_2[:,1] = b1[:]
    print(img_sq_1)
    print(img_sq_2)
    return img_sq_1,img_sq_2
if __name__ == '__main__':
    # Collect the matching point pairs of image 1 and image 2 separately.
    # Clicked (x, y) coordinates for the first image:
    a,b = [], []
    # Clicked (x, y) coordinates for the second image:
    a1, b1 = [], []
    img1 = cv2.imread(r'T:\imgs\XL4\klcc_a.png')
    img2 = cv2.imread(r"T:\imgs\XL4\klcc_b.png")
    # NOTE(review): slicing a numpy array (img1[:]) returns a *view*, not a
    # copy — the markers drawn by the callbacks also show up in these
    # "backups". img1.copy() would make a real backup; confirm intent.
    img1_copy = img1[:]
    img2_copy = img2[:]
    # img_sq_1, img_sq_2 = get_same_point(img_flag)
    # Interactively pick the matching point pairs.
    img_sq_1,img_sq_2 = get_same_point(img_flag)
    """
    [[318 250 1]
    [153 318 1]
    [344 351 1]]
    [[243 270]
    [ 74 342]
    [272 369]]
    # 为了避免重复获取同点对,所以直接获取,后期删了
    X = np.mat([[318,250,1],[153,318,1],[344,351,1]])
    U = np.mat([[243,270],[ 74,342],[272,369]])
    """
    X = np.mat(img_sq_1)
    U = np.mat(img_sq_2)
    # Affine transform A solving X * A = U (X holds homogeneous coords).
    A = np.dot(X.I,U)
    print(A)
    # The images were drawn on while picking points, so restore the originals.
    # (See the view-aliasing note above — these are views, not real restores.)
    img1 = img1_copy[:]
    img2 = img2_copy[:]
    M1,N1 = img1.shape[0],img1.shape[1]
    M2, N2 = img2.shape[0], img2.shape[1]
    img1_cnt = img1[:]
    img2_cnt = img2[:]
    # Build one large output canvas.
    # Coordinates of the transformed image (X side).
    save_img2 = []
    for x in range(M1):
        for y in range(N1):
            cnt_sq = np.array([x, y, 1]).dot(A)
            cnt_sq = [int(cnt_sq.tolist()[0][0]),int(cnt_sq.tolist()[0][1])]
            save_img2.append(cnt_sq)
    # Reference image coordinates (U side).
    save_img1 = []
    for x in range(M2):
        for y in range(N2):
            save_img1.append([x,y])
    save_img1 = np.array(save_img1)
    # Find the minimum coordinate of the transformed image.
    save_img2=np.array(save_img2)
    # NOTE(review): axis=1 takes the per-row minimum (min of each point's x
    # and y); axis=0 (per-column min → [min_x, min_y]) looks intended here.
    min_h = np.min(save_img2,axis=1)
    # Record the minimal x and y coordinates.
    x_min,y_min = min_h[0],min_h[1]
    img3 = np.zeros([1000,1000, 3], np.uint8)
    # NOTE(review): these slices are also views, so the += shifts below
    # mutate save_img1_1/save_img2_1 too — confirm whether copies were meant.
    save_img1_1 = save_img1[:]
    save_img2_1 = save_img2[:]
    # Shift all coordinates so that nothing falls at negative indices.
    if x_min<0:
        cnt = abs(x_min)
        for i in range(len(save_img1)):
            save_img1[i][0]+=cnt
        for i in range(len(save_img2)):
            save_img2[i][0]+=cnt
    if y_min<0:
        cnt = abs(y_min)
        for i in range(len(save_img1)):
            save_img1[i][1]+=cnt
        for i in range(len(save_img2)):
            save_img2[i][1]+=cnt
    # print(save_img1_1)
    # print(save_img2_1)
    # Copy both images pixel by pixel onto the canvas; out-of-range
    # destinations are silently skipped.
    for i in range(len(save_img1)):
        try:
            img3[save_img1_1[i][0],save_img1_1[i][1]]=img1[save_img1[i][0],save_img1[i][1]]
        except:
            # img3[save_img1_1[i][0], save_img1_1[i][1]] = img1[save_img1[i][0]-1, save_img1[i][1]-1]
            continue
    for i in range(len(save_img2)):
        try:
            img3[save_img2_1[i][0],save_img2_1[i][1]]=img2[save_img2[i][0],save_img2[i][1]]
        except:
            #img3[save_img1_1[i][0], save_img1_1[i][1]] = img1[save_img2[i][0]-1, save_img2[i][1]-1]
            continue
    cv2.imshow("3",img3)
    cv2.waitKey(0)
|
[
"lijianmin01@126.com"
] |
lijianmin01@126.com
|
b741ff399ab76da7346243c4a6b8b998babe038f
|
b3f33d53507b09bc8193c5fc113fe2f28d95f6da
|
/empinfo/forms.py
|
4e01d73915ed2eae414d2d03cf9e13da6356e549
|
[] |
no_license
|
Jayant2185/Django_Employee
|
f07beb3b3497a84c75ba43a623a7ebb7390b18b4
|
ac3650670ddecd634b847bb39c3f0e9372b1bb4f
|
refs/heads/master
| 2020-04-24T07:49:20.986569
| 2019-02-21T06:13:06
| 2019-02-21T06:13:06
| 171,810,493
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
from django import forms
from empinfo.models import Employee
class EmployeeForm(forms.ModelForm):
    """ModelForm exposing every field of the Employee model."""
    class Meta:
        model = Employee
        # "__all__" generates a form field for every model field.
        fields = "__all__"
|
[
"me@gmail.com"
] |
me@gmail.com
|
07f5f0febe13636216e15925edf3d44f1db27c2f
|
2d80791a21a049243dd2bf7dd95a46c4d4b2510b
|
/domains/datastructures/linkedlist/CompareLists.py
|
c0436a2864f8f8fe985aca810532048acb762799
|
[] |
no_license
|
jayrav13/jr-hackerrank
|
909346d101fdf08a54ff75ec2ee39c90e661b251
|
b7e0130fdd1c2eb4436871be3255200eac8ca3d9
|
refs/heads/master
| 2021-01-15T15:36:16.772814
| 2016-11-21T18:59:15
| 2016-11-21T18:59:15
| 48,657,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
"""
Compare two linked list
head could be None as well for empty list
Node is defined as
return back the head of the linked list in the below method.
"""
class Node(object):
    """A singly linked list node holding a payload and a next pointer."""

    def __init__(self, data=None, next_node=None):
        self.next = next_node
        self.data = data
def CompareLists(headA, headB):
    """Return 1 if both linked lists hold the same value sequence, else 0."""
    while headA is not None and headB is not None:
        if headA.data != headB.data:
            return 0
        headA = headA.next
        headB = headB.next
    # Equal only if both lists ended at the same time.
    return 1 if headA is None and headB is None else 0
|
[
"jayrav13@gmail.com"
] |
jayrav13@gmail.com
|
f875953eeb6bc655bf365406127b7e55f238a6e8
|
d91fe0e972f2befab71987a732111b56245c5efc
|
/example_sm_pkg/nodes/subscriber_publisher_class_example.py
|
9c626cf9b3902a827c6dde41ecd95bc0f3438280
|
[] |
no_license
|
karla3jo/robocup2014
|
2064e8102d5a3251ae582b7ed37ab80d0398f71c
|
3d8563956fd1276b7e034402a9348dd5cb3dc165
|
refs/heads/master
| 2020-07-26T08:22:13.932741
| 2014-07-14T13:58:48
| 2014-07-14T13:58:48
| 21,850,936
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 12:00:00 2013
@author: sampfeiffer
"""
import roslib; roslib.load_manifest('example_sm_pkg')
import rospy
import sys
from std_msgs.msg import String
class myNode():
    """Minimal ROS node example: subscribes to one String topic and
    re-publishes a canned String message on another topic."""

    def __init__(self, argument_one):
        # my class variables
        self.my_variable_string = "I'm a string yo!"
        # Publish once on construction, then again on every received message.
        self.subs = rospy.Subscriber('my_topic_to_subscribe', String, self.myCallback)
        # latch=True: late subscribers still receive the last published message.
        self.pub = rospy.Publisher('my_topic_to_publish', String, latch=True)
        self.myMethod()

    def myCallback(self, data):
        # NOTE(review): the received data is ignored; only the fixed string below
        # is ever published.
        rospy.loginfo("Received from topic data!")
        self.myMethod()

    def myMethod(self):
        rospy.loginfo("Using the method!")
        publish_this_thing = String("I'm the content of a string!")
        self.pub.publish(publish_this_thing)
if __name__ == '__main__':
    rospy.init_node('node_name')
    # The script requires at least one command-line argument.
    # (Python 2 print statement — this file targets Python 2 / rospy.)
    if len(sys.argv) < 2:
        print "Error, we need an arg!"
        rospy.loginfo("No args given, closing...")
        exit()
    node = myNode("this is an argument")
    rospy.spin()
|
[
"sammypfeiffer@gmail.com"
] |
sammypfeiffer@gmail.com
|
995e5340e3e9b0c8c5de25b5387d45937c15f28d
|
ac7e039a70ba627f6d9a7a02c9a8849ed5e18a89
|
/unep.project-database/tags/0.5/reports/ProjectsByStaffReportFactory.py
|
098ba274cf4516efa70e202f34be5109ec024408
|
[] |
no_license
|
jean/project-database
|
65a2559844175350351ba87e820d25c3037b5fb2
|
e818d322ec11d950f2770cd5324fbcd1acaa734d
|
refs/heads/master
| 2021-01-01T06:27:24.528764
| 2014-01-31T11:11:45
| 2014-01-31T11:11:45
| 32,125,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
from Report import Report
class ProjectsByStaffReportFactory(object):
    """Factory building the skeleton of the 'Projects by Staff' report."""

    def __init__(self, context, **kw):
        self.context = context
        self.params = kw

    def getReport(self):
        """Create the report with its headers; rows are not filled in yet."""
        name = "Projects by Staff Report"
        report = Report(name)
        report.setReportHeaders((name,))
        columns = (
            'Staff Name',
            'Staff position',
            'Focal Area',
            'Project Title',
            'Project Cycle Stage',
            'Last milestone action',
            'Actual date',
            'Project Grant ',
        )
        report.setTableHeaders((columns,))
        # XXX Implement this
        # report.setTableRows()
        # report.setTableTotals([])
        # report.setReportFooters()
        return report
|
[
"jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d"
] |
jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d
|
ea9d3ee3230d73f421fb22d2f59af8f113c81b91
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/load_balancing_rule_py3.py
|
695a8e63e56403f3519346e6c5ce8aa055f7b53e
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707
| 2018-03-29T17:16:15
| 2018-03-29T17:16:15
| 21,287,134
| 1
| 3
|
MIT
| 2019-10-25T15:56:00
| 2014-06-27T19:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 5,391
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class LoadBalancingRule(SubResource):
    """A load balancing rule for a load balancer.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource Identifier.
    :type id: str
    :param frontend_ip_configuration: A reference to frontend IP addresses.
    :type frontend_ip_configuration:
     ~azure.mgmt.network.v2015_06_15.models.SubResource
    :param backend_address_pool: A reference to a pool of DIPs. Inbound
     traffic is randomly load balanced across IPs in the backend IPs.
    :type backend_address_pool:
     ~azure.mgmt.network.v2015_06_15.models.SubResource
    :param probe: The reference of the load balancer probe used by the load
     balancing rule.
    :type probe: ~azure.mgmt.network.v2015_06_15.models.SubResource
    :param protocol: Required. The transport protocol for the external
     endpoint. Possible values are 'Udp' or 'Tcp'. Possible values include:
     'Udp', 'Tcp'
    :type protocol: str or
     ~azure.mgmt.network.v2015_06_15.models.TransportProtocol
    :param load_distribution: The load distribution policy for this rule.
     Possible values are 'Default', 'SourceIP', and 'SourceIPProtocol'.
     Possible values include: 'Default', 'SourceIP', 'SourceIPProtocol'
    :type load_distribution: str or
     ~azure.mgmt.network.v2015_06_15.models.LoadDistribution
    :param frontend_port: Required. The port for the external endpoint. Port
     numbers for each rule must be unique within the Load Balancer. Acceptable
     values are between 1 and 65534.
    :type frontend_port: int
    :param backend_port: The port used for internal connections on the
     endpoint. Acceptable values are between 1 and 65535.
    :type backend_port: int
    :param idle_timeout_in_minutes: The timeout for the TCP idle connection.
     The value can be set between 4 and 30 minutes. The default value is 4
     minutes. This element is only used when the protocol is set to TCP.
    :type idle_timeout_in_minutes: int
    :param enable_floating_ip: Configures a virtual machine's endpoint for the
     floating IP capability required to configure a SQL AlwaysOn Availability
     Group. This setting is required when using the SQL AlwaysOn Availability
     Groups in SQL server. This setting can't be changed after you create the
     endpoint.
    :type enable_floating_ip: bool
    :param provisioning_state: Gets the provisioning state of the PublicIP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Parameters AutoRest marks as required for serialization to Azure.
    _validation = {
        'protocol': {'required': True},
        'frontend_port': {'required': True},
    }

    # Maps Python attribute names to their serialized JSON paths and types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'probe': {'key': 'properties.probe', 'type': 'SubResource'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'load_distribution': {'key': 'properties.loadDistribution', 'type': 'str'},
        'frontend_port': {'key': 'properties.frontendPort', 'type': 'int'},
        'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, *, protocol, frontend_port: int, id: str=None, frontend_ip_configuration=None, backend_address_pool=None, probe=None, load_distribution=None, backend_port: int=None, idle_timeout_in_minutes: int=None, enable_floating_ip: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
        # Generated code: keyword-only constructor mirroring _attribute_map.
        super(LoadBalancingRule, self).__init__(id=id, **kwargs)
        self.frontend_ip_configuration = frontend_ip_configuration
        self.backend_address_pool = backend_address_pool
        self.probe = probe
        self.protocol = protocol
        self.load_distribution = load_distribution
        self.frontend_port = frontend_port
        self.backend_port = backend_port
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.enable_floating_ip = enable_floating_ip
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
|
[
"noreply@github.com"
] |
lmazuel.noreply@github.com
|
32299d75b478e539707e32ef50bd264407775fda
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02796/s396415723.py
|
183a9157222ec19c1853c4eb1252eb8dcabd5ca6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
# Greedy interval scheduling: each line gives a center x and arm length l,
# occupying the range [x - l, x + l]. Count the maximum number of ranges
# that can be kept pairwise non-overlapping (touching endpoints allowed).
count = int(input())
spans = []
for _ in range(count):
    x, l = map(int, input().split())
    spans.append((x - l, x + l))
spans.sort(key=lambda span: span[1])
right_edge = -10 ** 9
chosen = 0
for left, right in spans:
    # Take the span if it starts at or after the last chosen right edge.
    if right_edge <= left:
        chosen += 1
        right_edge = right
print(chosen)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
122b76e57de2082a15a22ffe30f332ef29d31dd6
|
8245ecc361319340b5b196b76dc8cf1d5075c3b1
|
/reservations/views.py
|
6872d1de2dbff040b8a1412b6e1b63bdd5a01625
|
[] |
no_license
|
Korimse/airbnb_clone
|
bc267e384fc098f179387ba3153614c71f999edc
|
c20a82cb196ad9ad6b697cf874bca34b5461c87e
|
refs/heads/master
| 2023-06-30T11:17:53.412006
| 2021-08-03T16:30:47
| 2021-08-03T16:30:47
| 391,269,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,313
|
py
|
import datetime
from django.views.generic import View
from django.contrib import messages
from django.shortcuts import render, redirect, reverse
from django.http import Http404
from rooms import models as room_models
from reviews import forms as review_forms
from . import models
class CreateError(Exception):
    """Raised when the requested day is already booked for the room."""
    pass
def create(request, room, year, month, day):
    """Create a one-night reservation for `room` starting on the given date.

    EAFP flow: finding an existing BookedDay for that date means the room is
    taken, so CreateError is raised and handled together with a missing room;
    BookedDay.DoesNotExist is the success path.
    """
    try:
        date_obj = datetime.datetime(year, month, day)
        room = room_models.Room.objects.get(pk=room)
        # If this lookup succeeds the day is already booked — abort.
        models.BookedDay.objects.get(day=date_obj, reservation__room=room)
        raise CreateError()
    except (room_models.Room.DoesNotExist, CreateError):
        messages.error(request, "Can't Reserve That Room")
        return redirect(reverse("core:home"))
    except models.BookedDay.DoesNotExist:
        # No booking on that day: reserve one night (check-out next day).
        reservation = models.Reservation.objects.create(
            guest=request.user,
            room=room,
            check_in=date_obj,
            check_out=date_obj + datetime.timedelta(days=1),
        )
        return redirect(reverse("reservations:detail", kwargs={"pk": reservation.pk}))
class ReservationDetailView(View):
    """Render a reservation's detail page with a review form.

    Only the reservation's guest or the room's host may view it.
    """

    def get(self, *args, **kwargs):
        pk = kwargs.get("pk")
        reservation = models.Reservation.objects.get_or_none(pk=pk)
        # 404 unless the requester is the guest or the room's host.
        if not reservation or (
            reservation.guest != self.request.user
            and reservation.room.host != self.request.user
        ):
            raise Http404()
        form = review_forms.CreateReviewForm()
        return render(
            self.request,
            "reservations/detail.html",
            {"reservation": reservation, "form": form},
        )
def edit_reservation(request, pk, verb):
    """Confirm or cancel a reservation; `verb` is "confirm" or "cancel".

    Only the guest or the room's host may change it. Cancelling also frees
    the booked days. Redirects to the reservation's detail page.
    """
    reservation = models.Reservation.objects.get_or_none(pk=pk)
    if not reservation or (
        reservation.guest != request.user and reservation.room.host != request.user
    ):
        raise Http404()
    if verb == "confirm":
        reservation.status = models.Reservation.STATUS_CONFIRMED
    elif verb == "cancel":
        reservation.status = models.Reservation.STATUS_CANCELED
        # Cancelling releases the booked days again.
        models.BookedDay.objects.filter(reservation=reservation).delete()
    # NOTE(review): any other verb still saves and reports success — confirm
    # whether unknown verbs should be rejected instead.
    reservation.save()
    messages.success(request, "Reservation Updated")
    return redirect(reverse("reservations:detail", kwargs={"pk": reservation.pk}))
|
[
"korimse@gmail.com"
] |
korimse@gmail.com
|
66af24b8e79c42a8dc7aa3ebdc1ace6b22534927
|
f9e8733ed87858b12bfee6b70ccdddd6a616b60a
|
/73.py
|
1b81f914bf328740233161f9aaa72772c4032d9f
|
[] |
no_license
|
MajestyLee/leetcode_TopInterview
|
c1c9c923d3bf42cd4777bb2a2ccd21654a7c6dbb
|
30b7d5acec716b7d754141835fc8bafe4411437e
|
refs/heads/master
| 2020-04-01T12:19:20.837383
| 2018-11-06T02:13:44
| 2018-11-06T02:13:44
| 153,200,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
'''
Given a m x n matrix, if an element is 0, set its entire row and column to 0. Do it in-place.
Example 1:
Input:
[
[1,1,1],
[1,0,1],
[1,1,1]
]
Output:
[
[1,0,1],
[0,0,0],
[1,0,1]
]
Example 2:
Input:
[
[0,1,2,0],
[3,4,5,2],
[1,3,1,5]
]
Output:
[
[0,0,0,0],
[0,4,5,0],
[0,3,1,0]
]
Follow up:
A straight forward solution using O(mn) space is probably a bad idea.
A simple improvement uses O(m + n) space, but still not the best solution.
Could you devise a constant space solution?
'''
class Solution(object):
    def setZeroes(self, matrix):
        """
        Zero out every row and column that contains a 0, in place.

        :type matrix: List[List[int]]
        :rtype: void Do not return anything, modify matrix in-place instead.
        """
        # First pass: remember which rows and columns contain a zero.
        zero_rows = set()
        zero_cols = set()
        for r, values in enumerate(matrix):
            for c, value in enumerate(values):
                if value == 0:
                    zero_rows.add(r)
                    zero_cols.add(c)
        width = len(matrix[0]) if matrix else 0
        # Blank every flagged row wholesale.
        for r in zero_rows:
            matrix[r] = [0] * width
        # Blank every flagged column entry by entry.
        for c in zero_cols:
            for r in range(len(matrix)):
                matrix[r][c] = 0
|
[
"binjie_lee@163.com"
] |
binjie_lee@163.com
|
b91382be96010e2e1aefacdcb707ef46b39f8400
|
3ca6302ebdc0e47d5d462435ad24a2886cfa5063
|
/64.py
|
5ce1111db6333ea60c31349788bb7a2df4797496
|
[] |
no_license
|
Sem31/PythonBasics
|
3859276820d484025d6c3d8f9efaf131b8626da8
|
d9bfd520b67056a3cbb747f7a4b71fe55871c082
|
refs/heads/master
| 2020-04-24T19:09:48.608293
| 2019-02-23T10:56:26
| 2019-02-23T10:56:26
| 172,203,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
# Create example1.csv with a header row and four salary records
# using csv.DictWriter.
import csv

with open("example1.csv", 'w') as obj:
    field = ["name", "salary"]
    writer = csv.DictWriter(obj, fieldnames=field)
    writer.writeheader()
    for emp_name, emp_salary in (("bob", 10000), ("sem", 40000),
                                 ("kamlesh", 30000), ("vishal", 50000)):
        writer.writerow({'name': emp_name, 'salary': emp_salary})
|
[
"semprajapat31@gmail.com"
] |
semprajapat31@gmail.com
|
43171e67ff9e36899ce8b565c03eaac899555a02
|
b7f3edb5b7c62174bed808079c3b21fb9ea51d52
|
/components/policy/tools/PRESUBMIT.py
|
8d6bc1a9cc400f3e06219f8a9d4ecd123cddc991
|
[
"BSD-3-Clause"
] |
permissive
|
otcshare/chromium-src
|
26a7372773b53b236784c51677c566dc0ad839e4
|
64bee65c921db7e78e25d08f1e98da2668b57be5
|
refs/heads/webml
| 2023-03-21T03:20:15.377034
| 2020-11-16T01:40:14
| 2020-11-16T01:40:14
| 209,262,645
| 18
| 21
|
BSD-3-Clause
| 2023-03-23T06:20:07
| 2019-09-18T08:52:07
| null |
UTF-8
|
Python
| false
| false
| 847
|
py
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def RunOtherPresubmit(function_name, input_api, output_api):
    """Run the named check from components/policy/resources/PRESUBMIT.py.

    The actual syntax check lives in the resources PRESUBMIT file; load
    that file, execute it, and dispatch to the requested top-level function.
    """
    resources_presubmit = '/components/policy/resources/PRESUBMIT.py'
    presubmit_path = input_api.change.RepositoryRoot() + resources_presubmit
    presubmit_content = input_api.ReadFile(presubmit_path)
    namespace = {}
    exec(presubmit_content, namespace)
    return namespace[function_name](input_api, output_api)
def CheckChangeOnUpload(input_api, output_api):
    """Upload-time hook: forwards to the resources PRESUBMIT check."""
    check_name = "CheckChangeOnUpload"
    return RunOtherPresubmit(check_name, input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
    """Commit-time hook: forwards to the resources PRESUBMIT check."""
    check_name = "CheckChangeOnCommit"
    return RunOtherPresubmit(check_name, input_api, output_api)
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
a6a6984813486278c4dc89f5e5201d922504d0eb
|
fcaa66bb55cb96342fc673e88363337fac95a184
|
/MovieApp/migrations/0004_auto_20210610_1948.py
|
c749cbeeb8d6a5a28e0b123163bcbbfbf191e942
|
[] |
no_license
|
rushabhgediya38/MovieTicketBooking
|
3f0ab4fbea6011c47968ae0d50a42d8bacf4ffdb
|
beeb59d671d96418c0959ed072f4ffcf517a1b0c
|
refs/heads/main
| 2023-05-14T05:55:09.176174
| 2021-06-13T15:02:42
| 2021-06-13T15:02:42
| 375,613,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
# Generated by Django 3.2.4 on 2021-06-10 14:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: create the M_lang model and link Movie to it (M2M)."""

    dependencies = [
        ('MovieApp', '0003_images'),
    ]

    operations = [
        # New table holding one language name per row.
        migrations.CreateModel(
            name='M_lang',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('language', models.CharField(max_length=256)),
            ],
        ),
        # A movie can be available in several languages (many-to-many).
        migrations.AddField(
            model_name='movie',
            name='M_lang',
            field=models.ManyToManyField(to='MovieApp.M_lang'),
        ),
    ]
|
[
"rushabhgediya38@gmail.com"
] |
rushabhgediya38@gmail.com
|
baf02ed9910963e5ed29164ba414f88415d59e00
|
ae5bdb32f5ae61f422e537222601e0fe4f86739c
|
/py2app_tests/argv_app/setup.py
|
432f60efe00ec2498ecebe46d1699b3bb23c06bb
|
[
"MIT",
"Python-2.0"
] |
permissive
|
acclivity/py2app
|
beeefa84eaeaa40edfcbed25d4edb500ddd60a61
|
a3dafb2c559dc9be78ebe1c44887820f9451806c
|
refs/heads/master
| 2021-03-26T09:11:01.176301
| 2020-03-16T22:25:26
| 2020-03-16T22:25:26
| 247,691,716
| 0
| 0
|
NOASSERTION
| 2020-03-16T12:04:10
| 2020-03-16T12:04:09
| null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
from setuptools import setup

# py2app build configuration for the BasicApp test bundle.
setup(
    name="BasicApp",
    app=["main.py"],
    options={"py2app": {"argv_emulation": True}},
)
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
d4dd8d3af20c272a50ffd0226634bd7465a6f2ee
|
6ff12f51b9a1b9f751cec3df21813803d2455f1e
|
/tools/link_graph_generator.py
|
cf503594caed97072da3912f1fad3b5706416592
|
[] |
no_license
|
prdx/PoliteScrapper
|
5d40089bb399c3d08fb848355b73cdc530c8327c
|
e84a49fa197e484361d2e69421b32fd4240c884c
|
refs/heads/master
| 2020-03-23T18:36:21.340544
| 2018-08-01T21:48:14
| 2018-08-01T21:48:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,554
|
py
|
from bs4 import BeautifulSoup
import os
import pickle
# Every crawled page dump in the working directory (one .xml per page).
files = [f for f in os.listdir('.') if os.path.isfile(f) and f.endswith(".xml")]
# url -> outgoing link urls (outlinks); url -> referring urls (inlinks).
outlinks = {}
inlinks = {}
def generate_outlink_file():
    """Parse every crawled .xml file and pickle the url -> outlinks map."""
    print("Generating outlinks file ...")
    # Generate outlinks
    for xml in files:
        try:
            with open(xml, "rb") as f:
                soup = BeautifulSoup(f, "lxml")
                # assumes each dump has <id><value> and <outlinks><value>
                # elements -- TODO confirm against the crawler's schema
                url = soup.id.value.text
                outlinks[url] = soup.outlinks.value.text.split(",")
        except Exception as e:
            # Best-effort: a malformed dump is renamed *.fail and skipped.
            print("Error processing: " + xml)
            print(e)
            os.rename(xml, xml + ".fail")
    # Dump the outlinks
    with open("../output/outlinks.p", "wb") as out:
        pickle.dump(outlinks, out, protocol=pickle.HIGHEST_PROTOCOL)
    print("Done generating outlinks file ...")
    print("Outlinks size: " + str(len(outlinks)) + " urls")
def generate_inlink_file():
    """Invert the url -> outlinks map into url -> inlinks and pickle it.

    Must run after generate_outlink_file() has populated the module-level
    `outlinks` dict; results go to ../output/inlinks.p.
    """
    print("Generating inlinks file ...")
    # Invert the mapping: every outlink target gains the source url as a
    # referrer. setdefault replaces the original try/except KeyError dance.
    for source_url in outlinks:
        for target_url in outlinks[source_url]:
            inlinks.setdefault(target_url, []).append(source_url)
    # Dump the inlinks
    with open("../output/inlinks.p", "wb") as out:
        pickle.dump(inlinks, out, protocol=pickle.HIGHEST_PROTOCOL)
    print("Inlinks size: " + str(len(inlinks)) + " urls")
    print("Done inlinks file ...")
# Build and persist the outlink map first; the inlink map is derived from it.
generate_outlink_file()
generate_inlink_file()
|
[
"astungkara.project@gmail.com"
] |
astungkara.project@gmail.com
|
1b00196825631a6f44decdbf3560208ff455bf28
|
d354d2da2a6bd47aa0f545a0bf351e982882ea4c
|
/setup.py
|
03aded87728f2e3159fcc416da43efee5d4887cd
|
[
"MIT"
] |
permissive
|
acodebreaker/pywsd
|
27dffb27a0961dbe5d09e71cc4f18e3dba10bfdf
|
ec8dd4bead6108e04250591d1732afcc9b0fb1bb
|
refs/heads/master
| 2021-01-18T01:40:48.909216
| 2014-11-24T07:25:17
| 2014-11-24T07:25:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
#!/usr/bin/env python -*- coding: utf-8 -*-
#
# Python Word Sense Disambiguation (pyWSD)
#
# Copyright (C) 2014 alvations
# URL:
# For license information, see LICENSE.md
from distutils.core import setup

# Minimal package metadata for pyWSD; see LICENSE.md for license details.
setup(
    name="pywsd",
    version="0.1",
    packages=["pywsd"],
    long_description="Python Implementations of Word Sense Disambiguation (WSD) technologies",
)
|
[
"alvations@gmail.com"
] |
alvations@gmail.com
|
838027b05c4975fc5f55b86184077144347a1bad
|
4f21e3301c1a8699745528177b3210b4f1a1f1d5
|
/week10/project2/library/settings.py
|
4dbdb1bf1faf2c3a9ac45fabe288d8e6aa05c0ca
|
[] |
no_license
|
ndina/webdev2019
|
7fd0250b662b378d55e24e931f82d0b2538d63a5
|
eae4808e2f0bfcdd5a366fd4692c041b96faaa0b
|
refs/heads/master
| 2020-05-03T22:05:12.392913
| 2019-05-04T02:46:56
| 2019-05-04T02:46:56
| 167,550,783
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,688
|
py
|
"""
Django settings for library project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u2iir6bmw(y%pu*23y%sm1u#8y#o7_qchko#=r*_rtqy_-ge+e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'library.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'library.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"dbom12360@gmail.com"
] |
dbom12360@gmail.com
|
e3d781a3f7d2d498cb5c6001e32a838461a0daa6
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_054/ch2_2020_09_16_11_34_55_516156.py
|
cb769d528b4f741eaac3317840c0153eb23c362a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
# This function computes the average speed (velocidade média).
def velocidade_media(d, t):
    """Return the average speed for a distance d covered in time t."""
    # Bug fix: the original returned 'velocidade_média' (with an accent),
    # an undefined name, so every call raised NameError. Return the
    # computed quotient instead.
    return d / t
|
[
"you@example.com"
] |
you@example.com
|
c0e29612bc1ab99f21ed31d148930eda30c512c3
|
2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f
|
/aws.ses.EventDestination.sns-destination-python/__main__.py
|
d32f60d788281d4b38651670141a088b90714d15
|
[] |
no_license
|
ehubbard/templates-aws
|
e323b693a18234defe6bd56ffcc64095dc58e3a1
|
2ae2e7a5d05490078017fed6d132dcdde1f21c63
|
refs/heads/master
| 2022-11-17T13:53:14.531872
| 2020-07-10T21:56:27
| 2020-07-10T21:56:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
import pulumi
import pulumi_aws as aws

# SES event destination that forwards "bounce" and "send" events to an
# SNS topic.
# NOTE(review): `aws_ses_configuration_set` and `aws_sns_topic` are not
# defined in this file -- they look like Terraform-style template
# references; confirm they exist in the surrounding program before running.
sns = aws.ses.EventDestination("sns",
    configuration_set_name=aws_ses_configuration_set["example"]["name"],
    enabled=True,
    matching_types=[
        "bounce",
        "send",
    ],
    sns_destination={
        "topic_arn": aws_sns_topic["example"]["arn"],
    })
|
[
"jvp@justinvp.com"
] |
jvp@justinvp.com
|
aa103ea582f1fe1dccda82638cc5841b408a0c7a
|
321b4ed83b6874eeb512027eaa0b17b0daf3c289
|
/988/988.smallest-string-starting-from-leaf.233252752.Accepted.leetcode.py
|
22432d1b1812c2fa9c180ef407130c342025bc17
|
[] |
no_license
|
huangyingw/submissions
|
7a610613bdb03f1223cdec5f6ccc4391149ca618
|
bfac1238ecef8b03e54842b852f6fec111abedfa
|
refs/heads/master
| 2023-07-25T09:56:46.814504
| 2023-07-16T07:38:36
| 2023-07-16T07:38:36
| 143,352,065
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
class Solution(object):
    def smallestFromLeaf(self, root):
        """Return the lexicographically smallest leaf-to-root string.

        Each node value 0..25 maps to a letter 'a'..'z'; candidate strings
        are read from a leaf up to the root, and the minimum over all
        leaves wins.
        """
        self.result = "~"  # '~' sorts after every lowercase letter

        def walk(node, path):
            if node is None:
                return
            path.append(chr(ord('a') + node.val))
            if node.left is None and node.right is None:
                candidate = "".join(reversed(path))
                if candidate < self.result:
                    self.result = candidate
            else:
                walk(node.left, path)
                walk(node.right, path)
            path.pop()  # backtrack on the way up

        walk(root, [])
        return self.result
|
[
"huangyingw@gmail.com"
] |
huangyingw@gmail.com
|
5cb9c51015c50cab850bea8216889f5c99c937d9
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2_neat/16_0_2_Jormungandr_Revenge_of_the_pancakes.py
|
d9925b4d479f3e794bba1c134eedd620908d2b23
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
#!/usr/bin/env python
__author__ = 'Bill'
def check_pancakes(n):
    """(check_pancakes):
    Return True when every pancake is face up (no '-' present).
    :param n: the pancakes string of '+'/'-' characters
    """
    # Idiomatic replacement for the original manual character scan:
    # a single membership test.
    return '-' not in n
def flip_pancakes(n):
    """(flip_pancakes):
    Flip the maximal leading run of same-facing pancakes and return the
    new stack string.
    :param n: the pancakes string of '+'/'-' characters
    """
    # Fixes: the original used the Python-2-only `xrange` and shadowed
    # the builtin `dict` with a local name.
    invert = {'+': '-', '-': '+'}
    first = n[0]
    run = 0
    # Length of the leading run of pancakes equal to the first one.
    for ch in n:
        if ch != first:
            break
        run += 1
    # Invert just that run; the rest of the stack is unchanged.
    return invert[first] * run + n[run:]
from misc import input_, output_

# Read all cases from the Code Jam input file (project-local helper).
num_cases, cases = input_('B-large.in')
Results = []
for case in cases:
    case = case.rstrip('\n')
    i = 0
    face_up = check_pancakes(case)
    if face_up == True:
        Results.append(i)
    else:
        # Flip the leading run repeatedly until every pancake shows '+',
        # counting one move per flip.
        while check_pancakes(case) == False:
            case = flip_pancakes(case)
            i += 1
        Results.append(i)
output_(Results, 'Revenge_of_the_pancakes_large.out')
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
56749342e68294136dbbbacb342a3d9b2f01f30b
|
18b3ad3b0e1f7f10969738251e1201d01dfbc6bf
|
/backup_files/samplepy/passbyvalue.py
|
26689727f2944f32dee1688daef3ff1dc4632725
|
[] |
no_license
|
sahthi/backup2
|
11d509b980e731c73733b1399a8143780779e75a
|
16bed38f0867fd7c766c2a008c8d43b0660f0cb0
|
refs/heads/master
| 2020-03-21T12:39:56.890129
| 2018-07-09T08:12:46
| 2018-07-09T08:12:46
| 138,565,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
# Python 2 demo: rebinding a parameter does not affect the caller's object.
def changeme(mylist):
    # Rebinding the parameter only changes the local name; the caller's
    # list is untouched (Python passes object references by value).
    mylist = [1,2,3,4 ]
    print "values inside the function",mylist
    return

mylist = [10,20,30]
changeme(mylist)
# Still prints [10, 20, 30]: the rebinding above was local to changeme.
print"values outside the function ",mylist
|
[
"siddamsetty.sahithi@votarytech.com"
] |
siddamsetty.sahithi@votarytech.com
|
057695d4910d814affa1cef49fbca93b9b520c88
|
df690ac0484ff04cb63f71f528a9d0a0e557d6a3
|
/.history/ws_20210608130810.py
|
59216ed4c38672800e718b0909e4e451e853a45b
|
[] |
no_license
|
khanhdk0000/Mqtt-Web-Socket
|
437777c740c68d4197353e334f6fe6a629094afd
|
4f9e49a3817baa9ebc4e4f8dcffc21b6ea9d0134
|
refs/heads/master
| 2023-06-20T17:08:09.447381
| 2021-06-08T17:42:37
| 2021-06-08T17:42:37
| 375,090,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,612
|
py
|
from flask import Flask, jsonify, request
from flask_sock import Sock
import time

app = Flask(__name__)
sock = Sock(app)

import threading

# Adafruit IO MQTT broker credentials.
# SECURITY NOTE(review): the AIO key below is committed in plain text;
# revoke it and load credentials from the environment instead.
BROKER = 'io.adafruit.com'
USER = 'khanhdk0000'
PASSWORD = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
TOPIC = 'khanhdk0000/feeds/'
# Feed names appended to TOPIC when subscribing/publishing.
LIGHT = 'light'
SOUND = 'sound'
TEMP = 'temp'
LCD = 'iot_led'
BUZZER = 'buzzer'
########
# Alternative (commented-out) CSE_BBC broker configuration.
# USER = 'CSE_BBC'
# PASSWORD = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
# TOPIC = 'CSE_BBC/feeds/'
# USER1 = 'CSE_BBC1'
# PASSWORD1 = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
# TOPIC1 = 'CSE_BBC1/feeds/'
# LIGHT = 'bk-iot-light'
# SOUND = 'bk-iot-sound'
# TEMP = 'bk-iot-temp-humid'
# LCD = 'bk-iot-lcd'
# BUZZER = 'bk-iot-speaker'

# Latest JSON payload per sensor, plus the previously pushed value so the
# websocket handlers only forward changes.
# NOTE(review): the default "name" fields for temp/sound appear swapped
# relative to the variable names -- confirm against the device feed ids.
resLight = '"id":"13","name":"LIGHT","data":"0","unit":""'
prevLight = resLight
resTemp = '"id":"7","name":"SOUND","data":"0","unit":""'
prevTemp = resTemp
resSound = '"id":"12","name":"TEMP-HUMID","data":"0","unit":""'
prevSound = resSound
def mqttGet(user, password, topic, device):
    """Subscribe to one Adafruit IO feed and cache incoming payloads.

    Blocks forever in loop_forever(), so it is meant to run on its own
    thread; each message updates the matching module-level res* variable,
    which the websocket routes poll.
    """
    import paho.mqtt.client as mqtt

    def on_connect(client, userdata, flags, rc):
        print("Connected with result code "+str(rc))
        if rc == 0:
            print('good')
        else:
            print('no good')

    def on_disconnect(client, userdata, flags, rc=0):
        print("Disconnected result code " + str(rc))

    def on_message(client, userdata, message):
        # Decode the payload and store it in the slot for this feed.
        if device == LIGHT:
            global resLight
            message = str(message.payload.decode("utf-8"))
            print(message)
            resLight = message
        elif device == TEMP:
            global resTemp
            message = str(message.payload.decode("utf-8"))
            print(message)
            resTemp = message
        elif device == SOUND:
            global resSound
            message = str(message.payload.decode("utf-8"))
            print(message)
            resSound = message

    client = mqtt.Client()
    client.username_pw_set(username=user, password=password)
    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    client.on_message = on_message
    client.connect(BROKER, 1883, 60)
    client.subscribe(topic)
    client.loop_forever()
# One subscriber thread per feed; each blocks in loop_forever().
# NOTE(review): Thread's name= expects a string; passing the function
# object works but reads oddly -- consider name="mqttGet-light" etc.
t1 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD, TOPIC + LIGHT, LIGHT))
t1.start()
t2 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD, TOPIC + TEMP, TEMP))
t2.start()
t3 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD, TOPIC + SOUND, SOUND))
t3.start()
def mqttPost(topic, user, password, payload):
    """Publish a single MQTT message to the Adafruit IO broker.

    :param topic: full feed topic, e.g. TOPIC + LCD
    :param user: Adafruit IO username
    :param password: Adafruit IO key
    :param payload: message body to publish
    """
    import paho.mqtt.publish as publish
    # Bug fix: the original named this parameter `pass`, which is a Python
    # keyword and made the whole module a SyntaxError; renamed to
    # `password` (callers pass it positionally, so the call shape is kept).
    publish.single(topic, hostname="io.adafruit.com",
                   auth={"username": user, "password": password},
                   payload=payload)
@sock.route('/light')
def light(ws):
    """Push light-sensor updates to the connected websocket client.

    Polls the module-level cache and forwards only changed readings.
    """
    global resLight, prevLight
    while True:
        # Yield the CPU between polls; the original spun at 100% CPU
        # with a bare `continue` busy-wait.
        time.sleep(0.05)
        if prevLight != resLight:
            ws.send(resLight)
            prevLight = resLight
@sock.route('/sound')
def sound(ws):
    """Push sound-sensor updates to the connected websocket client.

    Polls the module-level cache and forwards only changed readings.
    """
    global resSound, prevSound
    while True:
        # Yield the CPU between polls; the original spun at 100% CPU
        # with a bare `continue` busy-wait.
        time.sleep(0.05)
        if prevSound != resSound:
            ws.send(resSound)
            prevSound = resSound
@sock.route('/temp')
def temp(ws):
    """Push temperature updates to the connected websocket client.

    Polls the module-level cache and forwards only changed readings.
    """
    global resTemp, prevTemp
    while True:
        # Yield the CPU between polls; the original spun at 100% CPU
        # with a bare `continue` busy-wait.
        time.sleep(0.05)
        if prevTemp != resTemp:
            ws.send(resTemp)
            prevTemp = resTemp
@app.route('/postlcd', methods=["POST"])
def testpost():
    """Receive LCD text as JSON ({"data": ...}) and publish it over MQTT."""
    input_json = request.get_json(force=True)
    domain = input_json['data']
    print('receive data', domain)
    # Bug fix: the original called mqttPost(TOPIC+LCD, U) with an undefined
    # name `U` and too few arguments; forward full credentials + payload.
    mqttPost(TOPIC + LCD, USER, PASSWORD, domain)
    return 'yea:' + domain
if __name__ == '__main__':
    # Flask development server only; use a production WSGI server to deploy.
    app.run(debug=True)
|
[
"khanhtran28092000@gmail.com"
] |
khanhtran28092000@gmail.com
|
09f47ffa874febc1dd80bb23531d909ac281739b
|
694c187c8a00bee8c670c1690170099bad9b16b3
|
/hindex.py
|
edded2784cbd958ce569e1997c2a49c5589810d0
|
[] |
no_license
|
ajayvenkat10/Competitive
|
301f220b6d296f7e34328f192c43c4d7ef208cb1
|
14f2ecebe10eb19f72cc412dd0c414b3b1de9b4d
|
refs/heads/master
| 2022-11-20T14:31:33.590099
| 2020-07-23T15:39:14
| 2020-07-23T15:39:14
| 281,599,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
# Reads t test cases; for each, prints a running h-index-style value after
# each paper's citation count arrives (Google Code Jam output format).
t = int(input())
for _ in range(t):
    n = int(input())
    arr = list(map(int, input().split()))
    # The h-index starts at 1 for the first paper and never decreases.
    final = [1]
    val = 2
    for i in range(1,len(arr)):
        # Count papers seen so far with at least `val` citations
        # (O(n^2) rescan of the prefix).
        count = 0
        for j in range(i+1):
            if(arr[j] >= val):
                count += 1
        if(count>=val):
            final.append(val)
            val += 1
        else:
            final.append(val-1)
    print("Case #%d: " % (_+1) , end="")
    print(*final)
|
[
"37923623+ajayvenkat10@users.noreply.github.com"
] |
37923623+ajayvenkat10@users.noreply.github.com
|
6452090ca100845c839848f14ac2d04f85352f4d
|
934235f70a390a3ba0d7b464cddd10872f31cda3
|
/rango/server/.history/tango_with_django/rango/admin_20210103130028.py
|
361f6ca167ae05dc1771706293718383039c718e
|
[] |
no_license
|
deji100/Projects
|
6919041ba23e77a5c74e5ab7692bfcee38ececcb
|
17e64d954d1d7805be57ec5d8d4344e4944889e6
|
refs/heads/master
| 2023-04-30T05:25:03.143303
| 2021-05-20T15:00:43
| 2021-05-20T15:00:43
| 338,844,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
from django.contrib import admin
from .models import Category, Page, User


# Register your models here.
class PageInline(admin.StackedInline):
    """Edit a category's pages inline on the Category admin page."""
    # Bug fix: the original had a bare `model` expression (NameError at
    # import). An inline must set `model`; `list_display` is a ModelAdmin
    # option and has been moved to PageAdmin below.
    model = Page


class PageAdmin(admin.ModelAdmin):
    """Admin for Page; was registered below but never defined (NameError)."""
    list_display = ('title', 'category', 'url')
    # fields = ('title', 'url', 'category')


class CategoryAdmin(admin.ModelAdmin):
    """Admin for Category with its pages editable inline."""
    list_display = ('name', 'views', 'likes')
    # prepopulated_fields = {'slug': ('name',)}
    inlines = [PageInline]


admin.site.register(Category, CategoryAdmin)
admin.site.register(Page, PageAdmin)
admin.site.register(User)
|
[
"68882568+deji100@users.noreply.github.com"
] |
68882568+deji100@users.noreply.github.com
|
0ea09ec878674f42ce2fb633727af303b0ff9662
|
830398bc5ae951b153ff695a40be7239742bc73e
|
/exercises/parse_dhcp_snooping.py
|
27114f9e94d30bcea5c6296a1383f9c2e461987f
|
[] |
no_license
|
dmikos/pyneng
|
ff67f1d617a97d73103a7785a7bf86140e7baa82
|
543fb0d9fc63a2afee45d2465af3a4c3966e4a86
|
refs/heads/master
| 2021-01-25T14:56:44.181140
| 2018-04-23T04:31:00
| 2018-04-23T04:31:00
| 123,739,447
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
# -*- coding: utf-8 -*-
import re
#'00:09:BB:3D:D6:58    10.1.10.2    86250    dhcp-snooping    10    FastEthernet0/1'
# Bug fix: use a raw string for the pattern -- '\S' and '\d' in a plain
# string are invalid escape sequences (DeprecationWarning, future error).
regex = re.compile(r'(?P<mac>\S+) +(?P<ip>\S+) +\d+ +\S+ +(?P<vlan>\d+) +(?P<port>\S+)')

# Collect one dict per binding line: mac, ip, vlan, port.
result = []
with open('dhcp_snooping.txt') as data:
    for line in data:
        match = regex.search(line)
        if match:
            result.append(match.groupdict())

print('К коммутатору подключено {} устройства'.format(len(result)))
for num, comp in enumerate(result, 1):
    print('Параметры устройства {}:'.format(num))
    for key in comp:
        print('{:10}: {:10}'.format(key,comp[key]))
|
[
"dkostinov@gmail.com"
] |
dkostinov@gmail.com
|
8435baa0b8beaab331ff8904a8889f896a8d23c0
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v9/services/services/third_party_app_analytics_link_service/transports/__init__.py
|
502d5cf2169f355fb53779b340f3900e0e913770
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ThirdPartyAppAnalyticsLinkServiceTransport
from .grpc import ThirdPartyAppAnalyticsLinkServiceGrpcTransport
# Compile a registry of transports, keyed by wire-protocol name.
_transport_registry = (
    OrderedDict()
)  # type: Dict[str, Type[ThirdPartyAppAnalyticsLinkServiceTransport]]
_transport_registry["grpc"] = ThirdPartyAppAnalyticsLinkServiceGrpcTransport

# Public re-exports: the transport base class and its gRPC implementation.
__all__ = (
    "ThirdPartyAppAnalyticsLinkServiceTransport",
    "ThirdPartyAppAnalyticsLinkServiceGrpcTransport",
)
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
ce7050ab38a7683c7b476a80901ac6beac9d0799
|
4fbd844113ec9d8c526d5f186274b40ad5502aa3
|
/algorithms/python3/maximize_distance_to_closest_person.py
|
37e744aa546a7f515c70e1f156bc63f0f499ee8d
|
[] |
no_license
|
capric8416/leetcode
|
51f9bdc3fa26b010e8a1e8203a7e1bcd70ace9e1
|
503b2e303b10a455be9596c31975ee7973819a3c
|
refs/heads/master
| 2022-07-16T21:41:07.492706
| 2020-04-22T06:18:16
| 2020-04-22T06:18:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In a row of seats, 1 represents a person sitting in that seat, and 0 represents that the seat is empty.
There is at least one empty seat, and at least one person sitting.
Alex wants to sit in the seat such that the distance between him and the closest person to him is maximized.
Return that maximum distance to closest person.
Example 1:
Input: [1,0,0,0,1,0,1]
Output: 2
Explanation:
If Alex sits in the second open seat (seats[2]), then the closest person has distance 2.
If Alex sits in any other open seat, the closest person has distance 1.
Thus, the maximum distance to the closest person is 2.
Example 2:
Input: [1,0,0,0]
Output: 3
Explanation:
If Alex sits in the last seat, the closest person is 3 seats away.
This is the maximum distance possible, so the answer is 3.
Note:
1 <= seats.length <= 20000
seats contains only 0s or 1s, at least one 0, and at least one 1.
"""
""" ==================== body ==================== """
class Solution:
    def maxDistToClosest(self, seats):
        """
        :type seats: List[int]
        :rtype: int

        The original body was empty; implemented as a single scan that
        tracks the previous occupied seat. An interior gap contributes
        half its width (Alex sits in the middle); the leading and trailing
        runs of empty seats contribute their full length (Alex sits at
        the edge). The problem guarantees at least one occupied seat.
        """
        best = 0
        prev = None  # index of the last occupied seat seen so far
        for i, taken in enumerate(seats):
            if taken:
                if prev is None:
                    best = i  # leading run of empty seats
                else:
                    best = max(best, (i - prev) // 2)
                prev = i
        # trailing run of empty seats
        return max(best, len(seats) - 1 - prev)
""" ==================== body ==================== """
|
[
"capric8416@gmail.com"
] |
capric8416@gmail.com
|
a4a27e3eb0c39273105293f96a89dc9b05e6f10a
|
b6a84594f8c29d968014faaddd49abeb7537a5fc
|
/python/1040.moving-stones-until-consecutive-ii.py
|
799deed3b361b4636ffa827b1e859308649b708d
|
[] |
no_license
|
nickyfoto/lc
|
8a6af3df114e693e265d0ede03f4d4e1283e010e
|
3633b4df3e24968057c7d684689b931c5a8032d3
|
refs/heads/master
| 2020-09-16T19:23:07.765917
| 2020-06-07T17:18:06
| 2020-06-07T17:18:06
| 223,866,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,913
|
py
|
#
# @lc app=leetcode id=1040 lang=python3
#
# [1040] Moving Stones Until Consecutive II
#
# https://leetcode.com/problems/moving-stones-until-consecutive-ii/description/
#
# algorithms
# Medium (52.07%)
# Likes: 152
# Dislikes: 231
# Total Accepted: 4.5K
# Total Submissions: 8.7K
# Testcase Example: '[7,4,9]'
#
# On an infinite number line, the position of the i-th stone is given by
# stones[i]. Call a stone an endpoint stone if it has the smallest or largest
# position.
#
# Each turn, you pick up an endpoint stone and move it to an unoccupied
# position so that it is no longer an endpoint stone.
#
# In particular, if the stones are at say, stones = [1,2,5], you cannot move
# the endpoint stone at position 5, since moving it to any position (such as 0,
# or 3) will still keep that stone as an endpoint stone.
#
# The game ends when you cannot make any more moves, ie. the stones are in
# consecutive positions.
#
# When the game ends, what is the minimum and maximum number of moves that you
# could have made? Return the answer as an length 2 array: answer =
# [minimum_moves, maximum_moves]
#
#
#
# Example 1:
#
#
# Input: [7,4,9]
# Output: [1,2]
# Explanation:
# We can move 4 -> 8 for one move to finish the game.
# Or, we can move 9 -> 5, 4 -> 6 for two moves to finish the game.
#
#
#
# Example 2:
#
#
# Input: [6,5,4,3,10]
# Output: [2,3]
# We can move 3 -> 8 then 10 -> 7 to finish the game.
# Or, we can move 3 -> 7, 4 -> 8, 5 -> 9 to finish the game.
# Notice we cannot move 10 -> 2 to finish the game, because that would be an
# illegal move.
#
#
#
# Example 3:
#
#
# Input: [100,101,104,102,103]
# Output: [0,0]
#
#
#
#
#
# Note:
#
#
# 3 <= stones.length <= 10^4
# 1 <= stones[i] <= 10^9
# stones[i] have distinct values.
#
#
#
#
#
#
#
#
#
# @lc code=start
class Solution:
    def numMovesStonesII(self, stones):
        """Return [minimum_moves, maximum_moves] to make stones consecutive.

        The original body was a bare `pass`; implemented with the standard
        sliding-window analysis over sorted positions.
        """
        a = sorted(stones)
        n = len(a)
        # Maximum: the first move sacrifices either the leftmost or the
        # rightmost gap; afterwards every remaining empty slot inside the
        # kept span costs exactly one move.
        high = max(a[-1] - a[1], a[-2] - a[0]) - (n - 2)
        # Minimum: slide a window of width n over the sorted positions;
        # the stones already inside the best window need no move.
        low = n
        left = 0
        for right in range(n):
            while a[right] - a[left] >= n:
                left += 1
            inside = right - left + 1
            if inside == n - 1 and a[right] - a[left] == n - 2:
                # Special case: n-1 consecutive stones plus one outlier --
                # the outlier is an endpoint and cannot jump straight into
                # the final slot, so two moves are required.
                low = min(low, 2)
            else:
                low = min(low, n - inside)
        return [low, high]
# @lc code=end
|
[
"nickyfoto@gmail.com"
] |
nickyfoto@gmail.com
|
9adf50d27141869fb0693ddeb11ca31431191545
|
bd93fa910151c278be8249055bc084e5a5c35a6a
|
/Python/DjangoTest2/booktest/models.py
|
3735ebb85498b072da8a92f26cec7a80e612790c
|
[] |
no_license
|
ahojcn/practice-code
|
bd81595b80239cd2550183093566bd536a83ed3f
|
b65f4e76271479269463e92fd3fd41585c2ac792
|
refs/heads/master
| 2021-07-10T14:15:08.036592
| 2020-07-09T11:32:16
| 2020-07-09T11:32:16
| 153,059,349
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
from django.db import models
# Create your models here.
# 创建模型
class BookInfo(models.Model):
    """Book model class (original: 图书模型类)."""
    # Book title (original: 图书名)
    btitle = models.CharField(max_length=20)
    # Publication date (original: 出版日期)
    bpub_date = models.DateField()

    def __str__(self):
        return self.btitle
class HeroInfo(models.Model):
    """Hero character appearing in a book (original: 书中的英雄人物类)."""
    # Hero name (original: 英雄名)
    hname = models.CharField(max_length=20)
    # Gender as a boolean; default False means male (original: 性别 ...)
    hgender = models.BooleanField(default=False)
    # Remarks / notes (original: 备注)
    hcomment = models.CharField(max_length=128)
    # Foreign key to the owning book (original: 关系).
    # NOTE(review): on_delete=None will raise at migration time on modern
    # Django; models.CASCADE is the usual choice -- confirm intent.
    hbook = models.ForeignKey(BookInfo, on_delete=None)

    def __str__(self):
        return self.hname
|
[
"hanoi_ahoj@icloud.com"
] |
hanoi_ahoj@icloud.com
|
564d81d0051cf261ea8cf3a8060afb2cc81c2406
|
718f4a6f53da14dbd79031928900a26c4de65ccb
|
/optimize_NMDA_KIN2.py
|
bf70c8a044f4ff48aa4d86895cd5e68a6e41e55f
|
[] |
no_license
|
neurosutras/CA1Sim
|
ff37e5ae96cc00d923bbcf333d75842c34156b5b
|
9a5796e5de9b9be477d61837c164fcbccbe3c8ce
|
refs/heads/master
| 2023-04-08T01:39:09.559475
| 2022-01-13T20:20:45
| 2022-01-13T20:20:45
| 29,497,263
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,046
|
py
|
__author__ = 'Aaron D. Milstein'
from specify_cells import *
from plot_results import *
import scipy.optimize as optimize
import random
"""
This simulation uses scipy.optimize to iterate through NMDA_KIN mechanism parameters to fit target EPSP kinetics.
"""
#morph_filename = 'EB1-early-bifurcation.swc'
morph_filename = 'EB2-late-bifurcation.swc'
#mech_filename = '043015 pas_exp_scale kdr ka_scale ih_sig_scale - EB2'
#mech_filename = '072515 optimized basal ka_scale dend_sh_ar_nas - EB2'
mech_filename = '102915 interim dendritic excitability'
def synaptic_kinetics_error(x, plot=0):
    """
    Error of NMDA_KIN2 kinetic parameters against target EPSC kinetics
    (10-90% rise tau, 90-10% decay tau, and facilitation ratio).

    :param x: list of parameters [kon, koff, CC, CO, Beta, Alpha]
    :param plot: int or bool: method can be called manually to compare actual to target and fit waveforms
    :return: float: Error (sum of squared normalized deviations)
    """
    # Single stimulus at `equilibrate` to measure rise/decay kinetics.
    spike_times = h.Vector([equilibrate])
    for i, syn in enumerate(stim_syn_list):
        syn.target(syn_type).kon = x[0]
        syn.target(syn_type).koff = x[1]
        syn.target(syn_type).CC = x[2]
        syn.target(syn_type).CO = x[3]
        syn.target(syn_type).Beta = x[4]
        syn.target(syn_type).Alpha = x[5]
        syn.source.play(spike_times)
    sim.run(v_init)
    t = np.array(sim.tvec)
    g = np.array(sim.rec_list[0]['vec'])
    # Resample onto a uniform 1-us grid before measuring kinetics.
    interp_t = np.arange(0, duration, 0.001)
    interp_g = np.interp(interp_t, t, g)
    """
    Rc = np.interp(interp_t, t, np.array(sim.rec_list[1]['vec']))
    Ro = np.interp(interp_t, t, np.array(sim.rec_list[2]['vec']))
    Rb = np.interp(interp_t, t, np.array(sim.rec_list[3]['vec']))
    Ro_peak = np.max(Ro)
    Ro_peak_loc = np.where(Ro == Ro_peak)[0][0]
    Rc_max = Ro_peak + Rc[Ro_peak_loc] + Rb[Ro_peak_loc]
    """
    start, end = time2index(interp_t, equilibrate, duration)
    y = interp_g[start:end]
    interp_t = interp_t[start:end]
    interp_t -= interp_t[0]
    amp = np.max(y)
    t_peak = np.where(y == amp)[0][0]
    y /= amp  # normalize to unit peak
    # 10-90% rise time and 90-10% decay time of the normalized trace.
    rise_10 = np.where(y[0:t_peak] >= 0.1)[0][0]
    rise_90 = np.where(y[0:t_peak] >= 0.9)[0][0]
    rise_tau = interp_t[rise_90] - interp_t[rise_10]
    decay_90 = np.where(y[t_peak:] <= 0.9)[0][0]
    decay_10 = np.where(y[t_peak:] <= 0.1)[0]
    if decay_10.any():
        decay_tau = interp_t[decay_10[0]] - interp_t[decay_90]
    else:
        decay_tau = 1000.  # large error if trace has not decayed to 10% in 1 second
    result = {'rise_tau': rise_tau, 'decay_tau': decay_tau}  # , 'Rc_max': Rc_max}
    # Train of 5 stimuli (10 ms apart) to measure facilitation.
    spike_times = h.Vector([equilibrate + i * 10. for i in range(5)])
    for i, syn in enumerate(stim_syn_list):
        syn.source.play(spike_times)
    sim.run(v_init)
    for i, syn in enumerate(stim_syn_list):
        syn.source.play(h.Vector())  # detach the spike train again
    t = np.array(sim.tvec)
    g = np.array(sim.rec_list[0]['vec'])
    interp_t = np.arange(0, duration, 0.001)
    interp_g = np.interp(interp_t, t, g)
    start, end = time2index(interp_t, equilibrate, duration)
    yf = interp_g[start:end]
    interp_t = interp_t[start:end]
    interp_t -= interp_t[0]
    facil_amp = np.max(yf)
    # Facilitation = train peak relative to the single-pulse peak.
    result['facilitation'] = facil_amp / amp
    yf /= amp
    # Sum of squared deviations, each normalized by its tolerance.
    Err = 0.
    for target in result:
        Err += ((target_val[target] - result[target])/target_range[target])**2.
    print('[kon, koff, CC, CO, Beta, Alpha]: [%.3f, %.3f, %.3f, %.3f, %.3f, %.3f], Error: %.3E, Rise: %.3f, Decay: '
          '%.3f, facilitation: %.2f' % (x[0], x[1], x[2], x[3], x[4], x[5], Err, rise_tau, decay_tau,
                                        result['facilitation']))
    if plot:
        plt.plot(interp_t, y)
        plt.plot(interp_t, yf)
        plt.show()
        plt.close()
    return Err
equilibrate = 250.  # time to steady-state
duration = 1250.
v_init = -67.
num_syns = 1
cell = CA1_Pyr(morph_filename, mech_filename, full_spines=True)
cell.zero_na()
syn_type = 'NMDA_KIN2'
sim = QuickSim(duration)
# look for a trunk bifurcation
trunk_bifurcation = [trunk for trunk in cell.trunk if len(trunk.children) > 1 and trunk.children[0].type == 'trunk' and
                     trunk.children[1].type == 'trunk']
# get where the thickest trunk branch gives rise to the tuft
# NOTE(review): generator.next() below is Python 2 only; use
# next(generator) for Python 3 compatibility.
if trunk_bifurcation:  # follow the thicker trunk
    trunk = max(trunk_bifurcation[0].children[:2], key=lambda node: node.sec(0.).diam)
    trunk = (node for node in cell.trunk if cell.node_in_subtree(trunk, node) and 'tuft' in (child.type for child in
             node.children)).next()
else:
    trunk = (node for node in cell.trunk if 'tuft' in (child.type for child in node.children)).next()
tuft = (child for child in trunk.children if child.type == 'tuft').next()
trunk = trunk_bifurcation[0]
#sim.append_rec(cell, trunk, loc=1., description='trunk vm')
# Attach one deterministic NMDA_KIN2 synapse to every trunk spine.
spine_list = []
spine_list.extend(trunk.spines)
for spine in spine_list:
    syn = Synapse(cell, spine, [syn_type], stochastic=0)
local_random = random.Random()
local_random.seed(0)  # deterministic choice of stimulated spines
stim_syn_list = [spine_list[i].synapses[0] for i in local_random.sample(range(len(spine_list)), num_syns)]
for i, syn in enumerate(stim_syn_list):
    syn.target(syn_type).mg = 0.1
    #syn.target(syn_type).gmax = 0.005
    # Record conductance and the closed/open/blocked state variables.
    sim.append_rec(cell, syn.node, object=syn.target(syn_type), param='_ref_g')
    sim.append_rec(cell, syn.node, object=syn.target(syn_type), param='_ref_Rc')
    sim.append_rec(cell, syn.node, object=syn.target(syn_type), param='_ref_Ro')
    sim.append_rec(cell, syn.node, object=syn.target(syn_type), param='_ref_Rb')
# the target values and acceptable ranges
target_val = {'rise_tau': 3., 'decay_tau': 75., 'Rc_max': 0.6, 'facilitation': 1.3}
# extrapolating from Chen...Murphy and Harnett...Magee, Popescu et al.
target_range = {'rise_tau': 0.1, 'decay_tau': .5, 'Rc_max': 0.01, 'facilitation': 0.01}
# the initial guess and bounds
# x = [kon, koff, CC, CO, Beta, Alpha)
#x0 = [10., .02, 1., 0.1, 0.04, 0.09]
#x0 = [26.414, 1.903, 3.185, 5.119, 0.274, 0.0299]
#x0 = [44.35, 2.46, 10.34, 1.06, 0.40, 0.045]
x0 = [85.47, 0.68, 9.48, 2.56, 0.72, 0.078]
xmin = [10., .01, .1, .1, .01, .01]
xmax = [100., 10., 20., 20., 1., 1.]
#x1 = [1099.70, 0.07, 1.70, 14.12, 4.64, 0.19] # old NMDA_KIN2, unrealistic kon
x1 = [68.74, 1.43, 5.86, 3.32, 0.270, 0.034]
mytakestep = Normalized_Step(x0, xmin, xmax)
minimizer_kwargs = dict(method=null_minimizer)
"""
result = optimize.basinhopping(synaptic_kinetics_error, x0, niter=720, niter_success=200, disp=True, interval=20,
                               minimizer_kwargs=minimizer_kwargs, take_step=mytakestep)
synaptic_kinetics_error(result.x, plot=1)
polished_result = optimize.minimize(synaptic_kinetics_error, result.x, method='Nelder-Mead', options={'ftol': 1e-3,
                                    'xtol': 1e-3, 'disp': True})
"""
# Local Nelder-Mead polish from the hand-picked starting point x0
# (the global basinhopping stage above is currently commented out).
polished_result = optimize.minimize(synaptic_kinetics_error, x0, method='Nelder-Mead', options={'ftol': 1e-3,
                                    'xtol': 1e-3, 'disp': True})
synaptic_kinetics_error(polished_result.x, plot=1)
#synaptic_kinetics_error(x1, plot=1)
|
[
"neurosutras@gmail.com"
] |
neurosutras@gmail.com
|
da748d34cb6a27059cecf0ee84bd84376e2809bf
|
d5ad13232e3f1ced55f6956bc4cbda87925c8085
|
/cc_mcc_seq/SNVINDEL/tmp/3.1_tumor_minus_normal_exome_somatic_number/1_tumor_minus_normal_somatic.py
|
fb9fd44707bd9d72ef21d0edd8631473db5d86f3
|
[] |
no_license
|
arvin580/SIBS
|
c0ba9a8a41f59cb333517c286f7d80300b9501a2
|
0cc2378bf62359ec068336ea4de16d081d0f58a4
|
refs/heads/master
| 2021-01-23T21:57:35.658443
| 2015-04-09T23:11:34
| 2015-04-09T23:11:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
def tumor_minus_normal_to_somatic(tumorFile,normalFile,oFile) :
    """Write tumor variant lines whose key is absent from the matched normal.

    Each input line is tab-separated; the comparison key is every field
    except the first and the last. Tumor lines whose key does not occur in
    the normal sample are treated as somatic and written to *oFile*.

    Args:
        tumorFile: path to the tumor variant table.
        normalFile: path to the matched normal variant table.
        oFile: path of the somatic output file to create.
    """
    # Collect the normal-sample keys once. A set replaces the original
    # dict-with-dummy-values and gives the same O(1) membership test.
    normal_keys = set()
    with open(normalFile) as in_file:   # with-blocks close files even on error
        for line in in_file:
            fields = line.strip().split('\t')
            normal_keys.add('\t'.join(fields[1:-1]))
    with open(tumorFile) as in_file, open(oFile, 'w') as out_file:
        for line in in_file:
            line = line.strip()
            fields = line.split('\t')
            if '\t'.join(fields[1:-1]) not in normal_keys:
                out_file.write(line + '\n')
# Each triple is (tumor variant table, matched normal table, somatic output).
_SAMPLE_TRIPLES = [
    ('sum_snp.exome_summary.pass012.ICC10A', 'sum_snp34.exome_summary.pass012.ICC10B', 'sum_snp.exome_summary.pass012.ICC10'),
    ('sum_snp.exome_summary.pass012.ICC4A', 'sum_snp34.exome_summary.pass012.ICC4B', 'sum_snp.exome_summary.pass012.ICC4'),
    ('sum_snp.exome_summary.pass012.ICC5A', 'sum_snp34.exome_summary.pass012.ICC5B', 'sum_snp.exome_summary.pass012.ICC5'),
    ('sum_snp.exome_summary.pass012.ICC9A', 'sum_snp34.exome_summary.pass012.ICC9B', 'sum_snp.exome_summary.pass012.ICC9'),
    ('sum_snp2.exome_summary.pass012.CHC10A', 'sum_snp34.exome_summary.pass012.CHC10B', 'sum_snp.exome_summary.pass012.CHC10'),
    ('sum_snp2.exome_summary.pass012.CHC5A', 'sum_snp34.exome_summary.pass012.CHC5B', 'sum_snp.exome_summary.pass012.CHC5'),
    ('sum_snp2.exome_summary.pass012.CHC6A', 'sum_snp34.exome_summary.pass012.CHC6B', 'sum_snp.exome_summary.pass012.CHC6'),
    ('sum_snp2.exome_summary.pass012.CHC7A', 'sum_snp34.exome_summary.pass012.CHC7B', 'sum_snp.exome_summary.pass012.CHC7'),
]
# Process every tumor/normal pair in the original order.
for tumor_table, normal_table, somatic_out in _SAMPLE_TRIPLES:
    tumor_minus_normal_to_somatic(tumor_table, normal_table, somatic_out)
|
[
"sunhanice@gmail.com"
] |
sunhanice@gmail.com
|
d22b6020a2b3d2bfacf12fcb9cb93b0bc3d641d9
|
a30362e51cb3291daf26d0c62e56c42caeec837f
|
/python/codeup/solved/_1068.py
|
87813e822e0529ad4c300ab4f9c21997748b240f
|
[] |
no_license
|
TERADA-DANTE/algorithm
|
03bf52764c6fcdb93d7c8a0ed7a672834f488412
|
20bdfa1a5a6b9c378e588b17073e77a0126f7339
|
refs/heads/master
| 2023-04-14T21:40:11.250022
| 2023-04-12T13:00:37
| 2023-04-12T13:00:37
| 288,335,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
n = int(input())
# Walk the cutoffs from highest to lowest and print the first grade band
# the score reaches; scores below 0 print nothing, exactly as before.
for cutoff, grade in ((90, 'A'), (70, 'B'), (40, 'C'), (0, 'D')):
    if n >= cutoff:
        print(grade)
        break
|
[
"55175301+TERADA-DANTE@users.noreply.github.com"
] |
55175301+TERADA-DANTE@users.noreply.github.com
|
b3faa68ddf38c6d15ad43fc82a48744cdae5c15b
|
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
|
/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_225/run_cfg.py
|
c1d2c055a93f6f5950d43132a49f5e864889fafd
|
[] |
no_license
|
rmanzoni/HTT
|
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
|
a03b227073b2d4d8a2abe95367c014694588bf98
|
refs/heads/master
| 2016-09-06T05:55:52.602604
| 2014-02-20T16:35:34
| 2014-02-20T16:35:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
import FWCore.ParameterSet.Config as cms
import os,sys
# Make the job directory importable so base_cfg (the shared job template)
# can be loaded below.
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs')
# Pull in the full process definition, then override only the input source
# with this job split's file list.
from base_cfg import *
process.source = cms.Source("PoolSource",
    noEventSort = cms.untracked.bool(True),
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_2006.root',
        '/store/cmst3/user/cmgtools/CMG/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_2007.root',
        '/store/cmst3/user/cmgtools/CMG/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_2008.root',
        '/store/cmst3/user/cmgtools/CMG/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_2009.root',
        '/store/cmst3/user/cmgtools/CMG/DY2JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_201.root')
    )
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
fc8f7fd662fe988e7f5f65c94869efdafc5af3eb
|
7f0548b7191b7589712af19baebafddae1d0505f
|
/dojoassignments/python/django/full_stack_django/login_and_registration/apps/login_registration_app/migrations/0001_initial.py
|
2e5994f927a8fa2ce9b4a5d96fd6c594f3453aa5
|
[] |
no_license
|
mtjhartley/codingdojo
|
dd8eab1bd61fb847e44766e89fe3db2340468102
|
65dc558d19adbe62f85ad61c32cb1c392b56567c
|
refs/heads/master
| 2022-12-14T23:06:11.927445
| 2017-08-16T21:08:35
| 2017-08-16T21:08:35
| 92,218,728
| 1
| 5
| null | 2022-12-07T23:59:48
| 2017-05-23T20:46:03
|
Python
|
UTF-8
|
Python
| false
| false
| 884
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-06-20 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the login/registration User table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=45)),
                # created_at/updated_at are maintained automatically by Django.
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
[
"mtjhartley@gmail.com"
] |
mtjhartley@gmail.com
|
1bc2bad1c8d403cdc99de557444a6e0a0f503eb2
|
fe3759747f709a41e5ff3acf78872dd6b74f772a
|
/samples/openapi3/client/petstore/python-experimental/petstore_api/model/animal.py
|
81432c292c6459b54e18f5be8a654084c4f140d5
|
[
"Apache-2.0"
] |
permissive
|
Januson/openapi-generator
|
c50e3b52765e41adba9712d745918cea39dfa490
|
5b6b4c9d4829b57716741dc35b3f1033e5483784
|
refs/heads/master
| 2022-10-19T04:16:38.042495
| 2022-04-23T08:42:21
| 2022-04-23T08:42:21
| 238,659,737
| 0
| 0
|
Apache-2.0
| 2023-09-05T01:01:23
| 2020-02-06T10:12:38
|
Java
|
UTF-8
|
Python
| false
| false
| 2,359
|
py
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
UUIDSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
UUIDBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class Animal(
    DictSchema
):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    # 'className' must always be supplied; it also acts as the discriminator.
    _required_property_names = set((
        'className',
    ))
    className = StrSchema
    color = StrSchema
    @classmethod
    @property
    def _discriminator(cls):
        # Maps the discriminator property value to the concrete subtype
        # schema (Cat/Dog are imported at the bottom of the module).
        return {
            'className': {
                'Cat': Cat,
                'Dog': Dog,
            }
        }
    def __new__(
        cls,
        *args: typing.Union[dict, frozendict, ],
        className: className,
        color: typing.Union[color, Unset] = unset,
        _configuration: typing.Optional[Configuration] = None,
        **kwargs: typing.Type[Schema],
    ) -> 'Animal':
        # Delegate validation/construction to the DictSchema machinery.
        return super().__new__(
            cls,
            *args,
            className=className,
            color=color,
            _configuration=_configuration,
            **kwargs,
        )
from petstore_api.model.cat import Cat
from petstore_api.model.dog import Dog
|
[
"noreply@github.com"
] |
Januson.noreply@github.com
|
fd96964145fbc06b436ee1ecbbf561c15f201c00
|
caf192dbc1ca90fee18bb4ce170d37eb14870ec5
|
/Chapter-5/7. Caesar cipher.py
|
f827a177676fc978c4d7d8bfee8324bfba34dc4a
|
[] |
no_license
|
Dfredude/PythonZelle
|
858b00f5eacce841173c64b3cecd978dedbeb145
|
1923fe84df604968eebc5269f23b7c0f167d55f0
|
refs/heads/main
| 2023-08-30T21:45:57.070344
| 2021-10-17T01:32:57
| 2021-10-17T01:32:57
| 359,041,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
def main():
    """Encrypt a message with a Caesar cipher.

    Prompts for the plaintext and an integer key, shifts every letter by the
    key (wrapping within the 26-letter alphabet), and prints the ciphertext.
    Non-letter characters are passed through unchanged.
    """
    #Get plaintext(p_text) and key(x) from the user
    p_text = input("Enter the message you'd like encrypted.\n")
    # int() instead of eval(): eval would execute arbitrary user input.
    key = int(input("What's the key? : "))
    p_text = p_text.lower()
    #Create string of letters
    table = "abcdefghijklmnopqrstuvwxyz"
    #Convert plaintext to ciphertext(c_text) using cipher loop
    c_text = ""
    for ch in p_text:
        if ch in table:
            # Shift then wrap mod 26. The original wrote
            # `(ord(ch) - 97) + key % 52`, which (a) applied % to the key
            # only (precedence) and (b) used 52 for a 26-char table, so it
            # indexed past the end for many inputs.
            c_text = c_text + table[(ord(ch) - 97 + key) % 26]
        else:
            # Keep spaces/punctuation readable (the original crashed or
            # garbled on non-letters).
            c_text = c_text + ch
    print("Your encoded message is {0}.".format(c_text))
main()
|
[
"dominguezlucio@outlook.com"
] |
dominguezlucio@outlook.com
|
80338f57e4494dc5fd84346bfab8cd6f883a4347
|
b5dabe2e6da0e53498650b3c3f3f944c20f3e050
|
/dolo/compiler/function_compiler_numexpr.py
|
e20ec37370ffaf43ad7e04c17d62a3028aaf64d8
|
[
"BSD-2-Clause"
] |
permissive
|
christophe-gouel/dolo
|
12d582ecf3289aa9168f5d825da83a6284d5a669
|
d9aef6d78d19899e2669e49ee6b7ad9aacf0e35d
|
refs/heads/master
| 2020-12-24T09:31:19.389548
| 2018-01-04T20:42:19
| 2018-01-04T20:42:19
| 6,064,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,105
|
py
|
from __future__ import division
from dolo.symbolic.derivatives import DerivativesTree
from dolo.symbolic.symbolic import TSymbol
from dolo.compiler.function_compiler import compile_multiargument_function as compile_multiargument_function_regular
# Derivative trees in this module operate on time-indexed symbols (TSymbol).
DerivativesTree.symbol_type = TSymbol
def compile_multiargument_function(equations, args_list, args_names, parms, fname='anonymous_function', diff=True, return_text=False, order='rows'):
    """Compile *equations* into a callable using the numexpr backend.

    Thin wrapper: forwards every argument unchanged to the regular compiler
    in ``dolo.compiler.function_compiler`` with ``use_numexpr=True``.
    """
    return compile_multiargument_function_regular(equations, args_list, args_names, parms, fname=fname, diff=diff, return_text=return_text, use_numexpr=True, order=order)
if __name__ == '__main__':
    # Self-test / benchmark: build a small triangular system of symbolic
    # equations, compile it with the numexpr, numpy and theano backends,
    # then time each backend and check the outputs agree.
    import sympy
    from pprint import pprint
    # Model variables and parameters as sympy symbols.
    [w,x,y,z,t] = vars = sympy.symbols('w, x, y, z, t')
    [a,b,c,d] = parms = sympy.symbols('a, b, c, d')
    # State (s) and control (x) symbol groups passed to the compilers.
    [k_1,k_2] = s_sym = sympy.symbols('k_1, k_2')
    [x_1,x_2] = x_sym = sympy.symbols('x_1, x_2')
    args_list = [
        s_sym,
        x_sym
    ]
    from sympy import exp
    eqs = [
        x + y*k_2 + z*exp(x_1 + t),
        (y + z)**0.3,
        z,
        (k_1 + k_2)**0.3,
        k_2**x_1
    ]
    sdict = {s:eqs[i] for i,s in enumerate(vars) }
    from dolo.misc.triangular_solver import solve_triangular_system
    # Order the system so each variable depends only on earlier ones.
    order = solve_triangular_system(sdict, return_order=True)
    ordered_vars = [ v for v in order ]
    ordered_eqs = [ eqs[vars.index(v)] for v in order ]
    pprint(ordered_vars)
    pprint(ordered_eqs)
    import numpy
    floatX = numpy.float32
    s0 = numpy.array( [2,5], dtype=floatX)
    x0 = numpy.array( [2,2], dtype=floatX)
    p0 = numpy.array( [4,3], dtype=floatX)
    # Replicate the test point N times so the benchmark runs on wide arrays.
    N = 2000
    s1 = numpy.column_stack( [s0]*N )
    x1 = numpy.column_stack( [x0]*N )
    p1 = numpy.array( [4,3, 6, 7], dtype=floatX )
    # f = create_fun()
    #
    # test = f(s1,x1,p0)
    # print(test)
    args_names = ['s','x']
    #
    #
    solution = solve_triangular_system(sdict)
    vals = [sympy.sympify(solution[v]) for v in ordered_vars]
    from dolo.compiler.compiling import compile_multiargument_function as numpy_compiler
    from dolo.compiler.compiling_theano import compile_multiargument_function as theano_compiler
    # Compile the same expressions with all three backends.
    f_numexpr = compile_multiargument_function( vals, args_list, args_names, parms )
    f_numpy = numpy_compiler( vals, args_list, args_names, parms )
    f_theano = theano_compiler( vals, args_list, args_names, parms )
    n_exp = 1000
    import time
    r = time.time()
    for i in range(n_exp):
        res_numexpr = f_numexpr(s1,x1,p1)
        # res = numpy.row_stack(res)
    s = time.time()
    print('Time (numexpr) : '+ str(s-r))
    r = time.time()
    for i in range(n_exp):
        res_theano = f_theano(s1,x1,p1)
        # res = numpy.row_stack(res)
    s = time.time()
    print('Time (theano) : '+ str(s-r))
    r = time.time()
    for i in range(n_exp):
        res_numpy = f_numpy(s1,x1,p1)
        # res = numpy.row_stack(res)
    s = time.time()
    print('Time (numpy) : '+ str(s-r))
    # Maximum absolute discrepancy between backends (should be ~0).
    print( abs(res_numpy - res_theano).max() )
    print( abs(res_numexpr - res_numpy).max() )
|
[
"pablo.winant@gmail.com"
] |
pablo.winant@gmail.com
|
91dbf8f944594010b21f4e33cdd5c303b603daa0
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_02_01/models/outbound_nat_rule.py
|
509f9e9922798df037d6dab645f99d2111cc92f6
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,886
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class OutboundNatRule(SubResource):
    """Outbound NAT pool of the load balancer.
    All required parameters must be populated in order to send to Azure.
    :param id: Resource ID.
    :type id: str
    :param allocated_outbound_ports: The number of outbound ports to be used
     for NAT.
    :type allocated_outbound_ports: int
    :param frontend_ip_configurations: The Frontend IP addresses of the load
     balancer.
    :type frontend_ip_configurations:
     list[~azure.mgmt.network.v2018_02_01.models.SubResource]
    :param backend_address_pool: Required. A reference to a pool of DIPs.
     Outbound traffic is randomly load balanced across IPs in the backend IPs.
    :type backend_address_pool:
     ~azure.mgmt.network.v2018_02_01.models.SubResource
    :param provisioning_state: Gets the provisioning state of the PublicIP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """
    # backend_address_pool is the only property Azure requires.
    _validation = {
        'backend_address_pool': {'required': True},
    }
    # Maps Python attribute names to their wire-format JSON paths.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'allocated_outbound_ports': {'key': 'properties.allocatedOutboundPorts', 'type': 'int'},
        'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[SubResource]'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        """Initialize from keyword arguments (AutoRest convention: every model
        value arrives via **kwargs; missing values default to None)."""
        super(OutboundNatRule, self).__init__(**kwargs)
        self.allocated_outbound_ports = kwargs.get('allocated_outbound_ports', None)
        self.frontend_ip_configurations = kwargs.get('frontend_ip_configurations', None)
        self.backend_address_pool = kwargs.get('backend_address_pool', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
bedf0931ccef770750040887a803cdba60d8d515
|
de1f9d660cfb738afdb66e4a2d63a4577c07d9c6
|
/test/webapi/controllers/test_wmts.py
|
ad52d78c1aaca58531a54c0ef0ecba42c5079c04
|
[
"MIT"
] |
permissive
|
rabaneda/xcube
|
db47eb416db85df891a924063482a7943cae9d4f
|
0d38ca513987184dbc4a37da1616e4076964d0f1
|
refs/heads/master
| 2020-11-24T00:11:17.107630
| 2020-02-11T10:11:34
| 2020-02-11T10:11:34
| 227,877,138
| 0
| 0
|
MIT
| 2019-12-13T16:14:51
| 2019-12-13T16:14:50
| null |
UTF-8
|
Python
| false
| false
| 703
|
py
|
import os
import unittest
from test.webapi.helpers import get_res_test_dir, new_test_service_context
from xcube.webapi.controllers.wmts import get_wmts_capabilities_xml
class WmtsControllerTest(unittest.TestCase):
    """Tests for the WMTS capabilities controller."""

    def test_get_wmts_capabilities_xml(self):
        """Generated capabilities XML must match the stored reference file."""
        self.maxDiff = None
        reference_path = os.path.join(get_res_test_dir(), 'WMTSCapabilities.xml')
        with open(reference_path) as reference_file:
            expected = reference_file.read()
        actual = get_wmts_capabilities_xml(new_test_service_context(), 'http://bibo')
        separator = 80 * '='
        print(separator)
        print(actual)
        print(separator)
        # Whitespace-insensitive comparison: strip all spaces on both sides.
        self.assertEqual(expected.replace(' ', ''), actual.replace(' ', ''))
|
[
"norman.fomferra@gmail.com"
] |
norman.fomferra@gmail.com
|
a21dfa9182883f7045cd35880f722f3d9a36a0ab
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/tests/terraform/checks/resource/azure/test_SynapseWorkspaceEnablesDataExfilProtection.py
|
2f0a8e8e46b503edb13ed42ed956bc6d6a70830a
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,453
|
py
|
import unittest
from pathlib import Path
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.azure.SynapseWorkspaceEnablesDataExfilProtection import check
from checkov.terraform.runner import Runner
class TestSynapseWorkspaceEnablesDataExfilProtection(unittest.TestCase):
    """Runner-level test for the SynapseWorkspaceEnablesDataExfilProtection check."""

    def test(self):
        # given: example terraform files for this check
        example_dir = Path(__file__).parent / "example_SynapseWorkspaceEnablesDataExfilProtection"

        # when: run only this check over the examples
        report = Runner().run(
            root_folder=str(example_dir),
            runner_filter=RunnerFilter(checks=[check.id]),
        )

        # then: summary counts and the exact resource sets match expectations
        summary = report.get_summary()
        expected_passing = {
            "azurerm_synapse_workspace.pass",
        }
        expected_failing = {
            "azurerm_synapse_workspace.fail",
            "azurerm_synapse_workspace.fail2",
        }
        self.assertEqual(summary["passed"], 1)
        self.assertEqual(summary["failed"], 2)
        self.assertEqual(summary["skipped"], 0)
        self.assertEqual(summary["parsing_errors"], 0)
        self.assertEqual(summary["resource_count"], 3)  # 3 unknown
        self.assertEqual(expected_passing, {c.resource for c in report.passed_checks})
        self.assertEqual(expected_failing, {c.resource for c in report.failed_checks})


if __name__ == "__main__":
    unittest.main()
|
[
"noreply@github.com"
] |
bridgecrewio.noreply@github.com
|
1ebd7b2c006bec2429d3ea7c144429ca6a16ab58
|
34599596e145555fde0d4264a1d222f951f49051
|
/pcat2py/class/235864d6-5cc5-11e4-af55-00155d01fe08.py
|
203705756d386be4768e626b13c813ce06acf1fd
|
[
"MIT"
] |
permissive
|
phnomcobra/PCAT2PY
|
dc2fcbee142ce442e53da08476bfe4e68619346d
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
refs/heads/master
| 2021-01-11T02:23:30.669168
| 2018-02-13T17:04:03
| 2018-02-13T17:04:03
| 70,970,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
#!/usr/bin/python
################################################################################
# 235864d6-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """Compliance check: /etc/group must be group-owned by root.

    ``check`` inspects ``ls -l /etc/group`` output; ``fix`` repairs the
    group ownership. *cli* is any object exposing ``system(cmd) -> str``.
    """
    def __init__(self):
        self.output = []          # captured stdout lines from the last check
        self.is_compliant = False
        self.uuid = "235864d6-5cc5-11e4-af55-00155d01fe08"
    def check(self, cli):
        """Run the check through *cli* and return compliance as a bool."""
        # Initialize Compliance
        self.is_compliant = False
        # Execute command and capture standard output
        stdout = cli.system("ls -l /etc/group")
        # Split output lines
        self.output = stdout.split('\n')
        # Process standard output
        for line in self.output:
            # split() with no argument collapses runs of whitespace; the
            # original split(' ') produced empty fields on aligned ls output
            # and then compared the wrong column.
            subStrings = line.split()
            # Guard before indexing: field 3 is the group owner in ls -l.
            if len(subStrings) > 3 and subStrings[3] == "root":
                self.is_compliant = True
        return self.is_compliant
    def fix(self, cli):
        """Repair by changing the group owner of /etc/group to root."""
        cli.system("chgrp root /etc/group")
|
[
"phnomcobra@gmail.com"
] |
phnomcobra@gmail.com
|
d09e8f1f8f6ce69f17db42f0cc74904c1ba4e74e
|
e48375c39c0d1fc71742b1964dffdd3af0ff86c0
|
/nlu/components/sentence_detectors/deep_sentence_detector/deep_sentence_detector.py
|
ab4ea95db84960ec483781f792af9daed7b121c3
|
[
"Apache-2.0"
] |
permissive
|
ahmedlone127/nlu
|
b8da5a84f0e47640cb09616559bf8b84c259f278
|
614bc2ff94c80a7ebc34a78720ef29a1bf7080e0
|
refs/heads/master
| 2023-02-09T05:10:29.631583
| 2022-05-20T15:16:33
| 2022-05-20T15:16:33
| 325,437,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
from sparknlp.annotator import *
class SentenceDetectorDeep:
    """Factory helpers returning Spark NLP SentenceDetectorDLModel annotators."""
    @staticmethod
    def get_default_model():
        """Return the default pretrained sentence-detector model, reading
        'document' columns and writing 'sentence'."""
        return SentenceDetectorDLModel\
            .pretrained()\
            .setInputCols(["document"]) \
            .setOutputCol("sentence")
    @staticmethod
    def get_pretrained_model(name,lang, bucket=None):
        """Return the pretrained model identified by name/lang/bucket.

        Bug fix: the original chained a second bare ``.pretrained()`` after
        ``pretrained(name, lang, bucket)``, which discarded the requested
        model and fetched the default one instead.
        """
        return SentenceDetectorDLModel.pretrained(name,lang,bucket) \
            .setInputCols(["document"]) \
            .setOutputCol("sentence")
#
#
# @staticmethod
# def get_trainable_model():
# return SentenceDetectorDLApproach \
# .setInputCol("document") \
# .setOutputCol("sentence")
|
[
"christian.kasim.loan@gmail.com"
] |
christian.kasim.loan@gmail.com
|
70568dbd8fea74a804629bbf8c0ba8699ea10aaf
|
b0d7d91ccb7e388829abddb31b4aa04a2f9365cd
|
/archive-20200922/uncategorized/quick_palindrome_check.py
|
4e1d9675666f0b9bddffa3ece524d351e0e26a37
|
[] |
no_license
|
clarkngo/python-projects
|
fe0e0aa02896debe82d1e9de84b1ae7d00932607
|
139a20063476f9847652b334a8495b7df1e80e27
|
refs/heads/master
| 2021-07-02T10:45:31.242041
| 2020-10-25T08:59:23
| 2020-10-25T08:59:23
| 188,570,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
# function which return reverse of a string
def reverse(s):
    """Return *s* reversed (negative-step slice)."""
    return s[::-1]
def isPalindrome(s):
    """Return True when *s* reads the same forwards and backwards."""
    # Compare directly with the reversal instead of branching just to
    # return literal True/False.
    return s == reverse(s)
# Driver code
s = "malayalam"
ans = isPalindrome(s)
if ans:  # ans is already a bool; no need for `== 1`
    print("Yes")
else:
    print("No")
|
[
"clarkngo@gmail.com"
] |
clarkngo@gmail.com
|
848b00dce8c68b93c85b751b4d5c57683f6980f1
|
2ed86a79d0fcd299ad4a01310954c5eddcf01edf
|
/homeassistant/components/airzone/coordinator.py
|
ba0296557a1be58bacea112719a507f82be0fb6b
|
[
"Apache-2.0"
] |
permissive
|
konnected-io/home-assistant
|
037f12c87bb79e19220192eb918e49db1b1a8b3e
|
2e65b77b2b5c17919939481f327963abdfdc53f0
|
refs/heads/dev
| 2023-05-11T08:57:41.891518
| 2023-05-07T20:03:37
| 2023-05-07T20:03:37
| 109,931,626
| 24
| 10
|
Apache-2.0
| 2023-02-22T06:24:01
| 2017-11-08T05:27:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
"""The Airzone integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Any
from aioairzone.exceptions import AirzoneError
from aioairzone.localapi import AirzoneLocalApi
import async_timeout
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import AIOAIRZONE_DEVICE_TIMEOUT_SEC, DOMAIN
SCAN_INTERVAL = timedelta(seconds=60)
_LOGGER = logging.getLogger(__name__)
class AirzoneUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
    """Class to manage fetching data from the Airzone device."""
    def __init__(self, hass: HomeAssistant, airzone: AirzoneLocalApi) -> None:
        """Initialize."""
        # Local-API client used for all polling below.
        self.airzone = airzone
        super().__init__(
            hass,
            _LOGGER,
            name=DOMAIN,
            update_interval=SCAN_INTERVAL,
        )
    async def _async_update_data(self) -> dict[str, Any]:
        """Update data via library."""
        # Bound the whole refresh so an unresponsive device cannot hang the
        # coordinator beyond AIOAIRZONE_DEVICE_TIMEOUT_SEC.
        async with async_timeout.timeout(AIOAIRZONE_DEVICE_TIMEOUT_SEC):
            try:
                await self.airzone.update()
            except AirzoneError as error:
                # Re-raise as UpdateFailed so the coordinator records the
                # failure and schedules a retry.
                raise UpdateFailed(error) from error
            return self.airzone.data()
|
[
"noreply@github.com"
] |
konnected-io.noreply@github.com
|
3016c687ec5ae81b1cd9d16c05eb06f58500219f
|
968968aa5e81043cad5af6883f23ef077c36b65f
|
/load_model.py
|
87518857933f46b083d4611584a50ca9100d20e9
|
[] |
no_license
|
Guya-LTD/profanity-detector
|
59dbcb2e3e2fe4eba29cd49f5f028c48413f035f
|
ba957c42c4d14dd3c68ef2c48fce317e9db17f8f
|
refs/heads/main
| 2023-02-11T18:26:59.205036
| 2021-01-10T06:41:25
| 2021-01-10T06:41:25
| 307,553,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
import numpy as np
import joblib
def _get_profane_prob(prob):
return prob[1]
def predict(lang, texts):
vectorizer = joblib.load(lang + '/vectorizer.joblib')
model = joblib.load(lang + '/model.joblib')
return model.predict(vectorizer.transform(texts))
def predict_prob(lang, texts):
vectorizer = joblib.load(lang + '/vectorizer.joblib')
model = joblib.load(lang + '/model.joblib')
return np.apply_along_axis(_get_profane_prob, 1, model.predict_proba(vectorizer.transform(texts)))
|
[
"simonbelete@gmail.com"
] |
simonbelete@gmail.com
|
e6ea4e632b0b731721851c7db5ec5498ae307b76
|
3cb06711ab1a6e379e5778456fce5770ac994ba9
|
/python/wait_functions_test_py3.py
|
02cab39f268b7e1880b29bbcbcffa372099fe449
|
[
"MIT"
] |
permissive
|
glenn-edgar/chain_flow
|
7e8238c1f5e5c00f4c5906e2eb356d33c2b4696c
|
750a9b126de04e46b71a58c5bd3e7500c4d26459
|
refs/heads/master
| 2021-01-02T22:41:30.066536
| 2017-09-05T19:34:57
| 2017-09-05T19:34:57
| 99,368,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
from py_cf_py3.chain_flow_py3 import CF_Base_Interpreter
def test_function_1( cf_handle, chainObj, parameters, event ):
print("test function 1 ",event)
def wait_test_function( cf_handle, chainObj, parameters, event ):
    """Chain wait-function: succeed after ``parameters[1]`` TIME_TICK events.

    On INIT a tick counter is appended to *parameters*; each TIME_TICK
    increments it. Returns True once the counter reaches the threshold
    stored at ``parameters[1]``, False otherwise.
    """
    print("event", event)
    event_name = event["name"]
    if event_name == "INIT":
        parameters.append(0)
    done = False
    if event_name == "TIME_TICK":
        parameters[-1] += 1
        done = parameters[-1] >= parameters[1]
    return done
cf = CF_Base_Interpreter()
# Chain 1: exercises wait_tod, then runs a one-shot step and resets.
cf.define_chain("Chain_1", False) # wait_tod
cf.insert.log("Chain 1 started")
cf.insert.wait_tod( "*","*","*",15 ) # presumably waits until the seconds field == 15 -- confirm wait_tod semantics
cf.insert.one_step( test_function_1)
cf.insert.log("Chain 1 is reset")
cf.insert.reset( )
# Chain 2: exercises the >=/<= time-of-day waits with a check_event between.
cf.define_chain("Chain_2",False) # wait_tod_ge wait_tod_le
cf.insert.log("Chain 2 started")
cf.insert.wait_tod_ge( "*","*","*",45 ) # presumably waits until seconds >= 45 (the original comment said 15) -- confirm
cf.insert.check_event( test_function_1, "TIME_TICK" )
cf.insert.wait_tod_le( "*","*","*",15) # presumably waits until seconds <= 15 -- confirm
cf.insert.reset( )
# Chain 3: exercises wait_event_count (fires after 10 events).
cf.define_chain("Chain_3",False) #wait_event_count
cf.insert.log("Chain 3 started")
cf.insert.wait_event_count(count = 10)
cf.insert.one_step( test_function_1)
cf.insert.reset()
# Chain 4: exercises a custom wait function (wait_test_function, threshold 10).
cf.define_chain("Chain_4",True) # wait_function
cf.insert.log("Chain 4 has started")
cf.insert.wait_function(wait_test_function, 10 )
cf.insert.log("Chain 4 is ended ")
cf.insert.reset()
cf.execute()
|
[
"glenn-edgar@onyxengr.com"
] |
glenn-edgar@onyxengr.com
|
d82a7c81e00fa27c5ad59a4fc4811c1928d2518e
|
63daf225819636397fda6ef7e52783331c27f295
|
/taobao-sdk/top/api/rest/TmallProductSpecsGetRequest.py
|
b7150211c3b1b6c63e9ce9e9c0ee66bd56c5f336
|
[] |
no_license
|
cash2one/language-Python
|
e332ecfb4e9321a11407b29987ee64d44e552b15
|
8adb4f2fd2f023f9cc89b4edce1da5f71a3332ab
|
refs/heads/master
| 2021-06-16T15:15:08.346420
| 2017-04-20T02:44:16
| 2017-04-20T02:44:16
| 112,173,361
| 1
| 0
| null | 2017-11-27T09:08:57
| 2017-11-27T09:08:57
| null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
'''
Created by auto_sdk on 2014.02.28
'''
from top.api.base import RestApi
class TmallProductSpecsGetRequest(RestApi):
    """Request wrapper for the ``tmall.product.specs.get`` TOP API call."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; the caller populates these before executing.
        self.cat_id = None
        self.product_id = None
        self.properties = None

    def getapiname(self):
        return 'tmall.product.specs.get'
|
[
"a@ie9.org"
] |
a@ie9.org
|
ce502221c2081beadd2ed01aa5ddd02cf7cf7901
|
89a90707983bdd1ae253f7c59cd4b7543c9eda7e
|
/data_structures_and_algorithms_in_python/ch04/power_fast.py
|
c7f98d650facb9e5b5bb39c4db5cd09f1ee64c4c
|
[] |
no_license
|
timothyshull/python_reference_code
|
692a7c29608cadfd46a6cc409a000023e95b9458
|
f3e2205dd070fd3210316f5f470d371950945028
|
refs/heads/master
| 2021-01-22T20:44:07.018811
| 2017-03-17T19:17:22
| 2017-03-17T19:17:22
| 85,346,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
def power(x, n):
    """Return x**n for a non-negative integer n via recursive squaring.

    Runs in O(log n) multiplications: square the half-power, then multiply
    by one extra factor of x when n is odd.
    """
    if n == 0:
        return 1
    half_exp, remainder = divmod(n, 2)   # truncated division, as before
    half_power = power(x, half_exp)
    squared = half_power * half_power
    return squared * x if remainder else squared
|
[
"timothyshull@gmail.com"
] |
timothyshull@gmail.com
|
e8fb8b6c7a2c7ba04314e431ec618dd22761941e
|
612325535126eaddebc230d8c27af095c8e5cc2f
|
/src/build/android/pylib/utils/device_dependencies.py
|
c448396fbc0ab0c74370a723afeb7c9fb47be053
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/proto-quic_1V94
|
1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673
|
feee14d96ee95313f236e0f0e3ff7719246c84f7
|
refs/heads/master
| 2023-04-01T14:36:53.888576
| 2019-10-17T02:23:04
| 2019-10-17T02:23:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,315
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from pylib import constants
# Host files matching any of these patterns must never be pushed to the
# device as test data dependencies.
_BLACKLIST = [
    re.compile(r'.*OWNERS'),  # Should never be included.
    re.compile(r'.*\.crx'),  # Chrome extension zip files.
    re.compile(r'.*\.so'),  # Libraries packed into .apk.
    re.compile(r'.*Mojo.*manifest\.json'),  # Some source_set()s pull these in.
    re.compile(r'.*\.py'),  # Some test_support targets include python deps.
    re.compile(r'.*\.stamp'),  # Stamp files should never be included.
    # Some test_support targets include python deps.
    re.compile(r'.*\.mojom\.js'),
    # Chrome external extensions config file.
    re.compile(r'.*external_extensions\.json'),
    # Exists just to test the compile, not to be run.
    re.compile(r'.*jni_generator_tests'),
    # v8's blobs get packaged into APKs.
    re.compile(r'.*natives_blob.*\.bin'),
    re.compile(r'.*snapshot_blob.*\.bin'),
]
def DevicePathComponentsFor(host_path, output_directory):
  """Return the device path components for a given host path.

  The result is a list of joinable path components whose first element is
  None, indicating the path should be rooted at $EXTERNAL_STORAGE, e.g.
  '$CHROMIUM_SRC/foo/bar/baz.txt' -> [None, 'foo', 'bar', 'baz.txt'].

  Two classes of paths are handled specially:
    - .pak files map to a top-level paks/ directory.
    - Files under the output directory are mapped relative to the output
      directory rather than the source root, e.g.
      '$CHROMIUM_SRC/out/Release/icu_fake_dir/icudtl.dat' ->
      [None, 'icu_fake_dir', 'icudtl.dat'].

  Args:
    host_path: The absolute path to the host file.
    output_directory: The build output directory on the host.
  Returns:
    A list of device path components.
  """
  if host_path.startswith(output_directory):
    if os.path.splitext(host_path)[1] == '.pak':
      return [None, 'paks', os.path.basename(host_path)]
    relative = os.path.relpath(host_path, output_directory)
  else:
    relative = os.path.relpath(host_path, constants.DIR_SOURCE_ROOT)

  # Split the relative path into its individual components.
  components = []
  remainder = relative
  while remainder:
    remainder, tail = os.path.split(remainder)
    if tail:
      components.append(tail)
  components.reverse()
  return [None] + components
def GetDataDependencies(runtime_deps_path):
  """Return a list of device data dependencies.

  Args:
    runtime_deps_path: A str path to the .runtime_deps file (may be falsy,
        in which case no dependencies are returned).
  Returns:
    A list of (host_path, device_path_components) tuples.
  """
  if not runtime_deps_path:
    return []

  with open(runtime_deps_path, 'r') as deps_file:
    rel_paths = [line.strip() for line in deps_file if line]

  out_dir = constants.GetOutDirectory()
  # Resolve each entry relative to the output directory and drop anything
  # matching a blacklist pattern.
  host_files = []
  for rel in rel_paths:
    abs_path = os.path.abspath(os.path.join(out_dir, rel))
    if not any(pattern.match(abs_path) for pattern in _BLACKLIST):
      host_files.append(abs_path)

  return [(path, DevicePathComponentsFor(path, out_dir))
          for path in host_files]
|
[
"2100639007@qq.com"
] |
2100639007@qq.com
|
b1dea3c4983f09b3a6dc08bf597ea9ff4f8bd617
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2158/60876/250371.py
|
2410110cab9dc4be3dd9ff187554b0e5447e6868
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
string=input()
index=0
while string[index]==' ':
index+=1
temp=""
if string[index]=='-':
temp+="-"
index+=1
while index<len(string) and string[index].isdigit():
temp+=string[index]
index+=1
if int(temp)<-2**(31):
print( -2**(31))
else:
print(temp)
elif not string[index].isdigit():
print(0)
else:
while index<len(string) and string[index].isdigit():
temp+=string[index]
index+=1
if int(temp)>2**31-1:
print(2**31-1)
else:
print(temp)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
4371051b460fbdb7f7e35435ddd12876a32f7a6e
|
21b0b4c27193898207751c91b8b2ed168a1b1638
|
/py/py_0198_ambiguous_numbers.py
|
ca312ac6f6f03c67908c6bd6ae8705a25e557c7b
|
[
"MIT"
] |
permissive
|
lcsm29/project-euler
|
67560a4e66968f1671a3d7ecf2dda6c956893dca
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
refs/heads/main
| 2023-07-04T11:45:24.374841
| 2021-08-07T08:20:41
| 2021-08-07T08:20:41
| 371,808,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
# Solution of;
# Project Euler Problem 198: Ambiguous Numbers
# https://projecteuler.net/problem=198
#
# A best approximation to a real number $x$ for the denominator bound $d$ is a
# rational number $\frac r s$ (in reduced form) with $s \le d$, so that any
# rational number $\frac p q$ which is closer to $x$ than $\frac r s$ has $q >
# d$. Usually the best approximation to a real number is uniquely determined
# for all denominator bounds. However, there are some exceptions, e. g. $\frac
# 9 {40}$ has the two best approximations $\frac 1 4$ and $\frac 1 5$ for the
# denominator bound $6$. We shall call a real number $x$ ambiguous, if there
# is at least one denominator bound for which $x$ possesses two best
# approximations. Clearly, an ambiguous number is necessarily rational. How
# many ambiguous numbers $x=\frac p q, 0 < x < \frac 1 {100}$, are there whose
# denominator $q$ does not exceed $10^8$?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    # Placeholder solver for Project Euler 198 -- the actual algorithm has not
    # been implemented yet; `timed.caller` below just benchmarks this no-op.
    pass
if __name__ == '__main__':
    n = 1000       # problem-size argument passed through to the solver
    i = 10000      # number of timing iterations (per the `timed` harness; TODO confirm)
    prob_id = 198  # Project Euler problem id, used for reporting
    timed.caller(dummy, n, i, prob_id)
|
[
"lcsm29@outlook.com"
] |
lcsm29@outlook.com
|
cac2318a8b307ad741c58dda75e970b204bed67a
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_4/bgrtej001/piglatin.py
|
8a3ae80180102da97b97c2eee4594a3e8512b2c3
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
#Tejasvin Bagirathi
#Assignment 4, Question 3
def toPigLatin(s):
    """Translate a space-separated sentence into this assignment's pig latin.

    Vowel-initial words get 'way' appended; consonant-initial words have the
    leading consonant cluster moved to the end, joined by 'a' and suffixed
    with 'ay' (e.g. 'hello' -> 'elloahay').

    Bug fixed: the original compared `wrdno == i` where i only ever reaches
    wrdno - 1, so the "last word" branch was dead code and the result always
    carried a trailing space. Joining the words instead removes it.
    """
    pieces = []
    for wrd in s.split(" "):
        if wrd[0] in "aeiou":
            pieces.append(wrd + "way")
        else:
            # count the leading consonant cluster (whole word if no vowel)
            k = 0
            for c in wrd:
                if c in "aeiou":
                    break
                k += 1
            pieces.append(wrd[k:] + "a" + wrd[0:k] + "ay")
    return " ".join(pieces)
def toEnglish(s):
    """Translate a pig-latin sentence back to English.

    Words ending in 'way' have the suffix stripped; words ending in 'ay'
    have the inserted 'a' located (rightmost 'a' of the stem) and the moved
    consonant cluster restored to the front. Each recovered word is followed
    by a space, so the result carries a trailing space, matching the
    original implementation. Words matching neither suffix are dropped.
    """
    result = ""
    for token in s.split():
        if token[-3:] == "way":
            result += token[:-3] + " "
        elif token[-2:] == "ay":
            stem = token[:-2]
            marker = stem.rfind("a")
            result += stem[marker + 1:] + stem[:marker] + " "
    return result
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
9fb17ce7b6fb0a7b73112825f591381e23c30c80
|
fe70774ff6898c5bdb0c941b4f335de576abfdb6
|
/autotest/test_flopy_io.py
|
bb09cd2207661c1b0258d7feb56b3d6788f12990
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
robinthibaut/flopy
|
35af468415d1ba6e1de119a7cb335381304fada9
|
22ef330bcfb9259fc23735d6b174d27804b624a0
|
refs/heads/develop
| 2023-06-30T21:43:24.101593
| 2023-06-13T19:46:03
| 2023-06-13T19:46:03
| 255,560,877
| 0
| 0
|
BSD-3-Clause
| 2022-10-10T12:23:38
| 2020-04-14T09:05:42
| null |
UTF-8
|
Python
| false
| false
| 3,153
|
py
|
import os
import platform
from os import getcwd
from os.path import relpath, splitdrive
from pathlib import Path
from shutil import which
import pytest
from modflow_devtools.markers import requires_exe
from modflow_devtools.misc import set_dir
from flopy.utils.flopy_io import line_parse, relpath_safe
def test_line_parse():
    """t027 test line_parse method in MNW2 Package class"""
    # ensure that line_parse is working correctly
    # comment handling: everything after ';' must be stripped, leaving only
    # the whitespace-separated tokens before it
    line = line_parse("Well-A -1 ; 2a. WELLID,NNODES")
    assert line == ["Well-A", "-1"]
@requires_exe("mf6")
@pytest.mark.parametrize("scrub", [True, False])
@pytest.mark.parametrize("use_paths", [True, False])
def test_relpath_safe(function_tmpdir, scrub, use_paths):
    """Check relpath_safe() for str and Path inputs, on and off-drive.

    Expectations:
      - On Windows, when the tmpdir and cwd are on different drives, no
        relative path exists, so relpath_safe must fall back to the
        absolute path.
      - Otherwise it behaves like os.path.relpath against the given start.
      - With scrub=True the user's login name is replaced by '***'.
    """
    if (
        platform.system() == "Windows"
        and splitdrive(function_tmpdir)[0] != splitdrive(getcwd())[0]
    ):
        # cross-drive case: relative paths are impossible, expect absolutes
        if use_paths:
            assert (
                Path(relpath_safe(function_tmpdir))
                == function_tmpdir.absolute()
            )
            assert relpath_safe(Path(which("mf6"))) == str(
                Path(which("mf6")).absolute()
            )
        else:
            assert (
                Path(relpath_safe(str(function_tmpdir)))
                == function_tmpdir.absolute()
            )
            assert relpath_safe(which("mf6")) == str(
                Path(which("mf6")).absolute()
            )
    else:
        # same-drive case: results must match os.path.relpath semantics
        if use_paths:
            assert Path(
                relpath_safe(function_tmpdir, function_tmpdir.parent)
            ) == Path(function_tmpdir.name)
            assert (
                Path(
                    relpath_safe(
                        function_tmpdir, function_tmpdir.parent.parent
                    )
                )
                == Path(function_tmpdir.parent.name) / function_tmpdir.name
            )
            assert relpath_safe(Path(which("mf6"))) == relpath(
                Path(which("mf6")), Path(getcwd())
            )
        else:
            assert Path(
                relpath_safe(str(function_tmpdir), str(function_tmpdir.parent))
            ) == Path(function_tmpdir.name)
            assert (
                Path(
                    relpath_safe(
                        str(function_tmpdir),
                        str(function_tmpdir.parent.parent),
                    )
                )
                == Path(function_tmpdir.parent.name) / function_tmpdir.name
            )
            assert relpath_safe(which("mf6")) == relpath(
                which("mf6"), getcwd()
            )
    # test user login obfuscation
    with set_dir("/"):
        try:
            login = os.getlogin()
            if use_paths:
                p = relpath_safe(Path.home(), scrub=scrub)
            else:
                p = relpath_safe(str(Path.home()), scrub=scrub)
            if login in str(Path.home()) and scrub:
                assert "***" in p
                assert login not in p
        except OSError:
            # OSError is possible in CI, e.g. 'No such device or address'
            pass
|
[
"noreply@github.com"
] |
robinthibaut.noreply@github.com
|
2676c8b70cc62e532379d2c46e363e54f2d94d14
|
97999ecca9e50972cc9e80df27d4768d83498dba
|
/credentials/migrations/0008_aboutme_resume.py
|
0ca7767e117e9ddde7e97056617a2f2465605750
|
[] |
no_license
|
apatten001/portfolio
|
c79312d13f7a75f909e2d4d66ab6ef275b69543e
|
4fdb503afccea83b849b62e3b12539e25a0b722f
|
refs/heads/master
| 2020-04-25T05:45:20.946946
| 2019-03-07T16:53:00
| 2019-03-07T16:53:00
| 172,554,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# Generated by Django 2.1.5 on 2019-02-27 19:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a `resume` FileField to the AboutMe model,
    # defaulting to the bundled 'Arnold_Resume.pdf'.

    dependencies = [
        ('credentials', '0007_auto_20190128_1630'),
    ]
    operations = [
        migrations.AddField(
            model_name='aboutme',
            name='resume',
            field=models.FileField(default='Arnold_Resume.pdf', upload_to=''),
        ),
    ]
|
[
"apatten001@yahoo.com"
] |
apatten001@yahoo.com
|
bf87b37a2e04bb39ba5a09c86b581bd34be15a03
|
cde373aef58da4226bfadee3d1a7086d22f33414
|
/Matplotlib/20-AddingMoreIndicatorData.py
|
6deebcb3c6cd32cf086b49db0aff5da22174f70c
|
[] |
no_license
|
ravi4all/ML_WeekEnd_Feb
|
6c66c6e6845062928834986980e5c229a19da6cd
|
43891ff36cfcd557861b4eebb99c44c68d24954e
|
refs/heads/master
| 2021-01-09T06:10:34.007131
| 2017-06-12T03:57:54
| 2017-06-12T03:57:54
| 80,917,805
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,150
|
py
|
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
from matplotlib.finance import candlestick_ohlc
from matplotlib import style
import numpy as np
import urllib
import datetime as dt
#style.use('ggplot')
style.use('fivethirtyeight')
MA1 = 10
MA2 = 30
# Will give the moving average
def moving_average(values, window):
    """Return the simple moving average of *values* over *window* samples.

    Uses a 'valid' convolution with a uniform kernel, so the result is
    len(values) - window + 1 points long.
    """
    kernel = np.full(window, 1.0 / window)
    return np.convolve(values, kernel, 'valid')
def high_minus_low(highs, lows):
    # Per-sample trading range (high price minus low price); used as the
    # indicator plotted in the top subplot of graph_data().
    return highs-lows
# fmt - format
# fmt - format
def bytespdate2num(fmt, encoding='utf-8'):
    # Build a numpy.loadtxt converter: decodes a bytes column, then turns the
    # date string into a matplotlib date number.
    # NOTE(review): mdates.strpdate2num was removed from modern matplotlib --
    # this only runs on old matplotlib versions; confirm before reuse.
    strconverter = mdates.strpdate2num(fmt)
    def bytesconverter(b):
        # decode bytes from loadtxt before handing off to the date parser
        a = b.decode(encoding)
        return strconverter(a)
    return bytesconverter
def graph_data(stock):
    # Fetch one year of OHLC data for `stock` and draw a three-panel chart:
    # high-low spread (top), candlesticks with annotations (middle), and two
    # moving averages (bottom).
    # NOTE(review): the Yahoo chartapi endpoint below has been retired, and
    # matplotlib.finance was removed from matplotlib -- this function only
    # works with legacy dependencies; confirm before reuse.
    fig = plt.figure()
    # ax1 is a subplot
    ax1 = plt.subplot2grid((6,1),(0,0), rowspan=1, colspan=1)
    plt.title(stock)
    ax2 = plt.subplot2grid((6,1),(1,0), rowspan=4, colspan=1)
    plt.xlabel('Date')
    plt.ylabel('Price')
    ax3 = plt.subplot2grid((6,1),(5,0), rowspan=1, colspan=1)
    stock_price_url = 'http://chartapi.finance.yahoo.com/instrument/1.0/'+stock+'/chartdata;type=quote;range=1y/csv'
    source_code = urllib.request.urlopen(stock_price_url).read().decode()
    stock_data = []
    split_source = source_code.split('\n')
    # keep only the 6-column CSV data rows, skipping header/metadata lines
    for line in split_source:
        split_line = line.split(',')
        if len(split_line) == 6:
            if 'values' not in line and 'labels' not in line:
                stock_data.append(line)
    date, closep, highp, lowp, openp, volume = np.loadtxt(stock_data,
                                                          delimiter = ',',
                                                          unpack = True,
                                                          converters={0: bytespdate2num('%Y%m%d')})
    x = 0
    y = len(date)
    # OHLC : open high low close
    ohlc = []
    while x < y:
        append_me = date[x], openp[x], highp[x], lowp[x], closep[x], volume[x]
        ohlc.append(append_me)
        x += 1
    ma1 = moving_average(closep, MA1)
    ma2 = moving_average(closep, MA2)
    # align both moving-average series to the longest common date range
    start = len(date[MA2-1:])
    h_l = list(map(high_minus_low, highp, lowp))
    ax1.plot_date(date, h_l, '-')
    candlestick_ohlc(ax2, ohlc, width=0.4, colorup='g', colordown='r')
    for label in ax2.xaxis.get_ticklabels():
        label.set_rotation(45)
    ax2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    ax2.xaxis.set_major_locator(mticker.MaxNLocator(10))
    ax2.grid(True)
    bbox_props = dict(boxstyle='larrow', fc='w', ec='k', lw=1)
    # to display the last price
    ax2.annotate(str(closep[-1]), (date[-1], closep[-1]),
                 xytext = (date[-1]+4, closep[-1]), bbox=bbox_props)
    ax3.plot(date[-start:], ma1[-start:])
    ax3.plot(date[-start:], ma2[-start:])
    #plt.legend()
    plt.subplots_adjust(left=0.11, bottom=0.24, right=0.90, top=0.90, wspace=0.2, hspace=0)
    plt.show()
# demo entry point: chart eBay's last year of prices
graph_data('ebay')
|
[
"noreply@github.com"
] |
ravi4all.noreply@github.com
|
d3b5d2220dfd64a054fc44c58b941464e11c9a62
|
bb2b6422476f5bd80171a31517465f9f62e15558
|
/catkin_ws/build/scan_tools/laser_ortho_projector/catkin_generated/pkg.installspace.context.pc.py
|
a7beec3bd992862819cd8c913a124d3586a9795b
|
[] |
no_license
|
Forrest-Z/MyKitAgv
|
ccd7b1c5fdb3a046bc5267d1827c4a08d89e74a4
|
db9506ad8c8a9012fb49775e188932e28526337e
|
refs/heads/master
| 2022-12-07T17:49:23.140713
| 2020-09-07T14:25:04
| 2020-09-07T14:25:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated by the catkin build system for the laser_ortho_projector
# package -- edit the package.xml/CMakeLists.txt and regenerate, not this file.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nodelet;sensor_msgs;tf;pcl_ros;pcl_conversions;geometry_msgs;message_filters".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-llaser_ortho_projector".split(';') if "-llaser_ortho_projector" != "" else []
PROJECT_NAME = "laser_ortho_projector"
PROJECT_SPACE_DIR = "/home/nhamtung/TungNV/MyKitAgv/catkin_ws/install"
PROJECT_VERSION = "0.3.2"
|
[
"nhamtung125@gmail.com"
] |
nhamtung125@gmail.com
|
34c9d63c64f37b6a17a2adfae7b3bb9d3677a416
|
0130c8b14927097663157846adc4b146d67d2fda
|
/tests/common/test_run/softplus_run.py
|
72090ba2620e11675993ae68cec770d88f6b7703
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-3-Clause",
"NCSA",
"LLVM-exception",
"Zlib",
"BSD-2-Clause",
"MIT"
] |
permissive
|
Shigangli/akg
|
e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc
|
3766c54e0b109541932d147a6b5643a334b82403
|
refs/heads/master
| 2023-09-06T05:13:40.571583
| 2021-11-23T03:44:54
| 2021-11-23T03:44:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""run function for softplus"""
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import softplus
from tests.common.gen_random import random_gaussian
from tests.common.base import get_rtol_atol
def softplus_run(shape, dtype, attrs):
    """Build, run, and verify the akg softplus kernel for one test case.

    Returns (inputs, output, expect, pass_flag) where pass_flag indicates
    whether the kernel output matches the numpy reference within the
    per-dtype tolerances.
    """
    # compile the softplus op for this shape/dtype
    mod = utils.op_build_test(softplus.softplus, [shape], [dtype],
                              kernel_name="softplus", attrs=attrs)
    expect, inputs, output = gen_data(dtype, shape)
    # launch the kernel; `output` is an in/out placeholder filled by the run
    output = utils.mod_launch(mod, (inputs, output), expect=expect)
    rtol, atol = get_rtol_atol("softplus", dtype)
    TestCase_Result = compare_tensor(
        output, expect, rtol=rtol, atol=atol, equal_nan=False)
    return inputs, output, expect, TestCase_Result
def gen_data(dtype, shape):
    """Generate the reference output, random input, and output placeholder.

    The reference uses the numerically stable softplus identity
    log(1 + e^x) = log1p(e^{-|x|}) + max(x, 0).
    """
    data = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
    reference = np.log1p(np.exp(-np.abs(data))) + np.maximum(data, 0)
    # placeholder is NaN-filled so unwritten elements are caught by comparison
    placeholder = np.full(shape, np.nan, dtype)
    return reference, data, placeholder
|
[
"1027252281@qq.com"
] |
1027252281@qq.com
|
43a0cab3c9c839ec46266c935ecdf82958e35ef6
|
ba3c06f9ae89479fa4987fe841ac09b5b5d71383
|
/python_for_kids/book/Projects/monster6.py
|
a10a99ac4030b0d5d0cfab3769dc4e6741f8afab
|
[] |
no_license
|
mary-tano/python-programming
|
6d806e25011e770a04a0922d0b71bf38c222d026
|
829654a3274be939fa529ed94ea568c12f7f1a27
|
refs/heads/master
| 2021-05-17T15:30:32.710838
| 2020-04-01T13:37:18
| 2020-04-01T13:37:18
| 250,846,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# Frankenstein's laboratory (demo driver for the monsterlab classes)
from monsterlab import *
# Main program: create one monster of each class and display it
Frank = Monster("Фрэнки", "необычный")
Frank.show()
Albert = GMonster("Альберт", "задумчивый")
Albert.show()
Sigmund = SMonster("Зигмунд", "веселый")
Sigmund.show()
|
[
"masha.mary.tano@gmail.com"
] |
masha.mary.tano@gmail.com
|
db668ec99a3e918fab75689d177f3b571a030a86
|
8ef5a09d76a11c56963f18e6a08474a1a8bafe3c
|
/leet_code/7. Reverse Integer.py
|
79b791271388c6874618159d647c255bde2e2e06
|
[] |
no_license
|
roiei/algo
|
32c4677649c7666db148f6183fbfbf66c8b1969f
|
ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec
|
refs/heads/master
| 2022-04-01T19:21:27.768675
| 2022-02-19T06:15:29
| 2022-02-19T06:15:29
| 169,021,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
class Solution:
    def reverse(self, x: 'int') -> 'int':
        """Reverse the digits of a signed integer.

        Returns 0 when the reversed value falls outside the signed 32-bit
        range [-2**31, 2**31 - 1] (LeetCode 7 contract).

        Bug fixed: the original guard `x > float('inf')` is never true, so
        overflowing results (e.g. reverse(1534236469)) were returned instead
        of 0. The clamp is now applied to the reversed result.
        """
        sign = -1 if x < 0 else 1
        # reversing the digit string drops leading zeros via int()
        reversed_value = sign * int(str(abs(x))[::-1])
        if reversed_value < -2 ** 31 or reversed_value > 2 ** 31 - 1:
            return 0
        return reversed_value
# Ad-hoc manual test cases; only the last assignment takes effect.
x = 123
#x = -123
#x = 120
#x = 901000
x = 1534236469 # 0
sol = Solution()
print(sol.reverse(x))
|
[
"hyoukjea.son@hyundai.com"
] |
hyoukjea.son@hyundai.com
|
1d49c638c84d9cfa20e25fd85489966f882c7123
|
bfda3af75d94767a5cb265bd68c17cfbf94e3ee1
|
/rosalind/qrt/rosalind_qrtd_tung.py
|
c6d0de60e3be4d40377362c4f3b26bdba3ad70ce
|
[] |
no_license
|
orenlivne/euler
|
d0e5b956a46eacfe423fbd6c52918beb91eea140
|
2afdd8bccdc5789c233e955b1ca626cea618eb9b
|
refs/heads/master
| 2020-12-29T02:24:36.479708
| 2016-12-15T21:27:33
| 2016-12-15T21:27:33
| 20,263,482
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,129
|
py
|
'''
============================================================
http://rosalind.info/problems/qrtd
Given: A list containing n taxa (n<=2000) and two unrooted
binary trees T1 and T2 on the given taxa. Both T1 and T2 are
given in Newick format.
Return: The quartet distance dq(T1,T2).
============================================================
'''
# From http://rosalind.info/problems/qrtd/solutions/.
# Need to get the rest of his libraries
import time
from rosalind import rostree
def qrtd(fp):
    """Compute the quartet distance between two unrooted binary trees.

    Python 2 code (print statements, xrange, builtin reduce).

    `fp` is an iterator over input lines: taxa list, then the two Newick
    strings. Counts shared (resolved-identically) quartets by 3-coloring T1
    around each internal edge and accumulating intersection sizes over T2 in
    a postorder sweep, then prints total quartets minus the shared count.

    NOTE(review): `attrgetter` is used below but no `from operator import
    attrgetter` is visible in this chunk -- confirm it is imported elsewhere.
    """
    taxa = next(fp).split()
    t1_str = next(fp)
    t2_str = next(fp)
    taxa_id = dict((s,i) for i, s in enumerate(taxa))
    all_taxa = set(xrange(len(taxa)))
    start_time = time.time()
    def build_tree(t_str):
        # Parse Newick and annotate every node with the taxa under it
        # (`nodes`) and the complement (`rest`).
        T = rostree.read_newick_str(t_str)
        #T = make_unrooted_binary(T)
        for node in T.walk(order=T.POSTORDER):
            if node.is_leaf:
                node.id = taxa_id[node.val]
                node.nodes = set([node.id])
                node.rest = all_taxa - node.nodes
            else:
                node.nodes = reduce(set.union, map(attrgetter('nodes'), node.children), set())
                node.rest = all_taxa - node.nodes
        # special case to walk unroot tree; the first node is also a leaf node
        T.id = taxa_id[T.val]
        T.nodes = set([T.id])
        T.rest = all_taxa - T.nodes
        return T
    T1 = build_tree(t1_str)
    T2 = build_tree(t2_str)
    # link T2 nodes to T1. Mind the special case for root node.
    id_2_T1 = dict((node.id,node) for node in T1.walk(type=T1.LEAF))
    id_2_T1[T1.id] = T1
    for node in T2.walk(type=T1.LEAF):
        node.t1_node = id_2_T1[node.id]
    T2.t1_node = id_2_T1[T2.id]
    N = len(taxa)
    print 'N=',N
    count = 0
    for i, v1 in enumerate(T1.walk(type=T1.INODE)):
        if v1 is T1:
            continue
        if i % 10 == 0:
            # progress report: elapsed seconds and internal-node index
            print 'T1 %3d %s' % (time.time() - start_time, i)
        # 3-color T1's leaves by which side of v1 they fall on:
        # 1 = outside v1's subtree, 2 = left child, 3 = right child
        for A_node in T1.walk(exclude_node=v1):
            A_node.color = 1
        for B_node in v1.left.walk():
            B_node.color = 2
        for C_node in v1.right.walk():
            C_node.color = 3
        A1 = v1.rest
        B1 = v1.left.nodes
        C1 = v1.right.nodes
        for v2 in T2.walk(order=T2.POSTORDER):
            if v2 is T2:
                pass
            elif v2.is_leaf:
                # leaf contributes 1 to exactly one of the three color tallies
                v2.a1 = 0
                v2.b1 = 0
                v2.c1 = 0
                c = v2.t1_node.color
                if c == 1: v2.a1 = 1
                elif c == 2: v2.b1 = 1
                else: v2.c1 = 1
            else:
                B = v2.left
                C = v2.right
                # xYzW = |leaves of T1-part X under T2-child W|
                a1b2 = B.a1
                a1c2 = C.a1
                a1a2 = len(A1) - a1b2 - a1c2
                b1b2 = B.b1
                b1c2 = C.b1
                b1a2 = len(B1) - b1b2 - b1c2
                c1b2 = B.c1
                c1c2 = C.c1
                c1a2 = len(C1) - c1b2 - c1c2
                # rememeber under v2, how many of them intersect with A1, B1 and C1
                v2.a1 = a1b2 + a1c2
                v2.b1 = b1b2 + b1c2
                v2.c1 = c1b2 + c1c2
                # 3x3=9 different orientation for T12 and T2,
                # times in each case two ways to pair B and C from each tree
                count += a1a2 * (a1a2-1) / 2 * (b1b2 * c1c2 + b1c2 * c1b2)
                count += a1b2 * (a1b2-1) / 2 * (b1a2 * c1c2 + b1c2 * c1a2)
                count += a1c2 * (a1c2-1) / 2 * (b1a2 * c1b2 + b1b2 * c1a2)
                count += b1a2 * (b1a2-1) / 2 * (a1b2 * c1c2 + a1c2 * c1b2)
                count += b1b2 * (b1b2-1) / 2 * (a1a2 * c1c2 + a1c2 * c1a2)
                count += b1c2 * (b1c2-1) / 2 * (a1a2 * c1b2 + a1b2 * c1a2)
                count += c1a2 * (c1a2-1) / 2 * (a1b2 * b1c2 + a1c2 * b1b2)
                count += c1b2 * (c1b2-1) / 2 * (a1a2 * b1c2 + a1c2 * b1a2)
                count += c1c2 * (c1c2-1) / 2 * (a1a2 * b1b2 + a1b2 * b1a2)
    # total quartets C(N,4)*... minus identically-resolved ones = distance
    print N * (N - 1) * (N- 2) * (N - 3) / 12 - count
if __name__ == "__main__":
    # NOTE(review): qrtd() calls next(fp) and so expects an open file/iterator,
    # but a filename string is passed here; it also prints its result and
    # returns None, so this outer print emits 'None'. Confirm intended usage.
    print qrtd('rosalind_qrtd_sample.dat')
    #print qrtd('rosalind_qrtd.dat')
|
[
"oren.livne@gmail.com"
] |
oren.livne@gmail.com
|
710bcc0fb5dcc70b3aacdae1595043478681cdb2
|
02560440f9f91e583fe98d80ab11e18aa6c7a525
|
/apps/usuarios/migrations/0003_usuario_correo.py
|
ad084ca9dc73a72604d08e401a8af1a08f618f45
|
[] |
no_license
|
eduardogpg/wamadeusV1
|
a36c89176543e638486009620c5131f46743edbc
|
82d93293dc6afc95a6661f727162f4055ab83a43
|
refs/heads/master
| 2020-12-28T01:57:47.831689
| 2015-01-08T05:14:25
| 2015-01-08T05:14:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adds a required `correo` EmailField to the
    # Usuario model; default ' ' is only used to backfill existing rows.

    dependencies = [
        ('usuarios', '0002_auto_20141215_1710'),
    ]
    operations = [
        migrations.AddField(
            model_name='usuario',
            name='correo',
            field=models.EmailField(default=' ', max_length=50),
            preserve_default=False,
        ),
    ]
|
[
"eduardo78d@gmail.com"
] |
eduardo78d@gmail.com
|
48067e4ceef655c896f3a35b0571079df7c10a52
|
97a4d29863d1ce96f366554fdd985c3ce580bb5d
|
/061.py
|
f14890c43a1e22228adf9d4732a5d4ba2c6c44f6
|
[] |
no_license
|
Everfighting/Python-Algorithms
|
5c3a102fed3a29858f3112d657c69e077efc7e28
|
235e9b4c66602035be39a8d3b3ad9cf016aebbb9
|
refs/heads/master
| 2021-01-20T22:19:18.902687
| 2018-03-02T05:38:27
| 2018-03-02T05:38:27
| 61,302,323
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
if __name__ == '__main__':
    # Print the first 10 rows of Pascal's (Yang Hui's) triangle.
    # Python 2 code: note the bare `print` statement at the end.
    a = []
    for i in range(10):
        a.append([])  # create the ten rows
        for j in range(10):
            a[i].append(0)  # create i columns per row, zero-filled
    # the triangle's edges are all 1
    for i in range(10):
        a[i][0] = 1
        a[i][i] = 1
    # each interior value is the sum of the two values above it
    # (value = upper-left neighbour + upper neighbour)
    for i in range(2,10):
        for j in range(1,i):
            a[i][j] = a[i - 1][j-1] + a[i - 1][j]
    from sys import stdout
    for i in range(10):
        for j in range(i + 1):
            stdout.write(str(a[i][j]))
            stdout.write(' ')
        print
|
[
"cbb903601682@163.com"
] |
cbb903601682@163.com
|
e1bf319ac4b1a93b08f0dafc5fd453b9cd95d5b1
|
4e44974b9e59dfd4324d84b12b10f008117814cd
|
/test_autofit/integration/src/dataset/dataset.py
|
c3dc9773c00b8b4cc97f43fc249734b1546be650
|
[
"MIT"
] |
permissive
|
PriyatamNayak/PyAutoFit
|
2cc2608943f8c3bdbda3b268142e7307014ccaf2
|
32c0c30acd219030c86a12db82ae54e406fd7119
|
refs/heads/master
| 2023-03-04T07:27:41.547966
| 2021-02-11T23:21:00
| 2021-02-11T23:21:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,487
|
py
|
from astropy.io import fits
import numpy as np
# The 'dataset.py' module has been extended to give the dataset a name and metadata.
class Dataset:
    """A 1D line dataset: the data array plus its RMS noise-map."""

    def __init__(self, data, noise_map, name=None):
        """A class containing the data and noise-map of a 1D line dataset.

        Parameters
        ----------
        data : np.ndarray
            The array of the data, in arbitrary units.
        noise_map : np.ndarray
            An array describing the RMS standard deviation error in each data pixel, in arbitrary units.
        name : str, optional
            Name used by the aggregator for the saved-dataset filename;
            defaults to "dataset" when not given.
        """
        self.data = data
        self.noise_map = noise_map

        # The name of the dataset is used by the aggregator, to determine the
        # name of the file the dataset is saved as and so that when using the
        # aggregator you can know which dataset you are manipulating.
        # BUG FIX: the original used `name if name is str`, which compares the
        # value to the *type* `str` and is essentially always False, silently
        # discarding any supplied name.
        self.name = name if name is not None else "dataset"

    @property
    def xvalues(self):
        """Integer pixel coordinates (0..N-1) of the 1D data."""
        return np.arange(self.data.shape[0])

    @classmethod
    def from_fits(cls, data_path, noise_map_path, name=None):
        """Load the data and noise-map of a 1D line dataset from ``.fits`` files.

        Parameters
        ----------
        data_path : str
            The path on your hard-disk to the ``.fits`` file of the data.
        noise_map_path : str
            The path on your hard-disk to the ``.fits`` file of the noise-map.
        name : str, optional
            Name passed through to the constructed dataset.
        """
        data_hdu_list = fits.open(data_path)
        noise_map_hdu_list = fits.open(noise_map_path)
        data = np.array(data_hdu_list[0].data)
        noise_map = np.array(noise_map_hdu_list[0].data)
        return cls(data=data, noise_map=noise_map, name=name)
class MaskedDataset:
    def __init__(self, dataset, mask):
        """
        A masked dataset: the original dataset with a 1D mask applied, where
        masked pixels are zeroed in both the data and the noise-map.

        Parameters
        ----------
        dataset: im.Dataset
            The dataset (the image, noise-map, etc.)
        mask: msk.Mask2D
            The 1D mask that is applied to the dataset.
        """
        self.dataset = dataset
        self.mask = mask
        unmasked = np.invert(mask)
        # zero out masked entries of both arrays
        self.data = dataset.data * unmasked
        self.noise_map = dataset.noise_map * unmasked

    @property
    def xvalues(self):
        """Integer pixel coordinates (0..N-1) of the 1D data."""
        return np.arange(self.data.shape[0])

    def signal_to_noise_map(self):
        """Per-pixel signal-to-noise ratio of the masked data."""
        return self.data / self.noise_map

    def with_left_trimmed(self, data_trim_left):
        """Return a new masked dataset with the first pixels removed."""
        if data_trim_left is None:
            return self
        # build a trimmed dataset and mask, then re-wrap them
        trimmed = Dataset(
            data=self.dataset.data[data_trim_left:],
            noise_map=self.dataset.noise_map[data_trim_left:],
        )
        return MaskedDataset(dataset=trimmed, mask=self.mask[data_trim_left:])

    def with_right_trimmed(self, data_trim_right):
        """Return a new masked dataset with the last pixels removed."""
        if data_trim_right is None:
            return self
        # same as above, but trimming from the right-hand side
        trimmed = Dataset(
            data=self.dataset.data[:-data_trim_right],
            noise_map=self.dataset.noise_map[:-data_trim_right],
        )
        return MaskedDataset(dataset=trimmed, mask=self.mask[:-data_trim_right])
|
[
"james.w.nightingale@durham.ac.uk"
] |
james.w.nightingale@durham.ac.uk
|
3a59b6324f48032a8c58f34957ffbed79c1fcb08
|
72f2f37c3c33e5bc02ec6c707a7c858d7990db3a
|
/examples/tour_examples/driverjs_maps_tour.py
|
33fb342608c1c2cd08a48da9c5a1aab3f8ac71a0
|
[
"MIT"
] |
permissive
|
matthewxuda/SeleniumBase
|
190e4917dec8c731f17fd9d6a1247f8c17086d0c
|
efd282a860206dad81d0d4e61a472138eb04328d
|
refs/heads/master
| 2023-09-01T09:17:57.608760
| 2021-10-21T02:48:32
| 2021-10-21T02:48:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,129
|
py
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
    def test_create_tour(self):
        """Build, export, and play a DriverJS guided tour on Google Maps.

        Pure browser I/O: opens the live site, waits for the key widgets,
        attaches one tour step per UI element, exports the tour to
        "my_tour.js", and plays it without delays.
        """
        self.open("https://www.google.com/maps/@42.3591234,-71.0915634,15z")
        # wait for the search box, minimap, and zoom controls before touring
        self.wait_for_element("#searchboxinput", timeout=20)
        self.wait_for_element("#minimap", timeout=20)
        self.wait_for_element("#zoom", timeout=20)
        # Create a website tour using the DriverJS library
        # Same as: self.create_driverjs_tour()
        self.create_tour(theme="driverjs")
        self.add_tour_step(
            "🗺️ Welcome to Google Maps 🗺️",
            "html",
            title="✅ SeleniumBase Tours 🌎",
        )
        self.add_tour_step(
            "You can type a location into this Search box.", "#searchboxinput"
        )
        self.add_tour_step(
            "Then click here to view it on the map.",
            "#searchbox-searchbutton",
            alignment="bottom",
        )
        self.add_tour_step(
            "Or click here to get driving directions.",
            "#searchbox-directions",
            alignment="bottom",
        )
        self.add_tour_step(
            "Use this button to get a Satellite view.",
            "div.widget-minimap-shim",
            alignment="right",
        )
        self.add_tour_step(
            "Click here to zoom in.", "#widget-zoom-in", alignment="left"
        )
        self.add_tour_step(
            "Or click here to zoom out.", "#widget-zoom-out", alignment="left"
        )
        self.add_tour_step(
            "Use the Menu button for more options.",
            ".searchbox-hamburger-container",
            alignment="right",
        )
        self.add_tour_step(
            "Or click here to see more Google apps.",
            '[title="Google apps"]',
            alignment="left",
        )
        self.add_tour_step(
            "Thanks for using SeleniumBase Tours",
            "html",
            title="🚃 End of Guided Tour 🚃",
        )
        self.export_tour()  # The default name for exports is "my_tour.js"
        self.play_tour(interval=0)  # If interval > 0, autoplay after N seconds
|
[
"mdmintz@gmail.com"
] |
mdmintz@gmail.com
|
edc117b558873902ee1d38b226f7af11cebc80c9
|
58df99d96af6a688852993e38da89b75fea1d0dc
|
/exps/NATS-Bench/draw-correlations.py
|
6afac3b804703bc53660e618d2c2a6e820974d3e
|
[
"MIT"
] |
permissive
|
yuezhixiong/AutoDL-Projects
|
0f24ed98389b70f452a79c8ef825d5e563ac5d8c
|
0d3c63bdbe2d648c2119ffe8d0491f8a07cf85cb
|
refs/heads/master
| 2023-03-22T17:15:37.013837
| 2021-03-02T05:13:51
| 2021-03-02T05:13:51
| 315,518,182
| 0
| 1
|
MIT
| 2021-02-26T06:36:34
| 2020-11-24T04:28:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,860
|
py
|
###############################################################
# NATS-Bench (arxiv.org/pdf/2009.00437.pdf), IEEE TPAMI 2021 #
###############################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.06 #
###############################################################
# Usage: python exps/NATS-Bench/draw-correlations.py #
###############################################################
import os, gc, sys, time, scipy, torch, argparse
import numpy as np
from typing import List, Text, Dict, Any
from shutil import copyfile
from collections import defaultdict, OrderedDict
from copy import deepcopy
from pathlib import Path
import matplotlib
import seaborn as sns
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from config_utils import dict2config, load_config
from nats_bench import create
from log_utils import time_string
def get_valid_test_acc(api, arch, dataset):
    """Query NATS-Bench for an architecture's validation and test accuracy.

    Returns (valid_acc, test_acc, summary_str). For cifar10 the validation
    accuracy comes from the 'cifar10-valid' split; other datasets report both
    from the same query. The hp budget is 90 epochs for the size ('sss')
    search space and 200 for the topology ('tss') space.
    """
    is_size_space = api.search_space_name == 'size'
    if dataset == 'cifar10':
        # cifar10's test entry has no validation split; query cifar10-valid
        xinfo = api.get_more_info(arch, dataset=dataset, hp=90 if is_size_space else 200, is_random=False)
        test_acc = xinfo['test-accuracy']
        xinfo = api.get_more_info(arch, dataset='cifar10-valid', hp=90 if is_size_space else 200, is_random=False)
        valid_acc = xinfo['valid-accuracy']
    else:
        xinfo = api.get_more_info(arch, dataset=dataset, hp=90 if is_size_space else 200, is_random=False)
        valid_acc = xinfo['valid-accuracy']
        test_acc = xinfo['test-accuracy']
    return valid_acc, test_acc, 'validation = {:.2f}, test = {:.2f}\n'.format(valid_acc, test_acc)
def compute_kendalltau(vectori, vectorj):
    """Return Kendall's tau rank-correlation coefficient of the two vectors.

    The p-value from scipy is discarded. (Removed: dead commented-out code,
    including a leftover `pdb.set_trace()` debugger breakpoint.)
    """
    coef, _pvalue = scipy.stats.kendalltau(vectori, vectorj)
    return coef
def compute_spearmanr(vectori, vectorj):
  """Return Spearman's rho rank-correlation coefficient of two score vectors.

  Fix: import `scipy.stats` explicitly — a bare `import scipy` at module
  level does not guarantee the `stats` subpackage is available.
  """
  import scipy.stats
  coef, p = scipy.stats.spearmanr(vectori, vectorj)
  return coef
if __name__ == '__main__':
  parser = argparse.ArgumentParser(description='NATS-Bench: Benchmarking NAS Algorithms for Architecture Topology and Size', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument('--save_dir', type=str, default='output/vis-nas-bench/nas-algos', help='Folder to save checkpoints and log.')
  parser.add_argument('--search_space', type=str, choices=['tss', 'sss'], help='Choose the search space.')
  args = parser.parse_args()
  # NOTE(review): `args.search_space` and `save_dir` are parsed/constructed but
  # never used below — the API is hard-wired to 'tss' and the output path is
  # hard-coded; confirm whether that is intentional.
  save_dir = Path(args.save_dir)
  api = create(None, 'tss', fast_mode=True, verbose=False)
  # Sample every 300th architecture index to keep this correlation check cheap.
  indexes = list(range(1, 10000, 300))
  scores_1 = []
  scores_2 = []
  for index in indexes:
    valid_acc, test_acc, _ = get_valid_test_acc(api, index, 'cifar10')
    scores_1.append(valid_acc)
    scores_2.append(test_acc)
  # Rank correlation between validation and test accuracies over the sample.
  correlation = compute_kendalltau(scores_1, scores_2)
  print('The kendall tau correlation of {:} samples : {:}'.format(len(indexes), correlation))
  correlation = compute_spearmanr(scores_1, scores_2)
  print('The spearmanr correlation of {:} samples : {:}'.format(len(indexes), correlation))
  # scores_1 = ['{:.2f}'.format(x) for x in scores_1]
  # scores_2 = ['{:.2f}'.format(x) for x in scores_2]
  # print(', '.join(scores_1))
  # print(', '.join(scores_2))
  dpi, width, height = 250, 1000, 1000
  figsize = width / float(dpi), height / float(dpi)
  LabelSize, LegendFontsize = 14, 14
  fig, ax = plt.subplots(1, 1, figsize=figsize)
  # Scatter of validation vs. test accuracy; a tight diagonal indicates high
  # rank correlation between the two metrics.
  ax.scatter(scores_1, scores_2 , marker='^', s=0.5, c='tab:green', alpha=0.8)
  # NOTE(review): user-specific hard-coded path — consider using `save_dir`.
  save_path = '/Users/xuanyidong/Desktop/test-temp-rank.png'
  fig.savefig(save_path, dpi=dpi, bbox_inches='tight', format='png')
  plt.close('all')
|
[
"280835372@qq.com"
] |
280835372@qq.com
|
becb97ab51bd113a00a2a0c169559e348ee0f82c
|
a46b14b44c87adb0288224a0e7e31d9bed30223f
|
/guest_project/apps/guest_app/models.py
|
f6db55f42203f80a0a458a0b4a83ca4f50478693
|
[] |
no_license
|
JeffLawrence1/Python-Django-Intermediate
|
0b663e5d706dc6b35ff2785ae38d7bf0f2f3b651
|
d1efc3e6385286ab25bae36042987a85ae94e359
|
refs/heads/master
| 2020-03-09T03:42:47.348420
| 2018-04-07T21:42:04
| 2018-04-07T21:42:04
| 128,570,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class User(models.Model):
    """Minimal guest-app user record (display name only)."""
    # Display name; capped at 255 characters by the DB column.
    name = models.CharField(max_length=255)
|
[
"jefflaw13@hotmail.com"
] |
jefflaw13@hotmail.com
|
592cca932c6d29898437e2362af88c8d578e9466
|
a735cc0b04b3227720bfd97c74ef13bda5bdf571
|
/python/documentation/doc/conf.py
|
87be3541d59a67e9c9cc135f03e7e0690fa181a4
|
[
"MIT"
] |
permissive
|
abstractfactory/labs
|
beed0aab27cd3028c67ece87ef91d18b55114eb1
|
f0791fb92686456d4cef3a11f699590a949fd6a9
|
refs/heads/master
| 2021-01-23T20:50:07.613682
| 2014-11-18T10:30:29
| 2014-11-18T10:30:29
| 20,175,862
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,179
|
py
|
# -*- coding: utf-8 -*-
#
# Labs documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 24 15:49:19 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# autodoc pulls API docs from docstrings; viewcode adds links to
# highlighted source for documented objects.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Labs'
copyright = u'2014, Marcus Ottosson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE: keep `version` and `release` in sync when tagging a release.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# 'default' is the classic built-in Sphinx theme.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Labsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'Labs.tex', u'Labs Documentation',
   u'Marcus Ottosson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'labs', u'Labs Documentation',
     [u'Marcus Ottosson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Labs', u'Labs Documentation',
   u'Marcus Ottosson', 'Labs', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
[
"konstruktion@gmail.com"
] |
konstruktion@gmail.com
|
5242f6f122ece46875d63baf451df2044a5956d8
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/pytest-labs/.venv/lib/python3.6/site-packages/facebook_business/adobjects/adcampaignfrequencycontrolspecs.py
|
d0005352b87cf20f8700d3c56dda97efc9a99ee6
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260
| 2023-03-26T00:47:52
| 2023-03-26T00:47:52
| 26,059,824
| 6
| 5
| null | 2022-12-08T00:43:21
| 2014-11-01T18:48:56
| null |
UTF-8
|
Python
| false
| false
| 1,973
|
py
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class AdCampaignFrequencyControlSpecs(
    AbstractObject,
):
    """Frequency-control spec of an ad campaign (event / interval / max count).

    Auto-generated SDK class (see the codegen notice earlier in this file);
    behavior must not be hand-edited.
    """

    def __init__(self, api=None):
        super(AdCampaignFrequencyControlSpecs, self).__init__()
        # Marker attribute used by the SDK to identify this object type.
        self._isAdCampaignFrequencyControlSpecs = True
        self._api = api

    class Field(AbstractObject.Field):
        # Graph API field names exposed by this object.
        event = 'event'
        interval_days = 'interval_days'
        max_frequency = 'max_frequency'

    # Maps field name -> Graph API type; used by the SDK's (de)serialization.
    _field_types = {
        'event': 'string',
        'interval_days': 'unsigned int',
        'max_frequency': 'unsigned int',
    }
    @classmethod
    def _get_field_enum_info(cls):
        # No enum-typed fields on this object.
        field_enum_info = {}
        return field_enum_info
|
[
"marcosptf@yahoo.com.br"
] |
marcosptf@yahoo.com.br
|
254e98217498dea904c67279827063013f34b5fb
|
e6421de3f06af8be4234e9901d71f86b31c6c3a7
|
/pdenv/bin/easy_install-3.5
|
5e6b880f3fb8150f6afd21b014f591583dfa7719
|
[
"MIT"
] |
permissive
|
Elmartin913/PanDjango
|
bdb5446ee18ee297c23199cd3f9dd59cae555135
|
3b1eb52d53c87365f3d2fa5bd7ef72843ed5af32
|
refs/heads/master
| 2022-12-11T04:44:05.229530
| 2018-05-11T10:16:07
| 2018-05-11T10:16:07
| 128,903,323
| 0
| 0
|
MIT
| 2022-12-08T00:57:53
| 2018-04-10T08:54:10
|
CSS
|
UTF-8
|
Python
| false
| false
| 276
|
5
|
#!/home/elmartin913/workspace/app/PanDjango/pdenv/bin/python3
# -*- coding: utf-8 -*-
# Console-script shim generated by setuptools for the `easy_install`
# entry point of this virtualenv.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip the '-script.pyw'/'.exe' suffix that Windows launchers append to
    # argv[0], so setuptools sees the canonical command name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"marcin.jab@wp.pl"
] |
marcin.jab@wp.pl
|
e57aebb6fb7ca69bcb5a28998f4b3016e5559651
|
47366be5cbee9d7e086291c20f97f10ab2bf74fe
|
/cluster/cluster_create_inequalities_subset_kdd.py
|
a030a3765bc70ab81a1b6e0dfd314582797a9901
|
[] |
no_license
|
nipunbatra/journal
|
3d44eed05c95970606649d17402da54fc0a415ff
|
94a8b88589e8f60e6f0314f8c5a374f22336b3e9
|
refs/heads/master
| 2021-01-09T20:40:45.844121
| 2016-07-27T15:16:29
| 2016-07-27T15:16:29
| 62,874,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,042
|
py
|
# Python 2 driver script: fans out one SLURM job per (region pair, num_homes,
# transform, appliance, test home) combination, each running
# create_inequalities_subset_kdd.py on the cluster.
import time
import pandas as pd
import pickle
import os
import numpy as np
SLURM_OUT = "../slurm_out"
from subprocess import Popen
import time
print "a"
out_overall = pickle.load(open('../data/input/all_regions.pkl','r'))
num_trials=25
print "b"
K = 3
for train_region in ["SanDiego"]:
    if train_region=="Austin":
        NUM_HOMES_MAX = 45
    elif train_region=="SanDiego":
        NUM_HOMES_MAX = len(out_overall['SanDiego'])
    else:
        NUM_HOMES_MAX = len(out_overall['Boulder'])
    # NOTE(review): this overrides the per-region maximum computed above —
    # presumably a temporary cap for a quick run; confirm before removing.
    NUM_HOMES_MAX=20
    for test_region in ["Austin"]:
        if train_region!=test_region:
            # Cross-region runs sweep every feature transformation.
            TRANSFORMATIONS = ["None","DD","DD-percentage","median-aggregate-percentage",
                               "median-aggregate",'regional','regional-percentage']
        else:
            TRANSFORMATIONS = ["None"]
        train_df = out_overall[train_region]
        test_df = out_overall[test_region]
        # Keep only test homes with aggregate and metadata both available.
        test_df=test_df[(test_df.full_agg_available==1)&(test_df.md_available==1)]
        NUM_HOMES_MIN=4
        for num_homes in range(NUM_HOMES_MIN, NUM_HOMES_MAX, 2):
            for transform in TRANSFORMATIONS:
                #for transform in ["None","DD","DD-percentage"]:
                #for transform in ["median-aggregate-percentage"]:
                print transform
                print "*"*40
                count = 0
                #for appliance in ["dw",'hvac','fridge','wm','mw','ec','wh','oven']:
                for appliance in ["hvac"]:
                    # HVAC months restricted to May-Oct (cooling season,
                    # presumably); other appliances use the whole year.
                    if appliance=="hvac":
                        month_min, month_max = 5, 11
                    else:
                        month_min, month_max = 1, 13
                    count+= 1
                    #for appliance in ["hvac","fridge","dr","wm"]:
                    # Drop homes missing any month of this appliance's data.
                    # NOTE(review): DataFrame.ix is deprecated pandas API.
                    test_df = test_df.ix[test_df[['%s_%d' %(appliance,month) for month in range(month_min, month_max)]].dropna().index]
                    for test_home in test_df.index:
                        #for appliance in ["mw"]:
                        if len(test_df.ix[test_home][['%s_%d' %(appliance, m) for m in range(month_min, month_max)]].dropna())==0:
                            # Appliance data not present for this homes..let's save some time
                            continue
                        print appliance, test_home, count, len(test_df.index), K, transform, train_region, test_region
                        # Per-job stdout/stderr paths and generated sbatch script name.
                        OFILE = "%s/%d_%s_%s_%d_%s_%s.out" % (SLURM_OUT, num_homes, train_region[0], test_region[0], test_home, appliance[0], transform[0] )
                        EFILE = "%s/%d_%s_%s_%d_%s_%s.err" % (SLURM_OUT, num_homes, train_region[0], test_region[0], test_home, appliance, transform )
                        SLURM_SCRIPT = "%d_%s_%s_%d_%s_%s.pbs" % (num_homes, train_region[0], test_region[0], test_home, appliance[:2], transform)
                        CMD = 'python ../new_experiments/create_inequalities_subset_kdd.py %s %s %d %s %s %d %d %d' % (train_region, test_region,
                                                                                             test_home, appliance,
                                                                                             transform, K, num_homes, num_trials)
                        lines = []
                        lines.append("#!/bin/sh\n")
                        lines.append('#SBATCH --time=0-05:0:00\n')
                        lines.append('#SBATCH --mem=16\n')
                        lines.append('#SBATCH -o '+'"' +OFILE+'"\n')
                        lines.append('#SBATCH -e '+'"' +EFILE+'"\n')
                        lines.append(CMD+'\n')
                        with open(SLURM_SCRIPT, 'w') as f:
                            f.writelines(lines)
                        command = ['sbatch', SLURM_SCRIPT]
                        Popen(command)
                        #os.remove(SLURM_SCRIPT)
                        print "Now sleeping.."
                        import time
                        time.sleep(40)
            # Longer back-off between batches to avoid flooding the scheduler.
            time.sleep(400)
            time.sleep(1200)
|
[
"nipunb@iiitd.ac.in"
] |
nipunb@iiitd.ac.in
|
05f02000e82ea0aa84a9665a9401fad1feec02b2
|
03587c34370995706871e45320264c2636d795f0
|
/app/views/loja/AvaliacaoView.py
|
a391584f99994610a29e9c4c605cadf597837918
|
[] |
no_license
|
caiomarinhodev/fastdelivery
|
29d1f95dc7204369806e6b99298c9aaafab5ea9f
|
6ad45aa596e204b793ba47f7a0c1b918a2e0890a
|
refs/heads/master
| 2020-03-12T03:18:04.507010
| 2018-04-20T23:49:13
| 2018-04-20T23:49:13
| 130,421,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.views.generic import DetailView
from app.models import Request, Avaliacao
class AvaliacaoView(LoginRequiredMixin, DetailView):
    """Displays the rating (avaliacao) page for a single Request (order)."""
    template_name = 'loja/avaliacao_cliente.html'
    login_url = '/define/login/'
    model = Request
    # Exposed to the template as 'pedido_obj'.
    context_object_name = 'pedido_obj'

    def get(self, request, *args, **kwargs):
        # Delegates straight to DetailView.get; kept as an explicit hook point.
        return super(AvaliacaoView, self).get(request, *args, **kwargs)
def add_avaliacao(request):
    """Create an Avaliacao (rating) for the order referenced in the POST data.

    Expects POST keys: 'pedido' (Request id), 'nota' and 'comentario'.
    Redirects back to the rating page when either field is missing, otherwise
    saves the rating and redirects to the order-tracking page.
    """
    data = request.POST
    pedido = Request.objects.get(id=data['pedido'])
    # Bug fix: the original `if 'comentario' and 'nota' in data:` only tested
    # 'nota' ('comentario' is a truthy string literal, not a membership test);
    # both keys must be present before saving.
    if 'comentario' in data and 'nota' in data:
        aval = Avaliacao(cliente=pedido.cliente, estabelecimento=pedido.estabelecimento, nota=data['nota'],
                         comentario=data['comentario'])
        aval.save()
    else:
        messages.error(request, 'Insira uma nota e um comentario')
        return redirect('/avaliacao/pedido/' + str(data['pedido']))
    messages.success(request, 'Avaliacao Realizada com Sucesso')
    return redirect('/acompanhar-pedido/' + str(data['pedido']))
|
[
"caiomarinho8@gmail.com"
] |
caiomarinho8@gmail.com
|
368438d6bd6eb2e764a63f7c2983f6a8380944e8
|
80775c192c7084171a0371b0fe14330b8cd89f0f
|
/stickerizer/emojis.py
|
7070fbc525378011ade618d9b44d039bdcc88f9a
|
[
"MIT"
] |
permissive
|
vanyakosmos/face2sticker
|
5435ddbbc123c782a6501a78f6142e1ce88f9bc7
|
7b82eb12dd3e4c54c5033caee77f57b751f637b8
|
refs/heads/master
| 2021-09-13T07:40:51.156215
| 2018-04-26T17:16:24
| 2018-04-26T17:16:24
| 105,321,918
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
import numpy as np
from emotion_clf.emotion import load_clf, vectorize, emotions
clf = load_clf('emotion_clf/clf2.pkl')
def associate_emojis(face_landmarks):
    """Return the emoji that best matches the emotion read from the landmarks."""
    return map_emoji(predict_probabilities(face_landmarks))
def predict_probabilities(face_landmarks: dict):
    """Run the emotion classifier on flattened face landmarks.

    Returns a dict mapping each emotion name to its predicted probability.
    """
    # Flatten all landmark groups into one point list, preserving group order.
    flat_points = [point for group in face_landmarks.values() for point in group]
    features = np.array([vectorize(flat_points)])
    scores = clf.predict_proba(features)[0]
    return dict(zip(emotions, scores))
def map_emoji(emotions_prob: dict):
    """Pick the emoji whose weighted emotion score is highest.

    Each candidate emoji boosts exactly one emotion with weight 10; every
    other emotion contributes with weight 1. The moon face is only returned
    if no candidate scores at all.
    """
    emojis = {
        '😡': {
            'anger': 10,
        },
        '😒': {
            'contempt': 10,
        },
        '😣': {
            'disgust': 10,
        },
        '😱': {
            'fear': 10,
        },
        '😀': {
            'happiness': 10,
        },
        '😢': {
            'sadness': 10,
        },
        '😮': {
            'surprise': 10,
        },
    }
    max_s = None
    result = '🌚'
    for emoji, ems in emojis.items():
        s = sum([ems.get(e, 1) * emotions_prob[e] for e in emotions])
        # Fix: compare against the None sentinel explicitly — `not max_s`
        # would also treat a legitimate best score of 0.0 as "unset".
        if max_s is None or s > max_s:
            max_s = s
            result = emoji
    return result
|
[
"bachynin.i@gmail.com"
] |
bachynin.i@gmail.com
|
8f7d2e670202fe46834fd31c9e7eaf218bed9b04
|
ca3d6e6683f4736792fc93352424c6e6d216ab4d
|
/chapter9/chapter9_app_external_api_test.py
|
ccdbb3f2d629abb50083ae1be6495e4b66566be2
|
[
"MIT"
] |
permissive
|
msg4rajesh/Building-Data-Science-Applications-with-FastAPI
|
11ac071583002b15bc955fc3bc72ab86d2800222
|
99b472d8295a57c5a74a63d8184ac053dc4012f2
|
refs/heads/main
| 2023-07-16T09:48:48.536002
| 2021-08-26T05:02:39
| 2021-08-26T05:02:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
import asyncio
from typing import Any, Dict
import httpx
import pytest
from asgi_lifespan import LifespanManager
from fastapi import status
from chapter9.chapter9_app_external_api import app, external_api
class MockExternalAPI:
    """Stand-in for the external employees API; always returns `mock_data`."""
    # Canned payload returned by every call; shape mirrors what the tests
    # assert against in test_get_employees.
    mock_data = {
        "data": [
            {
                "employee_age": 61,
                "employee_name": "Tiger Nixon",
                "employee_salary": 320800,
                "id": 1,
                "profile_image": "",
            }
        ],
        "status": "success",
        "message": "Success",
    }

    async def __call__(self) -> Dict[str, Any]:
        # Mimics the awaited external call without any network I/O.
        return MockExternalAPI.mock_data
@pytest.fixture(scope="session")
def event_loop():
    # Session-scoped loop so all async fixtures/tests share one event loop.
    loop = asyncio.get_event_loop()
    yield loop
    loop.close()
@pytest.fixture
async def test_client():
    # Swap the real external-API dependency for the canned mock.
    app.dependency_overrides[external_api] = MockExternalAPI()
    # LifespanManager runs the app's startup/shutdown events around the test.
    async with LifespanManager(app):
        async with httpx.AsyncClient(app=app, base_url="http://app.io") as test_client:
            yield test_client
@pytest.mark.asyncio
async def test_get_employees(test_client: httpx.AsyncClient):
    # With the dependency overridden, /employees must echo the mock payload.
    response = await test_client.get("/employees")
    assert response.status_code == status.HTTP_200_OK
    json = response.json()
    assert json == MockExternalAPI.mock_data
|
[
"fvoron@gmail.com"
] |
fvoron@gmail.com
|
ff7e5353de2674b363d6503c65205bd258975026
|
dfff7fef4d49266db475856d4c0afef8ca672e00
|
/tests/cantfit.py
|
54f692c8b57158768e4561a4098cae020b3eafbe
|
[
"MIT"
] |
permissive
|
funilrys/black
|
70a5a251338ab67fed0771ab6ec97cca03aa378b
|
b4cee97c99d5513ef81fdf2bff1809721662f87d
|
refs/heads/master
| 2020-03-17T14:41:13.259870
| 2018-05-16T05:15:28
| 2018-05-16T05:15:28
| 133,682,656
| 1
| 0
| null | 2018-05-16T14:57:35
| 2018-05-16T14:57:35
| null |
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
# long variable name
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 0
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = 1 # with a comment
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = [
1, 2, 3
]
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function()
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
arg1, arg2, arg3
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
# long function name
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying()
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
arg1, arg2, arg3
)
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
# long arguments
normal_name = normal_function_name(
"but with super long string arguments that on their own exceed the line limit so there's no way it can ever fit",
"eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs",
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it=0,
)
# output
# long variable name
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
0
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
1
) # with a comment
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = [
1, 2, 3
]
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = (
function()
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
arg1, arg2, arg3
)
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it = function(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
# long function name
normal_name = (
but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying()
)
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
arg1, arg2, arg3
)
normal_name = but_the_function_name_is_now_ridiculously_long_and_it_is_still_super_annoying(
[1, 2, 3], arg1, [1, 2, 3], arg2, [1, 2, 3], arg3
)
# long arguments
normal_name = normal_function_name(
"but with super long string arguments that on their own exceed the line limit so there's no way it can ever fit",
"eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs",
this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it=0,
)
|
[
"lukasz@langa.pl"
] |
lukasz@langa.pl
|
6ec673beb0c506a5c90bb8c68908c0c73c13587c
|
3a74ac2e7db63069945e5bc620342b4b89b8b201
|
/python/dgl/distributed/rpc_server.py
|
ad47de7104c4d92fc64b87b7cdc82f26fefd6a38
|
[
"Apache-2.0"
] |
permissive
|
vishalbelsare/dgl
|
5d17ba82f720d742e1274c5d48dac64eca234504
|
512a80b00d2cd35607a542eb5544fa1f1c93a6f6
|
refs/heads/master
| 2023-08-17T15:09:55.082014
| 2022-01-22T04:25:14
| 2022-01-22T04:25:14
| 167,955,673
| 0
| 0
|
Apache-2.0
| 2022-01-23T13:57:57
| 2019-01-28T12:05:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,476
|
py
|
"""Functions used by server."""
import time
from . import rpc
from .constants import MAX_QUEUE_SIZE
def start_server(server_id, ip_config, num_servers, num_clients, server_state, \
                 max_queue_size=MAX_QUEUE_SIZE, net_type='socket'):
    """Start DGL server, which will be shared with all the rpc services.

    This is a blocking function -- it returns only when the server shutdown.

    Parameters
    ----------
    server_id : int
        Current server ID (starts from 0).
    ip_config : str
        Path of IP configuration file.
    num_servers : int
        Server count on each machine.
    num_clients : int
        Total number of clients that will be connected to the server.
        Note that, we do not support dynamic connection for now. It means
        that once all the clients connect to server, no new client can be
        added to the cluster.
    server_state : ServerSate object
        Store in main data used by server.
    max_queue_size : int
        Maximal size (bytes) of server queue buffer (~20 GB on default).
        Note that the 20 GB is just an upper-bound because DGL uses zero-copy and
        it will not allocate 20GB memory at once.
    net_type : str
        Networking type. Current options are: 'socket'.
    """
    assert server_id >= 0, 'server_id (%d) cannot be a negative number.' % server_id
    assert num_servers > 0, 'num_servers (%d) must be a positive number.' % num_servers
    assert num_clients >= 0, 'num_client (%d) cannot be a negative number.' % num_clients
    assert max_queue_size > 0, 'queue_size (%d) cannot be a negative number.' % max_queue_size
    # Bug fix: `net_type in ('socket')` was a *substring* test on a string
    # (the parentheses do not make a tuple), so e.g. 'ock' would pass.
    # Use a one-element tuple so only an exact match is accepted.
    assert net_type in ('socket',), 'net_type (%s) can only be \'socket\'' % net_type
    # Register signal handler.
    rpc.register_sig_handler()
    # Register some basic services
    rpc.register_service(rpc.CLIENT_REGISTER,
                         rpc.ClientRegisterRequest,
                         rpc.ClientRegisterResponse)
    rpc.register_service(rpc.SHUT_DOWN_SERVER,
                         rpc.ShutDownRequest,
                         None)
    rpc.register_service(rpc.GET_NUM_CLIENT,
                         rpc.GetNumberClientsRequest,
                         rpc.GetNumberClientsResponse)
    rpc.register_service(rpc.CLIENT_BARRIER,
                         rpc.ClientBarrierRequest,
                         rpc.ClientBarrierResponse)
    rpc.set_rank(server_id)
    server_namebook = rpc.read_ip_config(ip_config, num_servers)
    machine_id = server_namebook[server_id][0]
    rpc.set_machine_id(machine_id)
    ip_addr = server_namebook[server_id][1]
    port = server_namebook[server_id][2]
    rpc.create_sender(max_queue_size, net_type)
    rpc.create_receiver(max_queue_size, net_type)
    # wait all the senders connect to server.
    # Once all the senders connect to server, server will not
    # accept new sender's connection
    print("Wait connections non-blockingly...")
    rpc.receiver_wait(ip_addr, port, num_clients, blocking=False)
    rpc.set_num_client(num_clients)
    # Recv all the client's IP and assign ID to clients
    addr_list = []
    client_namebook = {}
    for _ in range(num_clients):
        # blocked until request is received
        req, _ = rpc.recv_request()
        assert isinstance(req, rpc.ClientRegisterRequest)
        addr_list.append(req.ip_addr)
    # Sort so every server assigns the same client IDs deterministically.
    addr_list.sort()
    for client_id, addr in enumerate(addr_list):
        client_namebook[client_id] = addr
    for client_id, addr in client_namebook.items():
        client_ip, client_port = addr.split(':')
        # TODO[Rhett]: server should not be blocked endlessly.
        while not rpc.connect_receiver(client_ip, client_port, client_id):
            time.sleep(1)
    if rpc.get_rank() == 0:  # server_0 send all the IDs
        for client_id, _ in client_namebook.items():
            register_res = rpc.ClientRegisterResponse(client_id)
            rpc.send_response(client_id, register_res)
    # main service loop
    while True:
        req, client_id = rpc.recv_request()
        res = req.process_request(server_state)
        if res is not None:
            if isinstance(res, list):
                for response in res:
                    target_id, res_data = response
                    rpc.send_response(target_id, res_data)
            elif isinstance(res, str) and res == 'exit':
                break  # break the loop and exit server
            else:
                rpc.send_response(client_id, res)
|
[
"noreply@github.com"
] |
vishalbelsare.noreply@github.com
|
f2ed1999ab2fe5b10597c440649a3b93645b82d3
|
c1e305171afcd18fdd66a46cbcf81d8dbcc3fd0c
|
/PyTorch/Py09_dropout.py
|
447dd6e594089cf2812c287b60aa6a968b1ae24c
|
[] |
no_license
|
ANRhine/PyTorch_Tutorial
|
2f0d9fcc94dfec37a352b5dcb37fc66738abc37d
|
378d03d2f2cfa08ff2040096218078a2e3cd659a
|
refs/heads/master
| 2021-04-07T06:24:28.608860
| 2018-03-16T14:43:03
| 2018-03-16T14:43:03
| 125,291,327
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,116
|
py
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
-------------------------------------
File name: Py09_dropout.py
Author: Ruonan Yu
Date: 18-1-30
-------------------------------------
Fit a tiny noisy 1-D regression set with an oversized MLP, with and
without dropout, and plot both fits live to visualise overfitting.
"""
import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable  # kept for backward compat; no longer needed
import torch.nn as nn

torch.manual_seed(1)  # reproducible data and weight initialisation

N_SAMPLES = 20  # points in each of the train/test sets
N_HIDDEN = 300  # hidden width, deliberately oversized to provoke overfitting
LR = 0.001      # Adam learning rate

# fake data: y = x + Gaussian noise
# training data
x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
y = x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))

# test data from the same distribution; evaluated under torch.no_grad() below,
# which replaces the deprecated Variable(..., volatile=True) API
test_x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
test_y = test_x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))

# plain network: large enough to memorise the 20 training points
net_overfitting = nn.Sequential(
    nn.Linear(1, N_HIDDEN),
    nn.ReLU(),
    nn.Linear(N_HIDDEN, N_HIDDEN),
    nn.ReLU(),
    nn.Linear(N_HIDDEN, 1)
)

# same architecture regularised with dropout
net_dropouted = nn.Sequential(
    nn.Linear(1, N_HIDDEN),
    nn.Dropout(0.5),  # drop 50% of neurons
    nn.ReLU(),
    nn.Linear(N_HIDDEN, N_HIDDEN),
    nn.Dropout(0.5),  # drop 50% of neurons
    nn.ReLU(),
    nn.Linear(N_HIDDEN, 1)
)

print(net_overfitting)
print(net_dropouted)

# training
optimizer_ofit = torch.optim.Adam(net_overfitting.parameters(), lr=LR)
optimizer_drop = torch.optim.Adam(net_dropouted.parameters(), lr=LR)
loss_func = nn.MSELoss()

plt.ion()  # interactive mode so the figure refreshes during training

for t in range(500):
    pred_ofit = net_overfitting(x)
    pred_drop = net_dropouted(x)
    loss_ofit = loss_func(pred_ofit, y)
    loss_drop = loss_func(pred_drop, y)

    optimizer_ofit.zero_grad()
    optimizer_drop.zero_grad()
    loss_ofit.backward()
    loss_drop.backward()
    optimizer_ofit.step()
    optimizer_drop.step()

    if t % 10 == 0:  # redraw the comparison plot every 10 steps
        # Switch to eval mode: dropout layers behave differently at test time.
        net_overfitting.eval()
        net_dropouted.eval()

        plt.cla()
        with torch.no_grad():  # no autograd bookkeeping during evaluation
            test_pred_ofit = net_overfitting(test_x)
            test_pred_drop = net_dropouted(test_x)
        plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta', alpha=0.5, label='train')
        plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='cyan', s=50, alpha=0.5, label='test')
        plt.plot(test_x.data.numpy(), test_pred_ofit.data.numpy(), 'r-', lw=3, label='overfitting')
        plt.plot(test_x.data.numpy(), test_pred_drop.data.numpy(), 'b--', lw=3, label='dropout(50%)')
        # .item() replaces the removed 0-dim tensor indexing idiom loss.data[0]
        plt.text(0, -1.2, r'$overfitting loss=%.4f$' % loss_func(test_pred_ofit, test_y).item(),
                 fontdict={'size': 10, 'color': 'red'})
        plt.text(0, -1.5, r'$dropout loss=%.4f$' % loss_func(test_pred_drop, test_y).item(),
                 fontdict={'size': 10, 'color': 'red'})
        plt.legend(loc='upper left')
        plt.ylim((-2.5, 2.5))
        plt.pause(0.1)

        # Back to train mode so dropout is active again.
        net_overfitting.train()
        net_dropouted.train()

plt.ioff()
plt.show()
|
[
"you@example.com"
] |
you@example.com
|
c44a67a3eaabc76d6e5635f62a79a69aa80faa77
|
e5a511e346f5be8a82fe9cb2edf457aa7e82859c
|
/PythonNEW/Practice/StringRemoveExistingIdentitaion.py
|
f66992651f064d1098bc0a3e95b04ea1ee0ff896
|
[] |
no_license
|
nekapoor7/Python-and-Django
|
8397561c78e599abc8755887cbed39ebef8d27dc
|
8fa4d15f4fa964634ad6a89bd4d8588aa045e24f
|
refs/heads/master
| 2022-10-10T20:23:02.673600
| 2020-06-11T09:06:42
| 2020-06-11T09:06:42
| 257,163,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
""" Write a Python program to remove existing indentation from all of the lines in a given text."""
import textwrap
sample_text = '''
Python is a widely used high-level, general-purpose, interpreted,
dynamic programming language. Its design philosophy emphasizes
code readability, and its syntax allows programmers to express
concepts in fewer lines of code than possible in languages such
as C++ or Java.
'''
text = textwrap.dedent(sample_text)
print(text)
|
[
"neha.kapoor070789@gmail.com"
] |
neha.kapoor070789@gmail.com
|
9393d21961b0043d35b932fd166c21ca22c72e0c
|
e456cdf76c1419413931d218317d44ea4b7c3fb7
|
/demo/django/pokedex/admin.py
|
fd91861f03cd25790e7dec41cc349aba98f35f27
|
[
"MIT"
] |
permissive
|
Nekmo/angular-django
|
cbbd8bb0c6baeea6e788c5623fb98102b443f1e9
|
0464747806ce4e79571d3a72db0f04e15f0c6e5e
|
refs/heads/master
| 2023-08-27T16:03:10.006482
| 2021-11-08T23:15:14
| 2021-11-08T23:15:14
| 298,419,330
| 14
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
from django.contrib import admin
# Register your models here.
from pokedex.models import Specie
@admin.register(Specie)
class SpecieAdmin(admin.ModelAdmin):
    """Default Django admin for the Specie model; no customisation yet."""
|
[
"contacto@nekmo.com"
] |
contacto@nekmo.com
|
cc175ac74f032d57d8641a106ebead8e8f7f8a10
|
7c9707f0f1cb8e633ac605934f3dbd8036790868
|
/projet/rpi_manager/migrations/0002_ph.py
|
71da2a49b682367bb47761ea2e6341addf2a5fc5
|
[] |
no_license
|
ometeore/hydropo
|
891e1abd4c1b8ccd0a3b27a043abf894b70ceb5b
|
324076d4b7ddbd14e718c424eb24d129c2a2243c
|
refs/heads/master
| 2023-06-14T08:35:55.838469
| 2021-07-04T16:28:09
| 2021-07-04T16:28:09
| 290,198,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
# Generated by Django 3.1 on 2020-08-25 13:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the Ph model: a timestamped pH reading linked to an Rpi."""

    dependencies = [
        ("rpi_manager", "0001_initial"),
    ]

    operations = [
        migrations.CreateModel(
            name="Ph",
            fields=[
                # Auto-increment primary key generated by Django.
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("date", models.DateTimeField()),
                ("value", models.FloatField()),
                # Deleting the parent Rpi cascades to its pH readings.
                (
                    "rpi",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="rpi_manager.rpi",
                    ),
                ),
            ],
        ),
    ]
|
[
"pilt64@hotmail.fr"
] |
pilt64@hotmail.fr
|
89e026a18c52f389d46597ba589fee07cc32a352
|
d44d33899aaab3d2a8b693b648701d49810aca12
|
/cip5-multiprofile-wave.py
|
e001a331576902efdc7df62b78d3e40a59f81237
|
[] |
no_license
|
izham-sugita/CIP
|
208eee2e108a910abd3a137083638244b8f60303
|
a0cd77531a34ad32a0cebeb6069123e89aceb0b5
|
refs/heads/master
| 2021-06-27T14:51:45.696969
| 2021-01-07T11:44:04
| 2021-01-07T11:44:04
| 204,810,048
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,796
|
py
|
# NOTE(review): leading indentation was lost in this copy of the file, so the
# loop/conditional bodies below are not syntactically valid as-is; statement
# order is preserved verbatim and only comments were added.
# Purpose: 1-D linear advection of several wave profiles using what appears to
# be a 5th-order CIP (constrained interpolation profile) scheme with a
# minmod-style limiter blending toward first-order upwind — TODO confirm.
import numpy as np
import matplotlib.pyplot as plt
#Changing the default size
#fig_size = plt.rcParams["figure.figsize"]
#fig_size[0] = 20
#fig_size[1] = 16
#plt.rcParams["figure.figsize"] = fig_size
imax = 2001
imax = int( input("Enter imax ") )
length = 2.0 #-1<=x<=1
dx = length/(imax-1)
# u: solution; ud1/ud2: first/second spatial derivatives; *n are next-step buffers
u = np.ndarray((imax),dtype=np.float64)
un = np.ndarray((imax),dtype=np.float64)
ud1 = np.zeros_like(u)
ud1n = np.zeros_like(u)
ud2 = np.zeros_like(u)
ud2n = np.zeros_like(u)
x = np.ndarray((imax),dtype=np.float64)
'''
for i in range(imax):
x[i] = i*dx
u[i] = 0.0
un[i] =0.0
if x[i] >= 4.0 and x[i] <= 6.0:
u[i] = 1.0
un[i]=1.0
'''
u[:] = 0.0
un[:] = 0.0
#multiple wave profile: Gaussian, square wave, triangle, half-ellipse
for i in range(imax):
x[i] = -1.0 + i*dx
if x[i] >=-0.8 and x[i] <=-0.6:
u[i] = np.exp( -np.log(2.0)*(x[i]+0.7)**2 / 0.0009 )
un[i] = u[i]
elif x[i] >=-0.5 and x[i] <=-0.2:
u[i] = 1.0
un[i] = u[i]
elif x[i] >=0.0 and x[i] <=0.2:
u[i] = 1.0 - abs(10.0*x[i] - 1.0)
un[i] = u[i]
elif x[i] >=0.4 and x[i] <=0.6:
u[i] = np.sqrt( 1.0 - 100.0*(x[i] - 0.5)**2 )
un[i] = u[i]
#Initiate derivatives value (second-order central differences)
for i in range( 1, imax-1 ):
ud1[i] = 0.5*(u[i+1] - u[i-1])/dx
for i in range( 1, imax-1 ):
ud2[i] = 0.5*(ud1[i+1] - ud1[i-1])/dx
dt = np.float64(input("Enter dt, dx=%s\n "%dx ))
elapsed = 10.0
itermax = int( elapsed/dt )-int(elapsed/2.0) #adjusted timestep; don't know why
print("Maximum iteration: ", itermax)
c = 1.0
c = float(input("Enter c, +1.0 or -1.0 "))
alpha = c*dt/dx
eps = 1.0e-6
uexact = np.zeros_like(u)
'''
#calculating exact solution
for i in range(imax):
r1 = itermax*dt + 4.0
r2 = r1 + (6.0 - 4.0) #did this on purpose, a reminder
if x[i] >=r1 and x[i] <= r2:
uexact[i] = 1.0
'''
uexact[:] = u[:]
#matrix A: maps the three highest quintic coefficients (a0..a2) to the
#value/slope/curvature constraints at the upwind node (solved per cell below)
up = -np.sign(c)
A = np.array( [ [ (up*dx)**5, (up*dx)**4, (up*dx)**3],
[5.0*(up*dx)**4, 4.0*(up*dx)**3, 3.0*(up*dx)**2],
[20.0*(up*dx)**3, 12.0*(up*dx)**2, 6.0*up*dx] ] )
coef = np.array( [0.0, 0.0, 0.0] )
b = np.array( [0.0, 0.0, 0.0] )
xx = -c*dt
steps = 1
eps = 1.0e-8
phi = np.zeros_like(u)
# main time-stepping loop
for iter in range(itermax):
for i in range(1,imax-1):
up = -np.sign(c)
iup = i + int(up)
xx = -c*dt
b[0] = ( u[iup] - u[i] ) -0.5*ud2[i]*dx*dx - ud1[i]*up*dx
b[1] = ( ud1[iup] - ud1[i] ) - ud2[i]*up*dx
b[2] = ud2[iup] - ud2[i]
coef = np.linalg.solve(A, b)
a0 = coef[0]
a1 = coef[1]
a2 = coef[2]
a3 = ud2[i]*0.5
a4 = ud1[i]
#limiter: udif is the first-order upwind slope used as the limited fallback
udif = ( u[iup] - u[i] )/dx*up
#minmod limiter
ratio = (u[i] - u[i-1]) / (u[i+1] - u[i] + eps)
phi0 = min(10.0*dx, ratio) #default is 1.0
phi[iup] = max(0.0, phi0)
#phi[iup] = 0.0
#van Leer (continuous function) #very diffusive
#ratio = (u[i] - u[i-1]) / (u[i+1] - u[i] + eps)
#phi[iup] = (ratio + abs(ratio)) / (1.0 + ratio)
#un[i] = a0*xx**5 + a1*xx**4 + a2*xx**3 + a3*xx**2 + a4*xx + u[i]
# blend: (1-phi)*quintic CIP update + phi*first-order upwind
un[i] = u[i] + (1.0-phi[iup])*(a4*xx + a3*xx**2 + a2*xx**3 + a1*xx**4 + a0*xx**5) \
+ phi[iup]*(udif*xx)
ud1n[i] = (1.0 - phi[iup])*( 5.0*a0*xx**4 + 4.0*a1*xx**3 + 3.0*a2*xx**2 + 2.0*a3*xx \
+ ud1[i] ) + phi[iup]*udif
# weight 0.98, 0.01 is the least diffusive
#putting weight only on the first derivative
#un[i] = u[i] + (1.0 - phi[iup])*(a4*xx) + a3*xx**2 + a2*xx**3 + a1*xx**4 + a0*xx**5 \
# + phi[iup]*(udif*xx)
#ud1n[i] = 5.0*a0*xx**4 + 4.0*a1*xx**3 + 3.0*a2*xx**2 + 2.0*a3*xx \
# + (1.0 - phi[iup])*ud1[i] + phi[iup]*udif
#the second derivative is not affected
ud2n[i] = 20.0*a0*xx**3 + 12.0*a1*xx**2 + 6.0*a2*xx + ud2[i]
#update periodic BC
u[0] = un[imax-2]
ud1[0] = ud1n[imax-2]
ud2[0] = ud2n[imax-2]
u[imax-1] = un[imax-2]
ud1[imax-1] = ud1n[imax-2]
ud2[imax-1] = ud2n[imax-2]
for i in range(1, imax-1):
u[i] = un[i]
ud1[i] = ud1n[i]
ud2[i] = ud2n[i]
#update periodic BC
#u[imax-1] = un[imax-2]
#ud1[imax-1] = ud1n[imax-2]
#ud2[imax-1] = ud2n[imax-2]
#u[0] = un[imax-2]
#ud1[0] = ud1n[imax-2]
#ud2[0] = ud2n[imax-2]
'''
#update
u[:] = un[:]
ud1[:] = ud1n[:]
ud2[:] = ud2n[:]
'''
#if iter%steps == 0:
# num = str(iter)
# filename = "./data1D/f"+num.zfill(5)+".csv"
# fp = open(filename, "w")
# fp.write("x, u\n")
# for i in range(imax):
# str1 = str(x[i])
# str2 = str(u[i])
# comma = ","
# nextline = "\n"
# strall = str1+comma+str2+nextline
# fp.write(strall)
# fp.close()
current = iter*dt + dt
display = "t = %.4f"%(current)
# reset the limiter weights for the next time step
phi[:] = 0.0
current = iter*dt + dt
display = "t = %.4f"%(current)
#plt.axis([0.0, 10.0, -0.5, 1.5 ] )
plt.axis([-2.0, 2.0, -0.5, 1.5 ] )
plt.title(display)
plt.ylabel("U")
plt.xlabel("x")
plt.plot(x,u,'bo-')
plt.pause(0.001)
plt.clf() #clear drawing
# final figure: computed solution vs the stored initial profile ("exact")
filename = "final.png"
#plt.axis([0.0, 10.0, -0.5, 1.5 ] )
plt.axis([-2.0, 2.0, -0.5, 1.5 ] )
plt.plot(x,u, 'bo-', x, uexact,'kv-')
plt.title(display)
plt.ylabel("U")
plt.xlabel("x")
plt.savefig(filename)
plt.show()
#plt.show(block=False)
#plt.pause(3)
#plt.close()
# dump the final profile as CSV
filename = "cip5-final.csv"
fp = open(filename, "w")
fp.write("x, u\n")
for i in range(imax):
str1 = str(x[i])
str2 = str(u[i])
comma = ","
nextline = "\n"
strall = str1+comma+str2+nextline
fp.write(strall)
fp.close()
|
[
"sugita5019@gmail.com"
] |
sugita5019@gmail.com
|
898a057527760f01aeb95b618322cf09388c1f42
|
02e23da0431623db86c8138bda350a1d526d4185
|
/Archivos Python Documentos/Graficas/.history/TRABAJO_SPT_v3_20200224230649.py
|
0fe0123926d8f3bed47ddd88db91bc709c442b12
|
[] |
no_license
|
Jaamunozr/Archivos-python
|
d9996d3d10ff8429cd1b4c2b396016a3a5482889
|
1f0af9ba08f12ac27e111fcceed49bbcf3b39657
|
refs/heads/master
| 2022-08-05T14:49:45.178561
| 2022-07-13T13:44:39
| 2022-07-13T13:44:39
| 244,073,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,848
|
py
|
# NOTE(review): leading indentation was lost in this copy of the file; the
# statement order below is preserved verbatim, only comments were edited
# (Spanish comments translated to English). Runtime strings left untouched.
# Purpose: compute and plot the surface potential produced by a grid of 8
# vertical ground electrodes during a fault (grounding-system analysis).
import os
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
#------------------------------------------------------------------------------
os.system("clear")
fig = pl.figure()
axx = Axes3D(fig)
raiz=np.sqrt
ln=np.log
puntoX=float(0)
puntoY=float(0)
#puntoX=float(input("Seleccione la coordenada en X donde desea calcular el potencial: "))
#puntoY=float(input("Seleccione la coordenada en Y donde desea calcular el potencial: "))
print("Calculando ...")
#------------------------------------------------------------------------------
Xa = np.arange(-10, 10, 0.1) # X coordinate range
Ya = np.arange(-10, 10, 0.1) # Y coordinate range
l = 2 # Electrode length [m]
rho= 100 # Soil resistivity [Ohm/m]
Ik=200 # Fault current [A] (total)
Rad=0.01 # Electrode radius [m]
Electrodos=8 # Number of electrodes
Pos1=4 # Y position 1 for the 2D-plot analysis
Pos2=0 # Y position 2 for the 2D-plot analysis
#------------------------------------------------------------------------------
# Electrode positions
#------------------------------------------------------------------------------
P=np.array([
[-4,-4], # Electrode A
[0,-4], # Electrode B
[4,-4], # Electrode C
[-4,0], # Electrode D
[4,0], # Electrode E
[-4,4], # Electrode F
[0,4], # Electrode G
[4,4] # Electrode H
])
#------------------------------------------------------------------------------
E=Electrodos-1
# current injected per electrode (total fault current split evenly)
ik=Ik/Electrodos
Vt=np.zeros((np.count_nonzero(Xa),np.count_nonzero(Ya)))
m=np.zeros((Electrodos,1))
V=np.zeros((Electrodos,1))
k=0
m2=np.zeros((Electrodos,1))
V2=np.zeros((Electrodos,1))
#------------------------------------------------------------------------------
# Potential at the user-specified point
#------------------------------------------------------------------------------
i=0
while i<=E:
# distance from electrode i to the query point; clamp to the rod radius
# when the point coincides with the electrode (avoids log/div-by-zero)
m2[i][0] =round(raiz((((P[i][0])-puntoX)**2)+(((P[i][1])-puntoY)**2)),4)
o,u=((P[i][0])-puntoX),((P[i][1])-puntoY)
if ((o ==0) and (u==0)) or (m2[i][0]==0):
#print("Elementos de matriz",k,t, "x,y",P[i][0],P[i][1],"punto de eje",X,Y )
m2[i][0]=Rad
V2[i][0] =ln((l+raiz((m2[i][0])**2+l**2))/(m2[i][0]))
i += 1
Vt2=(np.sum(V2)*(rho*ik))/(2*np.pi*l)
print("El potencial en el punto (",puntoX,",",puntoY,"), es de",round(Vt2,3),"[V]")
#------------------------------------------------------------------------------
# Potential over the whole X/Y grid (same per-electrode formula as above)
#------------------------------------------------------------------------------
Vxy = [1] * (np.count_nonzero(Ya))
while k<np.count_nonzero(Ya):
Y=round(Ya[k],3)
t=0
while t<np.count_nonzero(Xa):
X=round(Xa[t],3)
i=0
while i<=E:
m[i][0] =round(raiz((((P[i][0])-X)**2)+(((P[i][1])-Y)**2)),4)
o,u=((P[i][0])-X),((P[i][1])-Y)
if ((o ==0) and (u==0)) or (m[i][0]==0):
#print("Elementos de matriz",k,t, "x,y",P[i][0],P[i][1],"punto de eje",X,Y )
m[i][0]=Rad
V[i][0] =ln((l+raiz((m[i][0])**2+l**2))/(m[i][0]))
i += 1
Vt[k][t]=np.sum(V)
# capture 1-D profiles at the two analysis rows and along the diagonal
if Y==Pos1:
Vxa=Vt[k]
if Y==Pos2:
Vxb=Vt[k]
if (Y==X) and ((X-Y)==0):
Vxy[k]=Vt[k][t]*(rho*ik)/(2*np.pi*l)
t +=1
k +=1
Vtt=(Vt*(rho*ik))/(2*np.pi*l)
Vxa=(Vxa*(rho*ik))/(2*np.pi*l)
Vxb=(Vxb*(rho*ik))/(2*np.pi*l)
aa=np.where(np.amax(Vtt) == Vtt)
print ("Valor máximo de tensión (GPR):",round(Vtt[::].max(),3),"[V], en posición: (",round(Xa[aa[0][0]],2),",",round(Ya[aa[1][0]],2),")")
bb=np.where(np.amin(Vtt) == Vtt)
print("Valor de Resistencia de puesta a tierra:", (round(Vtt[::].max(),3)/Ik), "[Ohm]")
#print ("Valor mínimo de tensión:",round(Vtt[::].min(),3),"[V], en posición: (",round(Xa[bb[0][0]],2),",",round(Ya[bb[1][0]],2),")")
print ("Número de elementos de Vt:",np.count_nonzero(Vtt))
#------------------------------------------------------------------------------
# 3D PLOTS
#------------------------------------------------------------------------------
# Configure a figure twice as tall as wide
#fig = plt.figure(figsize=plt.figaspect(0.2))
#fig = plt.figure(4,figsize=(6,4.5)) #(width, height)
#fig.suptitle('Potencial')
fig = plt.figure(figsize=plt.figaspect(2.))
fig.suptitle('A tale of 2 subplots')
# First image to draw: the potential surface
ax = fig.add_subplot(2, 2, 1, projection='3d')
X, Y = np.meshgrid(Xa, Ya)
#surf = ax.plot_surface(X, Y, Vtt, cmap = cm.get_cmap("jet"))#, antialiased=False)
surf = ax.plot_surface(X, Y, Vtt, rstride=1, cstride=1,
linewidth=0, antialiased=False)
ax.set_zlim(300, 1800)
#fig.colorbar(surf)
#------------------------------------------------------------------------------
# 2D plots: the three captured profiles
#------------------------------------------------------------------------------
x1=Xa
ax = fig.add_subplot(2, 2, 2)
ax.plot(x1, Vxa, color="blue", linewidth=1.0, linestyle="-")
ax.title.set_text('Eje X1 vs V')
ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
ax.set_ylabel('Grafica 1')
ax = fig.add_subplot(2, 2, 3)
ax.plot(x1, Vxb, color="red", linewidth=1.0, linestyle="-")
ax.title.set_text('Eje X2 vs V')
ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
ax.set_ylabel('Grafica 2')
ax = fig.add_subplot(2, 2, 4)
ax.plot(x1, Vxy, color="green", linewidth=1.0, linestyle="-")
ax.title.set_text('Eje X,Y vs V')
ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
ax.set_ylabel('Grafica 3')
plt.pause(25)
pl.savefig('tierras.pdf')
|
[
"jaamunozr@gmail.com"
] |
jaamunozr@gmail.com
|
423824d04b9ff1a989d3a18f132c057b03f82f22
|
4554f8d3ab1a6267b17dad2b4d2c47b0abe8d746
|
/benchmarking/remote/devices.py
|
c7cacd80d2eb8e3a4eb17eebb98a6ac45237cf32
|
[
"Apache-2.0"
] |
permissive
|
jteller/FAI-PEP
|
44fead3ca26f4844067d455c86ac8c5bfaf79a14
|
73b8a08815675135e9da7d68375d1218cbd04eaa
|
refs/heads/master
| 2020-04-29T06:04:19.197966
| 2019-03-15T23:32:54
| 2019-03-15T23:32:54
| 175,904,011
| 0
| 0
|
Apache-2.0
| 2019-03-15T23:30:04
| 2019-03-15T23:30:04
| null |
UTF-8
|
Python
| false
| false
| 2,484
|
py
|
#!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
from utils.devices import devices as devices_dict
class Devices(object):
    """Registry of benchmark devices, addressable by full name or abbreviation.

    Each entry maps a device key to a dict with a mandatory ``name`` field
    (which must equal the key) and an optional ``abbr`` list of aliases;
    aliases are registered as extra keys pointing at the same entry.
    """

    def __init__(self, filename=None):
        """Load device definitions from *filename* (JSON) or the built-in table."""
        if filename:
            # A user-supplied file overrides the bundled registry.
            assert os.path.isfile(filename), \
                "Device file {} does not exist".format(filename)
            with open(filename, "r") as f:
                self.devices = json.load(f)
        else:
            # Fall back to the registry shipped with the package.
            self.devices = devices_dict
        self._elaborateDevices()

    def getFullNames(self, devices):
        """Expand a comma-separated list of device keys to their full names.

        Unknown entries are passed through unchanged.
        """
        resolved = []
        for entry in devices.split(","):
            if entry in self.devices:
                resolved.append(self.devices[entry]["name"])
            else:
                resolved.append(entry)
        return ",".join(resolved)

    def getAbbrs(self, abbr):
        """Return the abbreviation list of *abbr*'s device, or None if absent."""
        if abbr in self.devices:
            entry = self.devices[abbr]
            if "abbr" in entry:
                return entry["abbr"]
        return None

    def _elaborateDevices(self):
        """Validate every entry, then register its abbreviations as alias keys."""
        pending = []
        for name in self.devices:
            device = self.devices[name]
            assert "name" in device, \
                "Field name is required in devices"
            assert device["name"] == name, \
                "Device key ({}) and name ({})".format(name, device["name"]) + \
                " do not match"
            if "abbr" in device:
                assert isinstance(device["abbr"], list), \
                    "Abbreviations for {} needs to be a list".format(name)
                for alias in device["abbr"]:
                    pending.append((device, alias))
        # Aliases are added only after the scan so the dict is not
        # mutated while being iterated.
        for device, alias in pending:
            self._elaborateOneDevice(device, alias)

    def _elaborateOneDevice(self, device, abbr):
        """Register *abbr* as an alias key pointing at *device*."""
        assert abbr not in self.devices, "Abbreviation " + \
            "{} is already specified in the device list".format(abbr)
        self.devices[abbr] = device
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
431a60378e86b4b85d841143ab2f513bb7bbeeff
|
1b5cc8dc487da59455dfe6749796870d51d5ab87
|
/src/collective/iptvusp/tests/test_uspvideo.py
|
72b74796685ac00b3964064bc7a733813671c2c5
|
[] |
no_license
|
simplesconsultoria/collective.iptvusp
|
eddcd726a800933127b04959bba90c63210049dc
|
89b14ee4a01e19ef5cd7198c5bdf808ef555f1f0
|
refs/heads/master
| 2021-01-01T18:29:41.272115
| 2013-03-12T19:01:25
| 2013-03-12T19:01:25
| 6,388,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,806
|
py
|
# -*- coding: utf-8 -*-
import unittest2 as unittest
from zope.component import createObject
from zope.component import queryUtility
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
from plone.dexterity.interfaces import IDexterityFTI
from plone.app.dexterity.behaviors.exclfromnav import IExcludeFromNavigation
from collective.iptvusp.content import IUSPVideo
from collective.iptvusp.testing import INTEGRATION_TESTING
class CoverIntegrationTestCase(unittest.TestCase):
    """Integration tests for the ``iptvusp.uspvideo`` Dexterity content type."""

    layer = INTEGRATION_TESTING

    def setUp(self):
        """Create a folder and one USP Video object for the tests to exercise."""
        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
        self.portal.invokeFactory('Folder', 'test-folder')
        setRoles(self.portal, TEST_USER_ID, ['Member'])
        self.folder = self.portal['test-folder']
        self.folder.invokeFactory(
            'iptvusp.uspvideo', 'c1', template_layout='Layout A')
        self.c1 = self.folder['c1']

    def test_adding(self):
        """A created object provides the IUSPVideo interface."""
        self.assertTrue(IUSPVideo.providedBy(self.c1))

    def test_fti(self):
        """The factory type information is registered."""
        type_info = queryUtility(IDexterityFTI,
                                 name='iptvusp.uspvideo')
        self.assertNotEqual(None, type_info)

    def test_schema(self):
        """The FTI looks up IUSPVideo as the content schema."""
        type_info = queryUtility(IDexterityFTI,
                                 name='iptvusp.uspvideo')
        self.assertEqual(IUSPVideo, type_info.lookupSchema())

    def test_factory(self):
        """Objects built by the registered factory provide IUSPVideo."""
        type_info = queryUtility(IDexterityFTI,
                                 name='iptvusp.uspvideo')
        new_object = createObject(type_info.factory)
        self.assertTrue(IUSPVideo.providedBy(new_object))

    def test_exclude_from_navigation_behavior(self):
        """The exclude-from-navigation behaviour is enabled on the type."""
        self.assertTrue(IExcludeFromNavigation.providedBy(self.c1))
|
[
"erico@simplesconsultoria.com.br"
] |
erico@simplesconsultoria.com.br
|
6a28e7551bac14d5e50e838a962b64b49a7008ae
|
057722b227e9f51c78bd77b622859674016f19dc
|
/homework4/code/p7/trysvm.py
|
8783e7fd91174a983250421516e6938b0597d778
|
[] |
no_license
|
walkerning/Homework-pattern_recognition
|
56508bc66d0932ad8c9899658d8229169d800551
|
843a79d1f4cc278839ade27a593ae66e603ac4ba
|
refs/heads/master
| 2021-03-19T15:30:55.581932
| 2017-05-31T15:51:22
| 2017-05-31T15:51:22
| 84,166,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
from sklearn import svm
# Ten 2-D samples per class; the arrays are transposed so each row is one
# (x1, x2) sample, giving shape (10, 2).
samples_w1 = np.array([[-3.0, 0.5, 2.9, -0.1, -4.0, -1.3, -3.4, -4.1, -5.1, 1.9],
[-2.9, 8.7, 2.1, 5.2, 2.2, 3.7, 6.2, 3.4, 1.6, 5.1]]).T
samples_w2 = np.array([[-2.0, -8.9, -4.2, -8.5, -6.7, -0.5, -5.3, -8.7, -7.1, -8.0],
[-8.4, 0.2, -7.7, -3.2, -4.0, -9.2, -6.7, -6.4, -9.7, -6.3]]).T
def transform_data(data):
    """Map 2-D samples into the quadratic feature space.

    data: array of shape (n, 2) with columns (x1, x2).
    Returns an (n, 6) array with columns [1, x1, x2, x1**2, x2**2, x1*x2].
    """
    bias = np.ones((data.shape[0], 1))
    squares = data ** 2
    cross = (data[:, 0] * data[:, 1])[:, np.newaxis]
    return np.hstack((bias, data, squares, cross))
# NOTE(review): indentation was lost in this copy and the prints use
# Python 2 statement syntax; code tokens are preserved verbatim.
def main():
# Train a hard-margin linear SVM (C=1e10 approximates infinite penalty) on the
# quadratic-feature-mapped samples, with an increasing number of samples per class.
# set misclassification penalty to a large enough value
trans_samples_w1 = transform_data(samples_w1)
trans_samples_w2 = transform_data(samples_w2)
# data = np.vstack((trans_samples_w1[0, :], trans_samples_w2[0, :]))
# labels = [0, 1]
# res = svm.SVC(C=1e10, kernel="linear").fit(data, labels)
# m = np.sqrt(res.coef_[0].dot(res.coef_[0]))
# margin1 = (res.coef_.dot(trans_samples_w1[0,:]) + res.intercept_) / m
# margin2 = (res.coef_.dot(trans_samples_w2[0,:]) + res.intercept_) / m
# print "margin of w1 {}: {}; margin of w2 {}: {}".format(trans_samples_w1[0, :], margin1,
# trans_samples_w2[0, :], margin2)
for num in range(1, samples_w1.shape[0]+1):
data = np.vstack((trans_samples_w1[:num, :], trans_samples_w2[:num, :]))
labels = np.hstack((np.zeros(num), np.ones(num)))
res = svm.SVC(C=1e10, kernel="linear").fit(data, labels)
# margin of a linear SVM is 1/||w||
print "sample number: {}, coef: {}, b: {}, margin: {}".format(num*2, res.coef_, res.intercept_, np.sqrt(1/(res.coef_[0].dot(res.coef_[0]))))
if __name__ == "__main__":
main()
|
[
"foxdoraame@gmail.com"
] |
foxdoraame@gmail.com
|
d2f6e5faa8e1f124af00e0502dca3ad30670785e
|
b5fabc6c6de064690f8d4ee423001cf9365a3d9f
|
/flash/image/segmentation/model.py
|
9296db60cbcff1e6220d5aee051ddb36549a8b1f
|
[
"Apache-2.0"
] |
permissive
|
dmarx/lightning-flash
|
021dfd76bde6e30309f14feb5853020b0babe90d
|
4cda031c1f9c8d8754fd36b5720d2a5a7d866765
|
refs/heads/master
| 2023-09-06T06:24:29.856354
| 2021-11-24T23:38:14
| 2021-11-24T23:38:14
| 422,352,910
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,182
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Union
import torch
from torch import nn
from torch.nn import functional as F
from torchmetrics import IoU
from flash.core.classification import ClassificationTask
from flash.core.data.io.input import DataKeys
from flash.core.data.io.output_transform import OutputTransform
from flash.core.registry import FlashRegistry
from flash.core.utilities.imports import _KORNIA_AVAILABLE
from flash.core.utilities.isinstance import _isinstance
from flash.core.utilities.types import (
LOSS_FN_TYPE,
LR_SCHEDULER_TYPE,
METRICS_TYPE,
OPTIMIZER_TYPE,
OUTPUT_TRANSFORM_TYPE,
OUTPUT_TYPE,
)
from flash.image.segmentation.backbones import SEMANTIC_SEGMENTATION_BACKBONES
from flash.image.segmentation.heads import SEMANTIC_SEGMENTATION_HEADS
from flash.image.segmentation.output import SegmentationLabels
if _KORNIA_AVAILABLE:
import kornia as K
class SemanticSegmentationOutputTransform(OutputTransform):
"""Resizes each sample's predictions and input back to the original image size recorded in its metadata."""
def per_sample_transform(self, sample: Any) -> Any:
# Bilinear resize (kornia) to the (height, width) stored under METADATA["size"];
# applied to both the predictions and the input image.
resize = K.geometry.Resize(sample[DataKeys.METADATA]["size"][-2:], interpolation="bilinear")
sample[DataKeys.PREDS] = resize(sample[DataKeys.PREDS])
sample[DataKeys.INPUT] = resize(sample[DataKeys.INPUT])
return super().per_sample_transform(sample)
class SemanticSegmentation(ClassificationTask):
"""``SemanticSegmentation`` is a :class:`~flash.Task` for semantic segmentation of images. For more details, see
:ref:`semantic_segmentation`.
Args:
num_classes: Number of classes to classify.
backbone: A string or model to use to compute image features.
backbone_kwargs: Additional arguments for the backbone configuration.
head: A string or (model, num_features) tuple to use to compute image features.
head_kwargs: Additional arguments for the head configuration.
pretrained: Use a pretrained backbone.
loss_fn: Loss function for training.
optimizer: Optimizer to use for training.
lr_scheduler: The LR scheduler to use during training.
metrics: Metrics to compute for training and evaluation. Can either be a metric from the `torchmetrics`
package, a custom metric inheriting from `torchmetrics.Metric`, a callable function or a list/dict
containing a combination of the aforementioned. In all cases, each metric needs to have the signature
`metric(preds,target)` and return a single scalar tensor. Defaults to :class:`torchmetrics.IOU`.
learning_rate: Learning rate to use for training.
multi_label: Whether the targets are multi-label or not.
output: The :class:`~flash.core.data.io.output.Output` to use when formatting prediction outputs.
output_transform: :class:`~flash.core.data.io.output_transform.OutputTransform` used for post-processing samples.
"""
# Class-level registries/configuration shared by all instances.
output_transform_cls = SemanticSegmentationOutputTransform
backbones: FlashRegistry = SEMANTIC_SEGMENTATION_BACKBONES
heads: FlashRegistry = SEMANTIC_SEGMENTATION_HEADS
required_extras: str = "image"
def __init__(
self,
num_classes: int,
backbone: Union[str, nn.Module] = "resnet50",
backbone_kwargs: Optional[Dict] = None,
head: str = "fpn",
head_kwargs: Optional[Dict] = None,
pretrained: Union[bool, str] = True,
loss_fn: LOSS_FN_TYPE = None,
optimizer: OPTIMIZER_TYPE = "Adam",
lr_scheduler: LR_SCHEDULER_TYPE = None,
metrics: METRICS_TYPE = None,
learning_rate: float = 1e-3,
multi_label: bool = False,
output: OUTPUT_TYPE = None,
output_transform: OUTPUT_TRANSFORM_TYPE = None,
) -> None:
# Defaults: IoU metric and cross-entropy loss when none are supplied.
if metrics is None:
metrics = IoU(num_classes=num_classes)
if loss_fn is None:
loss_fn = F.cross_entropy
# TODO: need to check for multi_label
if multi_label:
raise NotImplementedError("Multi-label not supported yet.")
super().__init__(
model=None,
loss_fn=loss_fn,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
metrics=metrics,
learning_rate=learning_rate,
output=output or SegmentationLabels(),
output_transform=output_transform or self.output_transform_cls(),
)
self.save_hyperparameters()
if not backbone_kwargs:
backbone_kwargs = {}
if not head_kwargs:
head_kwargs = {}
# Accept either a ready-made backbone module or a registry key.
if isinstance(backbone, nn.Module):
self.backbone = backbone
else:
self.backbone = self.backbones.get(backbone)(**backbone_kwargs)
self.head: nn.Module = self.heads.get(head)(
backbone=self.backbone, num_classes=num_classes, pretrained=pretrained, **head_kwargs
)
# NOTE(review): the head factory wraps the backbone; re-pointing
# self.backbone at head.encoder assumes every registered head exposes
# an `encoder` attribute — confirm against the heads registry.
self.backbone = self.head.encoder
# The *_step hooks unpack the batch dict into the (input, target) tuple
# that the ClassificationTask base class expects.
def training_step(self, batch: Any, batch_idx: int) -> Any:
batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
return super().training_step(batch, batch_idx)
def validation_step(self, batch: Any, batch_idx: int) -> Any:
batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
return super().validation_step(batch, batch_idx)
def test_step(self, batch: Any, batch_idx: int) -> Any:
batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
return super().test_step(batch, batch_idx)
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
# Keep the full batch dict and attach predictions under PREDS so the
# output transform can resize them using the sample metadata.
batch_input = batch[DataKeys.INPUT]
batch[DataKeys.PREDS] = super().predict_step(batch_input, batch_idx, dataloader_idx=dataloader_idx)
return batch
def forward(self, x) -> torch.Tensor:
res = self.head(x)
# some frameworks like torchvision return a dict.
# In particular, torchvision segmentation models return the output logits
# in the key `out`.
if _isinstance(res, Dict[str, torch.Tensor]):
res = res["out"]
return res
@classmethod
def available_pretrained_weights(cls, backbone: str):
# Query the registry metadata for downloadable weights of this backbone.
result = cls.backbones.get(backbone, with_metadata=True)
pretrained_weights = None
if "weights_paths" in result["metadata"]:
pretrained_weights = list(result["metadata"]["weights_paths"])
return pretrained_weights
@staticmethod
def _ci_benchmark_fn(history: List[Dict[str, Any]]):
"""This function is used only for debugging usage with CI."""
assert history[-1]["val_iou"] > 0.2
|
[
"noreply@github.com"
] |
dmarx.noreply@github.com
|
bf2bb21fe32c046e31ac269a94e444f91dc0217b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03626/s873858203.py
|
5c64e4b78d0537220084f2cc138a77120c711579
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
MOD = 1000000007


def solve(n, s1, s2):
    """Count the colorings (mod 1e9+7) of a 2xN board tiled with dominoes.

    The tiling is encoded by two strings of length n: s1[i] == s2[i] means
    column i holds a vertical domino; otherwise columns i and i+1 hold two
    horizontal dominoes.  Adjacent dominoes must receive different colors
    out of three available (AtCoder "Coloring Dominoes").
    """
    if s1[0] == s2[0]:
        # Leading vertical domino: 3 color choices.
        ans = 3
        i = 1
        prev = 1  # width of the previous domino block (1=vertical, 2=horizontal)
    else:
        # Leading horizontal pair: 3 * 2 color choices.
        ans = 6
        i = 2
        prev = 2
    while i < n:
        if s1[i] == s2[i]:
            # Vertical domino next.
            i += 1
            if prev == 1:
                ans = ans * 2 % MOD  # vertical after vertical: 2 choices
            # vertical after horizontal: color is forced (factor 1)
            prev = 1
        else:
            # Horizontal pair next.
            i += 2
            if prev == 1:
                ans = ans * 2 % MOD  # horizontal pair after vertical: 2 choices
            else:
                ans = ans * 3 % MOD  # horizontal pair after horizontal: 3 choices
            prev = 2
    return ans % MOD


if __name__ == "__main__":
    n = int(input())
    s1 = input()
    s2 = input()
    print(solve(n, s1, s2))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
deb150440060e0d6c968a2ccf2812970012b495a
|
27c4e774f053594473da202c1c45dcbf237465be
|
/Scorm.py
|
566403fdd0e407806057a4daa6e2727586ed572a
|
[] |
no_license
|
Gamboua/zope-migration
|
34e6b27962859352fe08a4277a8215b36b01889c
|
7a83ed67c5ea561bfa8aa300728390b7220f3633
|
refs/heads/master
| 2020-12-25T14:49:22.173420
| 2017-10-19T20:47:50
| 2017-10-19T20:47:50
| 67,830,154
| 0
| 1
| null | 2016-10-20T21:42:09
| 2016-09-09T20:20:57
|
PHP
|
UTF-8
|
Python
| false
| false
| 1,910
|
py
|
import paramiko
import os
from scp import SCPClient
from config import *
from Command import Command
import random, string
class Scorm:
    """Fetches a SCORM package folder over SCP, zips it, and registers it
    as a SCORM activity on a Moodle course via the CLI wrapper."""

    def __init__(self, scorm, course):
        self.course = course
        self.scp = None
        self.type = 'scorm'
        self.section = 0
        # Optional fields taken from the scorm descriptor dict.
        self.folder = self.get_if_exists('folder', scorm)
        self.title = self.get_if_exists('title', scorm)

    def get_if_exists(self, parameter, json):
        """Return json[parameter] when the key is present, else None."""
        if parameter in json:
            return json.get(parameter)
        return None

    def scorm_add(self):
        """Download the package, zip it, and create the SCORM activity."""
        self.scorm_import_folder()
        archive = self.scorm_zip()
        Command.command_execute(Command.activity_create_command(
            options=self.get_scorm_options(archive), type=self.type, id=self.course.id
        ))

    def get_scorm_options(self, name):
        """Build the option string for the activity-creation command."""
        parts = []
        if self.section is not None:
            parts.append('--section %s' % self.section)
        if self.title:
            parts.append('--name "%s"' % self.title)
        parts.append('--filepath /tmp/%s.zip' % name)
        return ' '.join(parts)

    def scorm_zip(self):
        """Zip the downloaded folder under /tmp using a random 8-letter name."""
        name = ''.join(random.choice(string.ascii_letters) for _ in range(8))
        os.chdir(self.folder)
        os.system('zip -r /tmp/%s *' % name)
        # Restore the working directory to this module's location.
        os.chdir(os.path.dirname(os.path.abspath(__file__)))
        return name

    def scorm_import_folder(self):
        """Copy the SCORM folder from the remote server into /opt/zope298/courses."""
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(REMOTE_SCORM_SERVER, REMOTE_SCORM_PORT, REMOTE_SCORM_USER)
        transfer = SCPClient(client.get_transport())
        if not os.path.isdir('/opt/zope298/courses'):
            os.makedirs('/opt/zope298/courses')
        transfer.get(
            self.folder,
            '/opt/zope298/courses/',
            recursive=True
        )
        transfer.close()
|
[
"gabriel.bonfim@4linux.com.br"
] |
gabriel.bonfim@4linux.com.br
|
4a5b7c6844ca194b50ed70323648cba57b6e0b8d
|
c6e5bbafd810d23e0ee46d69026cba35339d1dbd
|
/accounts/managers.py
|
42d3aae755fba1358031994ccd3a06d4ca8dcdd1
|
[] |
no_license
|
mfonism/django-inqueerstigate
|
9c8b729848bf3df9fb9ec991ec47391b69ad7b66
|
af5420bf8adf6aa89533cd1462d9eeed6e8c88db
|
refs/heads/main
| 2023-05-26T12:59:55.774989
| 2021-06-07T11:46:48
| 2021-06-07T11:46:48
| 323,681,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
from django.contrib.auth.models import BaseUserManager
class UserManager(BaseUserManager):
    """Manager for an email-keyed user model (no username field)."""

    use_in_migrations = True

    def _create_user(self, email, password, **extra_fields):
        """Normalize the email, hash the password, and persist the user."""
        if not email:
            raise ValueError("The given email must be set")
        normalized = self.normalize_email(email)
        user = self.model(email=normalized, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email, password=None, **extra_fields):
        """Create a regular account (staff/superuser flags default to False)."""
        for flag in ("is_staff", "is_superuser"):
            extra_fields.setdefault(flag, False)
        return self._create_user(email, password, **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        """Create a superuser; reject explicitly-False staff/superuser flags."""
        extra_fields.setdefault("is_staff", True)
        extra_fields.setdefault("is_superuser", True)
        if extra_fields.get("is_staff") is not True:
            raise ValueError("Superuser must have is_staff=True.")
        if extra_fields.get("is_superuser") is not True:
            raise ValueError("Superuser must have is_superuser=True.")
        return self._create_user(email, password, **extra_fields)
|
[
"mfonetimfon@gmail.com"
] |
mfonetimfon@gmail.com
|
847e83de22c9dbcb04f87362a0d956c786584799
|
caace044baf7a6f2b0bda65ae361eed06bddfc3c
|
/dailyQuestion/2020/2020-06/06-01/python/solution_items.py
|
1f7df5de69c465f7a57e918ca5eee350c02c2603
|
[
"Apache-2.0"
] |
permissive
|
russellgao/algorithm
|
fd6126e89c40d7d351c53bbd5fde690c9be899ef
|
ad5e724d20a8492b8eba03fc0f24e4ff5964b3ea
|
refs/heads/master
| 2023-03-28T03:00:02.370660
| 2021-03-28T10:56:38
| 2021-03-28T10:56:38
| 259,038,372
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node: a value plus a pointer to the next node."""

    def __init__(self, x):
        # A fresh node is detached: value stored, no successor yet.
        self.val, self.next = x, None
# Iterative (bottom-up) merge sort over a singly-linked list.
def sortList(head: ListNode) -> ListNode:
    """Sort the linked list ascending and return the new head.

    Bottom-up merge sort: merge adjacent runs of length invc
    (1, 2, 4, ...) in place, doubling invc each pass, so no
    recursion stack is needed. O(n log n) time, O(1) extra space.
    """
    # Count the nodes so we know when the run length covers the list.
    head_len = 0
    invc = 1
    h = head
    while h :
        head_len += 1
        h = h.next
    result = ListNode(0)  # dummy head simplifies re-linking
    result.next = head
    while invc <= head_len :
        pre = result        # tail of the already-merged prefix
        h = result.next
        while h :
            # First run: h1, up to invc nodes; h walks past it.
            h1 ,i = h , invc
            while i and h :
                i -= 1
                h = h.next
            if i :
                break       # fewer than invc nodes left: nothing to merge with
            # Second run: h2, up to invc nodes (may be shorter at the tail).
            h2, i = h, invc
            while i and h :
                i -= 1
                h = h.next
            # Remaining node counts of each run (second run may be short).
            c1, c2 = invc, invc-i
            # Standard two-way merge of the runs, appending after pre.
            while c1 and c2 :
                if h1.val > h2.val :
                    pre.next = h2
                    h2 = h2.next
                    c2 -= 1
                else :
                    pre.next = h1
                    h1 = h1.next
                    c1 -= 1
                pre = pre.next
            pre.next = h1 if c1 else h2
            # Advance pre past whichever run still has nodes.
            while c1 > 0 or c2 > 0 :
                pre = pre.next
                c1 -= 1
                c2 -= 1
            pre.next = h    # reattach the not-yet-merged remainder
        invc <<= 1          # double the run length each pass
    return result.next
if __name__ == "__main__" :
    # Build the demo list 4 -> 2 -> 1 -> 3 -> 5, sort it, and print
    # each value on its own line followed by a blank line.
    values = [4, 2, 1, 3, 5]
    head = ListNode(values[0])
    tail = head
    for v in values[1:]:
        tail.next = ListNode(v)
        tail = tail.next
    cursor = sortList(head)
    while cursor :
        print(cursor.val)
        cursor = cursor.next
    print()
|
[
"gaoweizong@hd123.com"
] |
gaoweizong@hd123.com
|
db0454c4c301f4b509ebb198c08bac7e87c6a3bd
|
d19d16ddc922b0915aff982568c5c71ee58fb8b9
|
/dataset/utils.py
|
f13a627e795ae92c6dca77770e719e98d0542e2e
|
[] |
no_license
|
zhaoyuzhi/HSGAN
|
036a6fec722d564f9b203f6032bf47039c1eadd4
|
f974761ec4a65ef58283ae4ccba618b97e79c4bc
|
refs/heads/main
| 2023-08-03T10:06:05.195187
| 2023-07-27T14:21:54
| 2023-07-27T14:21:54
| 337,642,689
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,688
|
py
|
import os
import numpy as np
# ----------------------------------------
# PATH processing
# ----------------------------------------
def check_path(path):
    """Create *path* (including parents) if it does not already exist.

    exist_ok=True removes the check-then-create race of the original
    `if not os.path.exists(path)` guard and makes the call idempotent.
    """
    os.makedirs(path, exist_ok=True)
def get_files(path):
    """Walk *path* recursively and return the full path of every file."""
    found = []
    for dirpath, _dirnames, filenames in os.walk(path):
        found.extend(os.path.join(dirpath, fname) for fname in filenames)
    return found
def get_jpgs(path):
    """Walk *path* recursively and return the bare file names.

    NOTE(review): despite the name this returns EVERY file, not only jpgs;
    behavior kept as-is since callers may depend on it.
    """
    names = []
    for _dirpath, _dirnames, filenames in os.walk(path):
        names.extend(filenames)
    return names
def get_mats(path):
    """Walk *path* recursively and return full paths of .mat files."""
    ret = []
    for root, dirs, files in os.walk(path):
        for filespath in files:
            # endswith('.mat') instead of the old `[-3:] == 'mat'` slice,
            # which also matched names like 'format' with no .mat extension.
            if filespath.endswith('.mat'):
                ret.append(os.path.join(root, filespath))
    return ret
def get_mats_name(path):
    """Walk *path* and return the stem (text before the first '.') of each .mat file."""
    ret = []
    for root, dirs, files in os.walk(path):
        for filespath in files:
            # Same extension fix as get_mats: require a real '.mat' suffix
            # rather than the old `[-3:] == 'mat'` slice.
            if filespath.endswith('.mat'):
                # split('.')[0] keeps the original stem rule: everything
                # before the FIRST dot (so 'a.b.mat' -> 'a').
                ret.append(filespath.split('.')[0])
    return ret
def get_bmps(path):
    """Walk *path* recursively and return full paths of .bmp files."""
    ret = []
    for root, dirs, files in os.walk(path):
        for filespath in files:
            # endswith('.bmp') instead of the old `[-3:] == 'bmp'` slice,
            # which also matched names merely ending in 'bmp' with no dot.
            if filespath.endswith('.bmp'):
                ret.append(os.path.join(root, filespath))
    return ret
def get_pairs_name(path):
    """Walk *path* and return stems of .mat files.

    NOTE(review): duplicates get_mats_name in this module; kept as a
    separate function to preserve the public API.
    """
    ret = []
    for root, dirs, files in os.walk(path):
        for filespath in files:
            # Require a real '.mat' suffix (old `[-3:] == 'mat'` slice also
            # matched extension-less names like 'format').
            if filespath.endswith('.mat'):
                ret.append(filespath.split('.')[0])
    return ret
# ----------------------------------------
# PATH processing
# ----------------------------------------
def text_readlines(filename):
    """Read *filename* and return its lines without trailing newlines.

    Returns [] when the file cannot be opened (original best-effort
    contract is preserved).
    """
    try:
        # `with` guarantees the handle is closed (original leaked it on
        # a read error and closed it manually otherwise).
        with open(filename, 'r') as file:
            content = file.readlines()
    except IOError:
        return []
    # Strip exactly one trailing '\n' per line; unlike the original
    # unconditional `line[:len(line)-1]` slice, this does not eat the
    # last character of a final line that has no newline.
    return [line[:-1] if line.endswith('\n') else line for line in content]
def text_save(content, filename, mode = 'a'):
    """Write each element of *content* to *filename*, one per line.

    mode defaults to 'a' (append), matching the original behavior.
    """
    # `with` guarantees the handle is closed even if a write fails.
    with open(filename, mode) as file:
        for item in content:
            file.write(str(item) + '\n')
def savetxt(name, loss_log):
    """Persist *loss_log* (an array-like of numbers) as text to file *name*."""
    np.savetxt(name, np.array(loss_log))
|
[
"noreply@github.com"
] |
zhaoyuzhi.noreply@github.com
|
971930662e9f48b55e5e7268f17b00a473b909c6
|
4fb5b869f6690b73e32a2d8624f5fc8954540b42
|
/pypiplot/examples.py
|
b73f61adfb29a23b32d768d116b50680a0502255
|
[
"MIT"
] |
permissive
|
erdogant/pypiplot
|
cc8eb15f9b6855cba270256591ba8b1ec4ae41f6
|
2016cca3d0b4022cda1806c2c4b8c4eb2d31ee19
|
refs/heads/master
| 2023-04-16T03:26:26.935072
| 2023-02-21T23:46:01
| 2023-02-21T23:46:01
| 293,334,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,271
|
py
|
# Scratchpad of pypiplot usage examples (cell-style '# %%' sections, meant
# to be run interactively). Paths are hard-coded to the author's machine.
import pypiplot
# print(pypiplot.__version__)
# print(dir(Pypiplot))
from pypiplot import Pypiplot
# %% Update all libraries to date.
pp = Pypiplot(username='erdogant', repo_type=['owner', 'fork'])
pp.update()
results = pp.stats()
pp.plot_year(vmin=700)
pp.plot()
pp.plot_year()
# %% Top 10 best repos
pp = Pypiplot(username='erdogant', savepath='D://REPOS/pypiplot/repo_data/')
# Get download statistics
pp.stats()
# Get top 10 repos by total downloads
repo=pp.results['data'].sum().sort_values()[-10:].index.values
# Get stats for the top10
pp.stats(repo=repo)
# Plot
pp.plot()
#
pp.plot_year()
#
pp.plot_cal()
#
path = 'D://REPOS/erdogant.github.io/docs/imagesc/pypi/pypi_heatmap_full.html'
pp.plot_heatmap(vmin=10, vmax=2000, cmap='interpolateOranges', path=path)
# %% Plot
# Init
pp = Pypiplot(username='erdogant', savepath='D://REPOS/pypiplot/repo_data/')
# Get download statistics
results = pp.stats()
# Store svg on github.io
# path = 'D://REPOS/erdogant.github.io/docs/imagesc/pypi/pypi_heatmap.html'
path = 'D://REPOS/erdogant.github.io/docs/imagesc/pypi/pypi_heatmap.html'
path = 'C://temp/pypi_heatmap.html'
pp.plot_year(path=path, vmin=700)
# Store all repo info in github.io
pp.plot(legend=False)
# %% D3blocks
pp = Pypiplot(username='d3blocks')
pp.update(repo=['d3blocks'])
pp.stats(repo='d3blocks')
pp.plot()
# %%
pp = Pypiplot(username='erdogant')
pp.stats(repo='distfit')
pp.plot_year()
pp.plot(vmin=25)
# %% Update single repo
pp.update(repo=['bnlearn'])
pp.update(repo='bnlearn')
results = pp.stats(repo=['distfit','pca', 'bnlearn'])
pp.plot(legend=True)
# %% Get some stats
results = pp.stats(repo=['df2onehot','pca','bnlearn','ismember','thompson'])
pp.plot(legend=True)
# %%
pp = Pypiplot(username='erdogant')
pp.stats(repo='distfit')
pp.plot_year()
pp.plot(vmin=25)
pp.stats(repo='worldmap')
pp.plot_year()
pp.stats(repo='hnet')
pp.plot_year()
pp.stats(repo='ismember')
pp.plot_year()
pp.stats(repo='flameplot')
pp.plot_year()
pp.stats(repo='pca')
pp.plot_year()
pp.stats()
pp.stats(repo=['df2onehot','clustimage','bnlearn','distfit','pypickle','clusteval','findpeaks', 'kaplanmeier','pca','colourmap'])
pp.results['data'].rolling(window=30).mean().plot(figsize=(15,10))
# NOTE(review): `plt` is used below but matplotlib.pyplot is never imported
# in this file — presumably available in the interactive session; confirm.
plt.grid(True)
plt.xlabel('Time')
plt.ylabel('Average nr. download based on a rolling window of 30 days')
# pp.results['data'].cumsum().plot()
pp.plot_year(vmin=100)
pp.plot(vmin=25)
pp.results['data'].cumsum().plot()
# %% Plot bnlearn
results = pp.stats(repo='bnlearn')
pp.plot_year()
# %%
pp.update()
results = pp.stats()
pp.plot_year(vmin=700)
pp.plot(vmin=25)
# %% Plot
# Init
pp = Pypiplot(username='erdogant', savepath='D://REPOS/pypiplot/repo_data/')
# Get download statistics
results = pp.stats()
# Store svg on github.io
path = 'D://REPOS/erdogant.github.io/docs/imagesc/pypi/pypi_heatmap.html'
path = 'C://temp/pypi_heatmap.html'
pp.plot_year(path=path, vmin=700)
# Store all repo info in github.io
path = 'D://REPOS/erdogant.github.io/docs/imagesc/pypi/pypi_heatmap_repos.html'
pp.plot(path=path, vmin=100)
# %%
from pypiplot import Pypiplot
# results = pp.stats()
pp.stats(repo=['df2onehot','clustimage','bnlearn','distfit','pypickle','clusteval','findpeaks', 'kaplanmeier','colourmap'])
pp.plot_cal(method='mean', vmin=100)
pp.plot(method='mean')
# %%
|
[
"erdogant@gmail.com"
] |
erdogant@gmail.com
|
a60ce595e94bd01b6f46c0cb382957eebfd7ab07
|
576cc83449e10fd3f98281970c46016ea7a5aea2
|
/Tensorflow/CNN/莫烦python02.py
|
2e08d2c51048bcd31c14f4a4a131722ae38111f1
|
[] |
no_license
|
HotView/PycharmProjects
|
215ab9edd341e3293daebcf86d97537f8cd28d75
|
61393fe5ba781a8c1216a5cbe7e0d06149a10190
|
refs/heads/master
| 2020-06-02T07:41:53.608742
| 2019-11-13T08:31:57
| 2019-11-13T08:31:57
| 191,085,178
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data",one_hot=True)
def add_layer(inputs, in_size, out_size, activaion_function = None):
    """Dense layer: inputs @ W + b, optionally passed through an activation.

    NOTE: the parameter name 'activaion_function' (sic) is kept because
    callers pass it by keyword.
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    pre_activation = tf.matmul(inputs, weights) + biases
    if activaion_function is None:
        return pre_activation
    return activaion_function(pre_activation)
def compute_accuracy(v_xs, v_ys):
    """Fraction of samples in (v_xs, v_ys) that the global `prediction` labels correctly."""
    global prediction
    # Run the forward pass, then compare predicted vs. true argmax classes.
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    hits = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy_op = tf.reduce_mean(tf.cast(hits, tf.float32))
    return sess.run(accuracy_op, feed_dict={xs: v_xs, ys: v_ys})
# Model inputs: flattened 28x28 MNIST images and one-hot labels.
xs = tf.placeholder(tf.float32,[None,784])
ys = tf.placeholder(tf.float32,[None,10])
# add output layer: single softmax layer (logistic regression on pixels)
prediction = add_layer(xs,784,10,activaion_function=tf.nn.softmax)
# error: cross-entropy between labels and softmax output
crosss_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction),reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(crosss_entropy)
sess =tf.Session()
# NOTE(review): initialize_all_variables is the deprecated TF-1.x spelling of
# global_variables_initializer — confirm the target TensorFlow version.
sess.run(tf.initialize_all_variables())
# SGD over mini-batches of 100; report test accuracy every 50 steps.
for i in range(5000):
    batch_xs,batch_ys = mnist.train.next_batch(100)
    sess.run(train_step,feed_dict={xs:batch_xs,ys:batch_ys})
    if i%50==0:
        print(compute_accuracy(mnist.test.images,mnist.test.labels))
|
[
"864773190@qq.com"
] |
864773190@qq.com
|
6567d0f8b19425ebfd1cd990c73c0e2498f971f2
|
41294ab88364fbb40ee67fcc643a91cc355c25d5
|
/solution/accounting.py
|
368251986f18af4b2806c42760073666909b3c70
|
[] |
no_license
|
tessajules/underpaid-customers-HB-homework
|
96e542cc736d03b1476c88c43cd931081b03926d
|
ec3526debea68ecbf7aed25d041baf26110e40b2
|
refs/heads/master
| 2021-05-28T22:11:25.106565
| 2015-04-10T02:56:36
| 2015-04-10T02:56:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
MELON_COST = 1.00  # price per melon, in dollars
def melon_payment_calculator(payment_data):
    """Calculate cost of melons and determine who has underpaid.

    Python 2 script. Reads a pipe-delimited file where field 1 is the
    customer name, field 2 the melon count, field 3 the amount paid.
    """
    # NOTE(review): `payment_data` is rebound from path to file handle and
    # the handle is never closed — consider `with open(...)`.
    payment_data = open(payment_data)
    for line in payment_data:
        order = line.split('|')
        customer_name = order[1]
        customer_first = customer_name.split(" ")[0]
        customer_melons = float(order[2])
        customer_paid = float(order[3])
        customer_expected = customer_melons * MELON_COST
        # Paid more than owed -> overpaid; less -> underpaid; exact is silent.
        if customer_expected < customer_paid:
            print customer_name, "paid %.2f, expected %.2f" % (
                customer_paid, customer_expected)
            print customer_first, "has overpaid for their melons."
        elif customer_expected > customer_paid:
            print customer_name, "paid %.2f, expected %.2f" % (
                customer_paid, customer_expected)
            print customer_first, "has underpaid for their melons."
# Entry point: process the orders file in the working directory.
melon_payment_calculator("customer-orders.txt")
|
[
"info@hackbrightacademy.com"
] |
info@hackbrightacademy.com
|
ea8bb3f37fef6e37cd9f9274f22db69548ed5b99
|
1a59a9076c1e9f1eb98e24ff41a4c1c95e2b353e
|
/xcp2k/classes/_program_run_info36.py
|
df87e8835f3ba808b0a2fb5f2bbb04a979030521
|
[] |
no_license
|
Roolthasiva/xcp2k
|
66b2f30ebeae1a946b81f71d22f97ea4076e11dc
|
fc3b5885503c6f6dc549efeb4f89f61c8b6b8242
|
refs/heads/master
| 2022-12-23T06:03:14.033521
| 2020-10-07T08:01:48
| 2020-10-07T08:01:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
from xcp2k.inputsection import InputSection
from xcp2k.classes._each343 import _each343
class _program_run_info36(InputSection):
    """Wrapper for a CP2K PROGRAM_RUN_INFO print-key input section.

    NOTE(review): the numbered class name and parallel _eachNNN subsection
    suggest this file is machine-generated — confirm before hand-editing.
    """
    def __init__(self):
        InputSection.__init__(self)
        # Keyword slots; mapped to CP2K input keywords via self._keywords.
        self.Section_parameters = None
        self.Add_last = None
        self.Common_iteration_levels = None
        self.Filename = None
        self.Log_print_key = None
        self.EACH = _each343()  # nested EACH subsection
        self._name = "PROGRAM_RUN_INFO"
        self._keywords = {'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Filename': 'FILENAME', 'Log_print_key': 'LOG_PRINT_KEY'}
        self._subsections = {'EACH': 'EACH'}
        self._attributes = ['Section_parameters']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
acc4aef5d2a6eb365488380fe43780058d19a3d6
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_1623+386/sdB_PG_1623+386_lc.py
|
79bf55d3e02753efc2d185b0d3025a46f7a7b55a
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
from gPhoton.gAperture import gAperture
def main():
    """Run a gPhoton NUV aperture-photometry extraction for sdB PG 1623+386."""
    # NOTE(review): the csvfile path contains a space before '/sdB_PG_...' —
    # looks like a generated-path typo; confirm where the output actually lands.
    gAperture(band="NUV", skypos=[246.351292,38.505214], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_1623+386 /sdB_PG_1623+386_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.