blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
965d835cd6e4e9bf535d757bd971a6e7081fe8bc | 8a82a83655f118208692e55d7804d9fa480ad4b6 | /book/apress/Beginning.Python.Visualization.Crafting.Visual.Transformation.Scripts/Chapter08/src/exponential.py | f18cc92e1d379da0067ba78d35d4cd6e229f293e | [] | no_license | xenron/sandbox-da-python | 0814159da9a91923e4b66c5e40057e381f765e96 | ab8f1c0d57fdc6006355f613012b84165068c315 | refs/heads/master | 2020-04-12T05:41:33.182110 | 2016-12-14T22:57:33 | 2016-12-14T22:57:33 | 60,324,979 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | from pylab import *
# number of data points
N = 100
start = 0
end = 2
# Random ground-truth parameters: growth rate A and log-amplitude B.
A = rand()
B = rand()
# The exponential model y = exp(B)*exp(A*x) = exp(A*x + B) is linear in
# log-space: log(y) = A*x + B, which is what polyfit() recovers below.
x = linspace(start, end, N)
y = exp(A*x+B)
# Additive measurement noise.  NOTE(review): the noise is added in linear
# space, so the least-squares fit on log(y) is only approximate.
y += randn(N)/5
# linear regression on log(y): p[0] estimates A, p[1] estimates B
p = polyfit(x, log(y), 1)
figure()
title(r'Linear regression with polyfit(), $y=Be^{Ax}$')
plot(x, y, 'o',
    label='Measured data; A=%.2f, B=%.2f' % (A, exp(B)))
plot(x, exp(polyval(p, x)), '-',
    label='Linear regression; A=%.2f, B=%.2f' % (p[0], exp(p[1])))
legend(loc='best')
show() | [
"xenron@outlook.com"
] | xenron@outlook.com |
d97edee00b89ffead3177b443913cfa0885b63a8 | 7a0b7552bbf24dcaab5f981adc7077a642aee6ac | /week9/todo/main/urls.py | d791ddc723dfcc0b135aa8d30cebdac0fc8c2b2a | [] | no_license | Aigerimmsadir/BFDjango | b20f731796fa9df7ec021bc7a293def35df55e01 | a850713d24f50b8b70dd9f8036f77e76174f3c4e | refs/heads/master | 2020-03-27T20:29:20.543884 | 2018-11-24T13:07:39 | 2018-11-24T13:07:39 | 147,072,816 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | from django.urls import path
from . import views
# URL routes for the todo app's 'main' views.
# NOTE(review): the '1/' prefix hard-codes what looks like a todo-list id —
# confirm whether it should be a <int:list_id> path parameter instead.
urlpatterns = [
    path('',views.home, name='home'),
    path('home', views.index, name='index'),
    # Task listings: filtered by completion state and ordered variants.
    path('1/completed/', views.completed_tasks,name='completed'),
    path('1/incompleted/', views.incompleted_tasks,name="incompleted"),
    path('1/creation_order/', views.creation_order,name='creation'),
    path('1/due_order/', views.due_order,name='due_order'),
    # Task/list mutations.
    path('1/delete_task/<int:task_id>', views.delete_task,name="delete_task"),
    path('1/delete_list/', views.delete_list,name="delete_list"),
    path('1/task_done/<int:task_id>', views.task_done,name="task_done"),
    path('1/add_task/', views.add_task,name="add_task"),
    path('1/update_task/<int:task_id>',views.update_task, name='update_task')
] | [
"noreply@github.com"
] | Aigerimmsadir.noreply@github.com |
2087b62bd5e686dd6b4bf8754acc7ff7fd2c6367 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /110_concurrency_parallelism/001_asynchronous/_exercises/templates/Async Techniques and Examples in Python/09-built-on-asyncio/the_trio/prod_trio.py | a60443d598ffd2f0e29e3ab260bc0c29cf3ba3cb | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,178 | py | # ______ d..
# ______ co..
# ______ ra..
# ______ tr..
#
#
# ? ___ main
# t0 _ d_t_.d_t_.n..
# print(co__.F__.W.. + "App started.", f.._T..
#
# data _ tr__.Q.. capacity_10
#
# w__ tr__.m_o_a.. 5
# ? w__ tr__.op_n.. __ nursery:
# ?.s_s.. g_d.. 20, d.. n.._'Prod 1')
# ?.s_s.. g_d.. 20, d.. n.._'Prod 2')
# ?.s_s.. p.. 40 d.. n.._'Consumer')
#
# dt _ d_t_.d_t_.n.. - t0
# print(co__.F__.W.. + *App exiting, total time: |;,.2_ sec. .f..(
# ?.t_s.. f.._T..
#
#
# ? ___ generate_data(num ? data tr__.Q..
# ___ idx __ ra.. 1 ? + 1
# item _ i..*i..
# ? d__.p.. ? d_t_.d_t_.n..
#
# print(co__.F__.Y.. + _* -- generated item |? f.._T..
# ? tr__.sleep(ra__.ra.. + .5
#
#
# ? ___ process_data num ? data tr__.Q..
# processed _ 0
# w__ ? < ?
# item _ ? d__.g..
#
# ? +_ 1
# value _ ? 0
# t _ ? 1
# dt _ d_t_.d_t_.n.. - t
#
# print(co__.F__.C.. +
# * +++ Processed value @ after |;,.2_ sec. .f..(
# v.. ?.t_s.. f.._T..
# ? tr__.s.. .5
#
#
# __ _________ __ ________
# tr__.r.. ?
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
b6d87b84ce43f9cb8bd1f04a831ea7708ceeba1c | e111233b264eb57467ca12562a3f1d91155f0d18 | /그외/08 4주/잔다.py | 2a35ed2ab0d0d254a8b3aec4a8a422780ee8f63a | [] | no_license | 01090841589/ATM | d440a112a47937d11f4c4d8df6817a76971c0888 | 44fa856a033d15c9281d2597f1b67ee5cec09934 | refs/heads/master | 2020-07-05T13:24:35.080738 | 2019-08-29T08:33:24 | 2019-08-29T08:33:24 | 202,659,466 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | def cal(a, b, eq):
    # Apply binary operator *eq* ('+' or '*') to the two operands.
    # NOTE(review): any other operator token falls through and returns None.
    if eq == '+':
        return a + b
    elif eq == '*':
        return a * b
# In-stack precedence for the shunting-yard conversion.  '(' is lowest so
# operators already on the stack are never popped past an open parenthesis.
isp = {'+': 1, '*': 2, '(': 0}
nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

for tc in range(1, 11):
    N = int(input())        # declared expression length (not used afterwards)
    arith = input()
    word = []               # postfix (RPN) token list being built
    stack = []              # operator stack
    # --- infix -> postfix (shunting-yard) ---
    for letter in arith:
        if letter in nums:
            word.append(int(letter))
        elif letter == '(':
            stack.append('(')
        elif letter == ')':
            # Pop operators back to (and discard) the matching '('.
            while True:
                tmp = stack.pop()
                if tmp == '(':
                    break
                word.append(tmp)
        else:
            # Pop strictly-higher-precedence operators, then push this one.
            # ('+' and '*' are associative, so strict '>' gives the same
            # result as the usual '>=' rule for left-associative operators.)
            while stack and isp[stack[-1]] > isp[letter]:
                word.append(stack.pop())
            stack.append(letter)
    # Flush any operators still pending on the stack.
    while stack:
        word.append(stack.pop())
    # --- evaluate the postfix expression with a value stack ---
    result = 0
    cal_stack = []
    for letter in word:
        if isinstance(letter, int):
            cal_stack.append(letter)
        else:
            result = cal(cal_stack.pop(), cal_stack.pop(), letter)
            cal_stack.append(result)
print('#{} {}'.format(tc, result)) | [
"chanchanhwan@naver.com"
] | chanchanhwan@naver.com |
b6c8eb3848575a5dc835a5dab447e1c9cb28d2ec | 33524b5c049f934ce27fbf046db95799ac003385 | /2017/Turtule/lesson_7_Циклы___for/triangle.py | 02ce25a3cada3cb1377c90f019353af48c74f888 | [] | no_license | mgbo/My_Exercise | 07b5f696d383b3b160262c5978ad645b46244b70 | 53fb175836717493e2c813ecb45c5d5e9d28dd23 | refs/heads/master | 2022-12-24T14:11:02.271443 | 2020-10-04T04:44:38 | 2020-10-04T04:44:38 | 291,413,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py |
# -*- coding: utf-8 -*-
import turtle
import time
def write(data):
t.write(data,font = ("Arial",14,"normal"))
def triangle(size,cover):
#a0 = t.heading()
p0 = t.pos()
t.fd(size)
p1 = t.pos()
t.fd(-size)
t.rt(cover)
t.fd(size)
t.goto(p1)
t.goto(p0)
#t.seth(a0)
def Poligon(size,n):
ang = 0
for _ in range(n):
t.seth(ang)
triangle(size/2,360/n)
#t.fd(size)
ang += 360/n
#time.sleep(2)
t = turtle.Turtle()
t.shape("turtle")
t.color('green')
t.width(2)
#t.seth(45)
triangle(100,90)
#Poligon(200,10)
turtle.done()
| [
"mgbo433@gmail.com"
] | mgbo433@gmail.com |
ca2c1dda952a2077996a03d398489c4346274ca8 | c11123ce1e86f8306dcc3bf5d017dbfa8bb1d515 | /Medium/Combinations.py | a8b946a2426e2c441fd19569d5b149a0c76f041d | [] | no_license | uathena1991/Leetcode | 7e606c68a51ed09e6e6a9fad327b24066e92d0c4 | e807ae43a0a253deaa6c9ed1c592fa3a14a6cab8 | refs/heads/master | 2021-05-01T15:21:25.568729 | 2019-10-13T14:33:30 | 2019-10-13T14:33:30 | 74,910,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | class Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
if k == 0:
return [[]]
else:
return [prev + [i] for i in range(1,n+1) for prev in self.combine(i-1,k-1)]
# Ad-hoc smoke test for Solution.combine.
a = Solution()
# NOTE(review): Python 2 print statement — under Python 3 this must be
# written print(a.combine(4, 1)).
print a.combine(4,1) | [
"xiaoli.he@rutgers.edu"
] | xiaoli.he@rutgers.edu |
614ea8295be05bbd12c8e9489763947b7e63dea8 | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/output/550_original | 1f725987700a582ef6c283c92616fa22721d44aa | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 1,545 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreatePolicyVersionRequest(RpcRequest):
	"""RPC request for RAM ``CreatePolicyVersion`` (product 'Ram', version 2015-05-01).

	Creates a new version of an existing RAM policy; always sent over HTTPS.
	The accessors below are plain pass-throughs to the RpcRequest
	query-parameter dict.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'Ram', '2015-05-01', 'CreatePolicyVersion')
		self.set_protocol_type('https');
	# Name of the policy to add a version to (query param 'PolicyName').
	def get_PolicyName(self):
		return self.get_query_params().get('PolicyName')
	def set_PolicyName(self,PolicyName):
		self.add_query_param('PolicyName',PolicyName)
	# Policy document for the new version (query param 'PolicyDocument').
	def get_PolicyDocument(self):
		return self.get_query_params().get('PolicyDocument')
	def set_PolicyDocument(self,PolicyDocument):
		self.add_query_param('PolicyDocument',PolicyDocument)
	# Whether the new version becomes the default (query param 'SetAsDefault').
	def get_SetAsDefault(self):
		return self.get_query_params().get('SetAsDefault')
	def set_SetAsDefault(self,SetAsDefault):
		self.add_query_param('SetAsDefault',SetAsDefault) | [
"rares.begu@gmail.com"
] | rares.begu@gmail.com | |
0e10bb5c19776e865b5ad07011a5366bb37011b1 | 72b1d8b44520d1757d379d8013eb3912b005bef3 | /ml/text/experiment/triplet.py | fb287d3ed7426ca6b5e23c8fc9202116298de1f2 | [] | no_license | joshuaNewman10/ml | 14d8d5821bd952e77272b740cf05cef69ebee383 | 3ec43868004d421814f8e056205e77a2b8cb92dc | refs/heads/master | 2021-04-03T06:29:33.655495 | 2018-09-17T19:03:40 | 2018-09-17T19:03:40 | 124,795,304 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | from keras import Input, Model, backend as K
from keras.layers import Lambda, Concatenate
def triplet_model(encoder, input_shape):
    """Build a Keras triplet network around a shared *encoder*.

    Three inputs (anchor / related / unrelated), each of shape *input_shape*,
    are embedded by the same encoder instance, so the weights are shared.
    The model output concatenates [anchor-related distance,
    anchor-unrelated distance], which triplet_loss / triplet_accuracy consume.
    """
    x_anchor = Input(shape=input_shape, name='anchor')
    x_related = Input(shape=input_shape, name='related')
    x_unrelated = Input(shape=input_shape, name='unrelated')
    # One encoder object applied three times => shared weights.
    h_anchor = encoder(x_anchor)
    h_related = encoder(x_related)
    h_unrelated = encoder(x_unrelated)
    related_dist = Lambda(euclidean_distance, name='pos_dist')([h_anchor, h_related])
    unrelated_dist = Lambda(euclidean_distance, name='neg_dist')([h_anchor, h_unrelated])
    inputs = [x_anchor, x_related, x_unrelated]
    distances = Concatenate()([related_dist, unrelated_dist])
    model = Model(inputs=inputs, outputs=distances)
    return model
def triplet_loss(_, y_pred):
    """Margin-1 triplet loss over stacked [pos_dist, neg_dist] predictions.

    The ground-truth argument is ignored; y_pred[:, 0] is the anchor-related
    distance and y_pred[:, 1] the anchor-unrelated distance (see triplet_model).
    """
    margin = K.constant(1)
    pos_term = K.square(y_pred[:, 0])
    neg_term = K.square(y_pred[:, 1])
    return K.mean(K.maximum(K.constant(0), pos_term - neg_term + margin))
def triplet_accuracy(_, y_pred):
    """Fraction of triplets whose anchor sits closer to the related sample."""
    pos_dist = y_pred[:, 0]
    neg_dist = y_pred[:, 1]
    return K.mean(pos_dist < neg_dist)
def euclidean_distance(vectors):
    """Row-wise Euclidean distance between a pair of batched embeddings.

    *vectors* is a 2-tuple (x, y) of tensors — assumed shape (batch, dim),
    which the axis=1 reduction relies on (TODO confirm) — and the result is
    a (batch, 1) column.  The K.epsilon() floor keeps the sqrt gradient
    finite when the distance is near zero.
    """
    x, y = vectors
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon())) | [
"josh@teambanjo.com"
] | josh@teambanjo.com |
77425a6f9bac27b78c6d3c2f6bff38e7d5474ddd | 1adf769cf9234f9b6c619f808d2723b99451d679 | /rusentrel/rusentrel_ds/mi_att/att_hidden_z_yang.py | beb3090e90ef24dd693da28831f87b381a9f8486 | [
"MIT"
] | permissive | DAVMARROS/attitude-extraction-with-attention-and-ds | 4e85fa154ead0cd9499aaedf5d752ac565f37b92 | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | refs/heads/master | 2023-02-09T04:56:24.090380 | 2020-12-30T10:09:34 | 2020-12-30T10:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | #!/usr/bin/python
import sys
sys.path.append('../../../')
from arekit.contrib.networks.multi.configurations.att_self import AttSelfOverSentencesConfig
from arekit.contrib.networks.multi.architectures.att_self import AttSelfOverSentences
from rusentrel.mi_names import AttSelfOverInstancesModelNames
from rusentrel.rusentrel_ds.mi.att_hidden_z_yang import run_testing_mi_att_hidden_zyang
if __name__ == "__main__":
    # Entry point: run the multi-instance "att-hidden z-yang" experiment,
    # wiring the self-attention-over-sentences architecture and its matching
    # config class into the shared test runner.
    run_testing_mi_att_hidden_zyang(
        model_names_classtype=AttSelfOverInstancesModelNames,
        network_classtype=AttSelfOverSentences,
        config_classtype=AttSelfOverSentencesConfig
    )
| [
"kolyarus@yandex.ru"
] | kolyarus@yandex.ru |
1e65865dbb74cecc72da259e7421679ad6d1116e | 81e081bd18fcf4f648f50722b59c55a581306346 | /1300_K번째 수/s1.py | 1eade0d05ad15fbb779c4f21e93bcc0c01783911 | [] | no_license | Ysh096/baekjoon | 5b50ceb70c0e5a1b095dbee0542056cb31a8edaf | 54943a00e11ae4926208e51e7488cc63e6da525d | refs/heads/master | 2023-08-05T15:13:17.884639 | 2021-10-10T12:24:52 | 2021-10-10T12:24:52 | 340,568,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | # 메모리 초과
import sys


def kth_smallest_product(n, k):
    """Return the k-th smallest (1-based) entry of the n x n multiplication table.

    NOTE: materialising all n*n products costs O(n^2) memory, which is why the
    original submission exceeded the judge's memory limit for large n; a binary
    search over the answer would need only O(1) extra memory.
    """
    products = sorted(i * j for i in range(1, n + 1) for j in range(1, n + 1))
    # BUG FIX: k is 1-based in the problem statement, Python lists are
    # 0-based — the original printed products[k], i.e. the (k+1)-th value.
    return products[k - 1]


if __name__ == '__main__':
    # Guarded so importing this module does not require input.txt to exist.
    sys.stdin = open('input.txt')
    n = int(input())
    k = int(input())
    print(kth_smallest_product(n, k))
| [
"skk7541@gmail.com"
] | skk7541@gmail.com |
b65fdf1255eb415d579598bcc480d9c998e09e75 | d3188257a2e62627744876fd17d36db7c0f1ffab | /chat_chat/chat/models.py | 82b2f0f5d5804163ae24cfdac910300fdbd5bcb8 | [
"MIT"
] | permissive | junngo/django-chat | 8e01ebc2d18f93d87c2a104703274cebe75b6a92 | 24a4ec17ade348186ab4cdaeecb60f6b69d5dce2 | refs/heads/master | 2022-11-11T17:15:46.455637 | 2020-07-02T06:14:40 | 2020-07-02T06:14:40 | 266,116,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
User = get_user_model()
class Contact(models.Model):
user = models.ForeignKey(
User, related_name='me', on_delete=models.CASCADE)
friends = models.ManyToManyField(
User, related_name='friends', blank=True)
def __str__(self):
return self.user.username
class Message(models.Model):
contact = models.ForeignKey(
Contact, related_name='messages', on_delete=models.CASCADE)
message = models.TextField()
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.contact.user.username
class Room(models.Model):
participants = models.ManyToManyField(
Contact, related_name='rooms', blank=True)
messages = models.ManyToManyField(Message, blank=True)
def __str__(self):
return "{}".format(self.pk)
@property
def group_name(self):
"""
Returns the Channels Group name that sockets should subscribe to to get sent
messages as they are generated.
"""
return "room-%s" % self.id
| [
"myeongjun.ko@gmail.com"
] | myeongjun.ko@gmail.com |
996202efa4c0dd475918988a5b19e75ba8fb54dc | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-ice/aliyunsdkice/request/v20201109/ListPackageJobsRequest.py | e6443d218280892d7a6d0901902c850f1f9912e1 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,617 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkice.endpoint import endpoint_data
class ListPackageJobsRequest(RpcRequest):
	"""RPC request for ICE ``ListPackageJobs`` (product 'ICE', version 2020-11-09).

	Lists media packaging jobs with optional paging (NextPageToken/PageSize),
	filtering (JobId, Status, creation-time window) and ordering (OrderBy).
	All accessors below are plain pass-throughs to the RpcRequest
	query-parameter dict.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'ICE', '2020-11-09', 'ListPackageJobs','ice')
		self.set_method('POST')
		# Only install endpoint data when the underlying core exposes these
		# attributes (hence the hasattr probes).
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_NextPageToken(self): # String
		return self.get_query_params().get('NextPageToken')
	def set_NextPageToken(self, NextPageToken): # String
		self.add_query_param('NextPageToken', NextPageToken)
	def get_JobId(self): # String
		return self.get_query_params().get('JobId')
	def set_JobId(self, JobId): # String
		self.add_query_param('JobId', JobId)
	def get_PageSize(self): # Integer
		return self.get_query_params().get('PageSize')
	def set_PageSize(self, PageSize): # Integer
		self.add_query_param('PageSize', PageSize)
	def get_EndOfCreateTime(self): # String
		return self.get_query_params().get('EndOfCreateTime')
	def set_EndOfCreateTime(self, EndOfCreateTime): # String
		self.add_query_param('EndOfCreateTime', EndOfCreateTime)
	def get_OrderBy(self): # String
		return self.get_query_params().get('OrderBy')
	def set_OrderBy(self, OrderBy): # String
		self.add_query_param('OrderBy', OrderBy)
	def get_StartOfCreateTime(self): # String
		return self.get_query_params().get('StartOfCreateTime')
	def set_StartOfCreateTime(self, StartOfCreateTime): # String
		self.add_query_param('StartOfCreateTime', StartOfCreateTime)
	def get_Status(self): # String
		return self.get_query_params().get('Status')
	def set_Status(self, Status): # String
		self.add_query_param('Status', Status)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
6d29c3ea7b9ad0d135d444d81d34f3035c44a725 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-drs/huaweicloudsdkdrs/v5/model/cloud_vpc_info.py | fe8771b44358314ee19f1d2be9fcc5b5edab143e | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,950 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CloudVpcInfo:
    """VPC/subnet/security-group settings of a DB instance (huaweicloud DRS model).
    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'vpc_id': 'str',
        'subnet_id': 'str',
        'security_group_id': 'str'
    }
    attribute_map = {
        'vpc_id': 'vpc_id',
        'subnet_id': 'subnet_id',
        'security_group_id': 'security_group_id'
    }
    def __init__(self, vpc_id=None, subnet_id=None, security_group_id=None):
        """CloudVpcInfo
        The model defined in huaweicloud sdk
        :param vpc_id: ID of the VPC the DB instance belongs to; found on the VPC detail page of the VPC console, or via the "query VPCs" API.
        :type vpc_id: str
        :param subnet_id: ID (network ID) of the subnet the DB instance belongs to; found on the subnet detail page of the VPC console, or via the "query subnets" API.
        :type subnet_id: str
        :param security_group_id: ID of the security group of the DB instance; found on the security-group detail page, or via the "query security groups" API.
        :type security_group_id: str
        """
        self._vpc_id = None
        self._subnet_id = None
        self._security_group_id = None
        self.discriminator = None
        self.vpc_id = vpc_id
        self.subnet_id = subnet_id
        if security_group_id is not None:
            self.security_group_id = security_group_id
    @property
    def vpc_id(self):
        """Gets the vpc_id of this CloudVpcInfo.
        ID of the VPC the DB instance belongs to.
        :return: The vpc_id of this CloudVpcInfo.
        :rtype: str
        """
        return self._vpc_id
    @vpc_id.setter
    def vpc_id(self, vpc_id):
        """Sets the vpc_id of this CloudVpcInfo.
        ID of the VPC the DB instance belongs to.
        :param vpc_id: The vpc_id of this CloudVpcInfo.
        :type vpc_id: str
        """
        self._vpc_id = vpc_id
    @property
    def subnet_id(self):
        """Gets the subnet_id of this CloudVpcInfo.
        ID of the subnet the DB instance belongs to.
        :return: The subnet_id of this CloudVpcInfo.
        :rtype: str
        """
        return self._subnet_id
    @subnet_id.setter
    def subnet_id(self, subnet_id):
        """Sets the subnet_id of this CloudVpcInfo.
        ID of the subnet the DB instance belongs to.
        :param subnet_id: The subnet_id of this CloudVpcInfo.
        :type subnet_id: str
        """
        self._subnet_id = subnet_id
    @property
    def security_group_id(self):
        """Gets the security_group_id of this CloudVpcInfo.
        ID of the security group of the DB instance.
        :return: The security_group_id of this CloudVpcInfo.
        :rtype: str
        """
        return self._security_group_id
    @security_group_id.setter
    def security_group_id(self, security_group_id):
        """Sets the security_group_id of this CloudVpcInfo.
        ID of the security group of the DB instance.
        :param security_group_id: The security_group_id of this CloudVpcInfo.
        :type security_group_id: str
        """
        self._security_group_id = security_group_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Attributes listed in sensitive_list are masked so that
                # secrets never leak through serialized dumps.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CloudVpcInfo):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
52cb29e9a1839ab9795cee5589a2043ec6281f7f | ab5634154a80272c051701597d4d8694ffdf367e | /parse_iperf.py | b71b56c1118a6384cfc3b604c7c8b8de56d03be3 | [] | no_license | WiperHung/CN-HW1 | 19ebf76767e480bc0593c559d2e7a55a4e1df604 | 6fe990a725f4c691ebefef58c7d5ca109057892d | refs/heads/main | 2023-01-07T16:53:54.759976 | 2020-11-01T12:24:48 | 2020-11-01T12:24:48 | 309,062,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | #!/usr/bin/python
# Parse the iperf.txt files and generate new files that can be used for plotting
# throughput vs time
from argparse import ArgumentParser
import sys
import os
# Command line: --n is the number of hosts (expects files iperf1..iperf{n-1}),
# --dir is the directory holding the iperf outputs and receiving the plots.
parser = ArgumentParser(description="iperfParser")
parser.add_argument('--n',
                    type=int,
                    help="Number of hosts",
                    default=5)
parser.add_argument('--dir', '-d',
                    help="Directory to store outputs",
                    required=True)
args = parser.parse_args()

for i in range(1, args.n):
    # Drop the 6-line iperf banner and the trailing summary line, keeping
    # only the per-interval rows.  `with` guarantees the handles are closed
    # (the original leaked both file objects).
    with open("%s/iperf%d.txt" % (args.dir, i), "r") as fi:
        interval_lines = fi.readlines()[6:-1]
    with open("%s/iperf%d-plot.txt" % (args.dir, i), "w+") as fo:
        # Host i starts sending 20*(i-1) seconds into the experiment; pad
        # with zero-throughput samples so every plot covers 0..299 s.
        for t in range(20 * (i - 1)):
            fo.write("%d,0 \n" % t)
        t = 20 * (i - 1)
        for line in interval_lines:
            # Second-to-last column of an interval row is the value plotted
            # (assumed bandwidth, per iperf's interval output format).
            word = line.split()
            fo.write("%d,%s \n" % (t, word[-2]))
            t = t + 1
        for t in range(t, 300):
            fo.write("%d,0 \n" % t)
| [
"="
] | = |
6f394e0be9cc0906075814aae0d91b66285baf6c | 37b30edf9f643225fdf697b11fd70f3531842d5f | /chrome/browser/ash/DEPS | 0d81be3a8ee2bc6201ddbef230e6374ba0959fd3 | [
"BSD-3-Clause"
] | permissive | pauladams8/chromium | 448a531f6db6015cd1f48e7d8bfcc4ec5243b775 | bc6d983842a7798f4508ae5fb17627d1ecd5f684 | refs/heads/main | 2023-08-05T11:01:20.812453 | 2021-09-17T16:13:54 | 2021-09-17T16:13:54 | 407,628,666 | 1 | 0 | BSD-3-Clause | 2021-09-17T17:35:31 | 2021-09-17T17:35:30 | null | UTF-8 | Python | false | false | 2,036 | include_rules = [
# //chrome/browser/ash is conceptually part of "ash". See the "Lacros:
# ChromeOS source code directory migration" design doc at
# https://docs.google.com/document/d/1g-98HpzA8XcoGBWUv1gQNr4rbnD5yfvbtYZyPDDbkaE
"+ash",
"+chrome/browser/image_decoder",
# TODO(ananta): Remove this when we move files which display UI in
# chrome/browser/chromeos to chrome/browser/ui/views/chromeos
# crbug.com/728877
"+chrome/browser/ui/views/chrome_layout_provider.h",
"+chrome/services/keymaster/public",
"+chrome/services/wilco_dtc_supportd/public",
"+components/account_manager_core",
"+components/app_restore",
"+components/guest_os",
"+components/services/app_service/public",
"+cros",
"+dbus",
"+device/bluetooth",
"+media/audio/sounds", # For system sounds
"+media/base/media_switches.h", # For media command line switches.
"+media/mojo/mojom", # For platform verification mojom interface.
"+remoting/host/it2me", # For CRD host in remote command
"+remoting/protocol", # For CRD host in remote command
"+services/device/public",
"+services/metrics/public",
"+services/network",
"+services/tracing/public",
"+services/viz/public/mojom",
]
specific_include_rules = {
# Dependencies specific for fuzz targets and other fuzzing-related code.
".*fuzz.*": [
"+third_party/libFuzzer/src/utils", # This contains FuzzedDataProvider.
],
"assistant_util_unittest\.cc": [
"+ui/events/devices/device_data_manager.h",
],
"child_status_collector_browsertest.cc": [
"+mojo/core/embedder/embedder.h",
],
"device_status_collector_browsertest.cc": [
"+mojo/core/embedder/embedder.h",
],
"event_rewriter_unittest\.cc": [
"+ui/events/devices/device_data_manager.h",
],
"external_protocol_dialog\.cc": [
"+chrome/browser/ui/views/external_protocol_dialog.h",
],
"file_manager_browsertest_base.cc": [
"+chrome/browser/ui/views/extensions/extension_dialog.h",
"+chrome/browser/ui/views/select_file_dialog_extension.h",
],
}
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com | |
d6e40d4131a3a4c0cb562486feb1ce61eda97c54 | 6cd3355ee8286f810cd5df28baa62e5cacfd1a75 | /Advent of Code 2022/Day 1 2022/Day1Q1 2022.py | 65bafe7968e4987d44d26ffd17e5f7f44862bba1 | [] | no_license | Brian-Mascitello/Advent-of-Code | 8d8290ff8dde236a5e21e33b1a1eba05a9c8f269 | f32566fc7b30d5b83c21e8f38b50f0b37bc11135 | refs/heads/master | 2022-12-17T17:15:26.993538 | 2022-12-06T05:43:37 | 2022-12-06T05:43:37 | 112,885,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | """
Author: Brian Mascitello
Date: 12/2/2022
Websites: https://adventofcode.com/2022/day/1
Info: --- Day 1: Calorie Counting ---
"""
def get_data(input_text):
    """Return the full text contents of the file at path *input_text*."""
    with open(input_text) as puzzle_file:
        return puzzle_file.read()


def calc_max_calories(data):
    """Return the largest calorie total carried by a single elf.

    *data* holds one integer per line; blank lines separate one elf's
    inventory from the next (AoC 2022 day 1 input format).
    """
    totals = []     # one calorie sum per elf, in input order
    current = 0
    for line in data.splitlines():
        stripped_line = line.strip()
        if stripped_line:
            current += int(stripped_line)
        else:
            # A blank line closes the current elf's inventory.
            totals.append(current)
            current = 0
    totals.append(current)  # flush the last elf (input may lack a trailing blank)
    return max(totals)


def main():
    """Solve AoC 2022 day 1 part 1 and print the answer."""
    data = get_data('Day1Q1 2022 Input.txt')
    most_calories = calc_max_calories(data)
    print(f'The most calories an elf is carrying is {most_calories}.')


if __name__ == '__main__':
    main()
"bmascitello@gmail.com"
] | bmascitello@gmail.com |
fbd78eba6586daa821c801b32bf9a6b63338a9f9 | 1e1cb0103d4dd15bc462962047d00f6d1349e0c5 | /boss/pages/test_page/base_page.py | cc887b80426d6b9811ab7d4f41a05c4303655065 | [] | no_license | fangmeng1991/BossCode | 9b93ae4ceaf050d4f081a2863aacb52114fafabb | 7cef7f8b3aa9d97f30a4d3cdadadfd23d8d52795 | refs/heads/master | 2020-07-23T03:58:51.259561 | 2019-09-11T01:18:20 | 2019-09-11T01:18:20 | 207,439,253 | 0 | 0 | null | 2019-09-11T01:18:21 | 2019-09-10T01:34:54 | Python | UTF-8 | Python | false | false | 5,756 | py | # coding=utf-8
import time
from selenium.common.exceptions import NoSuchElementException
import os.path
from common.logger import Logger
# create a logger instance
logger = Logger(logger="BasePage").getlog()
class BasePage(object):
"""
定义一个页面基类,让所有页面都继承这个类,封装一些常用的页面操作方法到这个类
"""
def __init__(self, driver):
self.driver = driver
# quit browser and end testing
def quit_browser(self):
self.driver.quit()
# 浏览器前进操作
def forward(self):
self.driver.forward()
logger.info("Click forward on current page.")
# 浏览器后退操作
def back(self):
self.driver.back()
logger.info("Click back on current page.")
# 隐式等待
def wait(self, seconds):
self.driver.implicitly_wait(seconds)
logger.info("wait for %d seconds." % seconds)
# 点击关闭当前窗口
def close(self):
try:
self.driver.close()
logger.info("Closing and quit the browser.")
except NameError as e:
logger.error("Failed to quit the browser with %s" % e)
# 保存图片
def get_windows_img(self):
"""
在这里我们把file_path这个参数写死,直接保存到我们项目根目录的一个文件夹.\Screenshots下
"""
file_path = os.path.dirname(os.path.abspath('.')) + '/boss/logs/picture/'
rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
screen_name = file_path + rq + '.png'
try:
self.driver.get_screenshot_as_file(screen_name)
logger.info("Had take screenshot and save to folder : /boss/logs/picture/")
except NameError as e:
logger.error("Failed to take screenshot! %s" % e)
self.get_windows_img()
# 定位元素方法
def find_element(self, selector):
"""
这个地方为什么是根据=>来切割字符串,请看页面里定位元素的方法
submit_btn = "id=>su"
login_lnk = "xpath => //*[@id='u1']/a[7]" # 百度首页登录链接定位
如果采用等号,结果很多xpath表达式中包含一个=,这样会造成切割不准确,影响元素定位
:param selector:
:return: element
"""
element = ''
if '=>' not in selector:
return self.driver.find_element_by_id(selector)
selector_by = selector.split('=>')[0]
selector_value = selector.split('=>')[1]
if selector_by == "i" or selector_by == 'id':
try:
element = self.driver.find_element_by_id(selector_value)
logger.info("Had find the element \' %s \' successful "
"by %s via value: %s " % (element.text, selector_by, selector_value))
except NoSuchElementException as e:
logger.error("NoSuchElementException: %s" % e)
self.get_windows_img() # take screenshot
elif selector_by == "n" or selector_by == 'name':
element = self.driver.find_element_by_name(selector_value)
elif selector_by == "c" or selector_by == 'class_name':
element = self.driver.find_element_by_class_name(selector_value)
elif selector_by == "l" or selector_by == 'link_text':
element = self.driver.find_element_by_link_text(selector_value)
elif selector_by == "p" or selector_by == 'partial_link_text':
element = self.driver.find_element_by_partial_link_text(selector_value)
elif selector_by == "t" or selector_by == 'tag_name':
element = self.driver.find_element_by_tag_name(selector_value)
elif selector_by == "x" or selector_by == 'xpath':
try:
element = self.driver.find_element_by_xpath(selector_value)
logger.info("Had find the element \' %s \' successful "
"by %s via value: %s " % (element.text, selector_by, selector_value))
except NoSuchElementException as e:
logger.error("NoSuchElementException: %s" % e)
self.get_windows_img()
elif selector_by == "s" or selector_by == 'selector_selector':
element = self.driver.find_element_by_css_selector(selector_value)
else:
raise NameError("Please enter a valid type of targeting elements.")
return element
# 输入
def type(self, selector, text):
el = self.find_element(selector)
el.clear()
try:
el.send_keys(text)
logger.info("Had type \' %s \' in inputBox" % text)
except NameError as e:
logger.error("Failed to type in input box with %s" % e)
self.get_windows_img()
# 清除文本框
def clear(self, selector):
el = self.find_element(selector)
try:
el.clear()
logger.info("Clear text in input box before typing.")
except NameError as e:
logger.error("Failed to clear in input box with %s" % e)
self.get_windows_img()
# 点击元素
def click(self, selector):
el = self.find_element(selector)
try:
el.click()
logger.info("The element \' %s \' was clicked." % el.text)
except NameError as e:
logger.error("Failed to click the element with %s" % e)
# 获得网页标题
def get_page_title(self):
logger.info("Current page title is %s" % self.driver.title)
return self.driver.title
    @staticmethod
    def sleep(seconds):
        """Hard wait: block the test for ``seconds`` seconds, then log the pause."""
        time.sleep(seconds)
        logger.info("Sleep for %d seconds" % seconds) | [
"123@qq.com"
] | 123@qq.com |
f2d0097a4cdaa88deee9b0d582ac0bdba358b426 | e1950865f000adc926f228d84131e20b244b48f6 | /python/Array/Grid.py | 5fd43c30ed681fb3091576a860777c929f97caf1 | [] | no_license | manastole03/Programming-practice | c73859b13392a6a1036f557fa975225672fb1e91 | 2889dc94068b8d778f6b0cf516982d7104fa2318 | refs/heads/master | 2022-12-06T07:48:47.237014 | 2020-08-29T18:22:59 | 2020-08-29T18:22:59 | 281,708,273 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | w=int(input('Enter width of grid: '))
h = int(input('Enter heightof grid: '))
# Draw an h-row by w-column grid of dashes (w is read just above);
# each cell is a dash followed by a space, one row per output line.
for row in range(h):
    print('- ' * w)
| [
"noreply@github.com"
] | manastole03.noreply@github.com |
8a3ae94a5be0277d4287ad3cf4c4378be3250295 | c39566ee9b2e9825f8b9cca5d04a97ee123ab0d4 | /src/search/urls.py | 21fb622c1b35bfd9cd9195f671ddc540b0150072 | [] | no_license | bekbossyn/ecommerce_old | 58a3c0fd4ecea139272baa39f6bbe57fa46751f6 | 2da1ed7e231d932a5a894183ded79eee6ce65497 | refs/heads/master | 2021-09-07T16:44:25.320360 | 2018-02-26T07:51:07 | 2018-02-26T07:51:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from django.conf.urls import url
from .views import (
    SearchProductView
)
# Search app URL configuration: the root URL ('^$') renders the
# product search listing view.
urlpatterns = [
    url(r'^$', SearchProductView.as_view(), name="list"),
]
| [
"bekbossyn.kassymkhan@gmail.com"
] | bekbossyn.kassymkhan@gmail.com |
47733d507b71fe40aba19be739e9709a96d240c8 | 99e44f844d78de330391f2b17bbf2e293bf24b1b | /pytorch/test/test_c10d_spawn.py | 8004ed4d2206cd6e7530dfcc840d4efbbdf9f5df | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | raghavnauhria/whatmt | be10d57bcd6134dd5714d0c4058abd56a1b35a13 | c20483a437c82936cb0fb8080925e37b9c4bba87 | refs/heads/master | 2022-12-04T05:39:24.601698 | 2019-07-22T09:43:30 | 2019-07-22T09:43:30 | 193,026,689 | 0 | 1 | MIT | 2022-11-28T17:50:19 | 2019-06-21T03:48:20 | C++ | UTF-8 | Python | false | false | 7,670 | py | import sys
import tempfile
import unittest
import torch
import torch.distributed as c10d
import torch.multiprocessing as mp
from common_cuda import TEST_MULTIGPU
from common_utils import TestCase, load_tests, run_tests
from common_utils import NO_MULTIPROCESSING_SPAWN
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if not c10d.is_available():
print('c10d not available, skipping tests')
sys.exit(0)
if NO_MULTIPROCESSING_SPAWN:
print('spawn not available, skipping tests')
sys.exit(0)
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
class ProcessGroupShareTensorTest(TestCase):
world_size = 2
@classmethod
def opts(cls, threads=2):
opts = c10d.ProcessGroupGloo.Options()
opts.devices = [c10d.ProcessGroupGloo.create_tcp_device(interface="lo")]
opts.timeout = 5.0
opts.threads = threads
return opts
@classmethod
def _init_pg_gloo(cls, rank, filename, world_size):
store = c10d.FileStore(filename, world_size)
return c10d.ProcessGroupGloo(
store, rank, world_size, ProcessGroupShareTensorTest.opts())
@classmethod
def _init_pg_nccl(cls, rank, filename, world_size):
store = c10d.FileStore(filename, world_size)
return c10d.ProcessGroupNCCL(store, rank, world_size)
def _test_multiprocess(self, f, shared_tensors, init_pg, n_output):
ws = self.world_size
# file store will delete the test file on destruction
file = tempfile.NamedTemporaryFile(delete=False)
ctx = mp.get_context('spawn')
c2p = ctx.Queue(2)
p2c = ctx.Queue(2)
ps = []
for i in range(ws):
p = ctx.Process(
target=f,
args=(i, file.name, shared_tensors, ws, init_pg, c2p, p2c))
p.start()
ps.append(p)
for _ in range(ws * n_output):
pid, expected, result = c2p.get()
self.assertEqual(
expected,
result,
(
"Expect rank {} to receive tensor {} but got {}."
).format(pid, expected, result)
)
for _ in range(ws):
p2c.put(0)
for p in ps:
p.join(2)
# Why classmethod? multiprocessing cannot pickle TestCase subclass when in
# spawn mode. See https://bugs.python.org/issue33884.
@classmethod
def _test_broadcast_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
pg.broadcast(xs).wait()
c2p.put((rank, torch.zeros(2, 2), xs[0].to("cpu")))
p2c.get()
@unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
def test_shared_broadcast_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_broadcast_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
1)
@unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@unittest.skipIf(NO_NCCL, "NCCL needed")
def test_shared_broadcast_nccl(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_broadcast_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_nccl,
1)
@classmethod
def _test_allreduce_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
pg.allreduce(xs, op=c10d.ReduceOp.SUM).wait()
c2p.put((rank, torch.ones(2, 2) * 2, xs[0].to("cpu")))
p2c.get()
@unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
def test_shared_allreduce_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allreduce_process,
[torch.ones(2, 2).to(i) for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
1)
@unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@unittest.skipIf(NO_NCCL, "NCCL needed")
def test_shared_allreduce_nccl(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allreduce_process,
[torch.ones(2, 2).to(i) for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_nccl,
1)
@classmethod
def _test_reduce_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
x = shared_tensors[rank]
pg.reduce(x, root=0, op=c10d.ReduceOp.SUM).wait()
if rank == 0:
c2p.put((rank, torch.ones(2, 2) * 2, x.to("cpu")))
else:
c2p.put((rank, torch.ones(2, 2), x.to("cpu")))
p2c.get()
@unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@unittest.skipIf(NO_NCCL, "NCCL needed")
def test_shared_reduce_nccl(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_reduce_process,
[torch.ones(2, 2).to(i) for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_nccl,
1)
@classmethod
def _test_allgather_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
ys = [[torch.zeros_like(xs[0]) for i in range(world_size)]]
pg.allgather(ys, xs).wait()
for i in range(world_size):
c2p.put((rank, torch.ones(2, 2) * i, ys[0][i].to("cpu")))
p2c.get()
@unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
def test_shared_allgather_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allgather_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
self.world_size)
@unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
@unittest.skipIf(NO_NCCL, "NCCL needed")
def test_shared_allgather_nccl(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allgather_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_nccl,
self.world_size)
@classmethod
def _test_allgather_chunk_process(
cls, rank, filename, shared_tensor, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
chunks = torch.chunk(shared_tensor, world_size, dim=0)
x = chunks[rank]
ys = [torch.zeros_like(x) for _ in range(world_size)]
pg.allgather(ys, x).wait()
c2p.put((rank, chunks[0].to("cpu"), ys[0].to("cpu")))
c2p.put((rank, chunks[1].to("cpu"), ys[1].to("cpu")))
p2c.get()
@unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
def test_shared_allgather_chunk_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allgather_chunk_process,
torch.tensor(range(4)).reshape(2, 2),
ProcessGroupShareTensorTest._init_pg_gloo,
self.world_size)
if __name__ == '__main__':
run_tests()
| [
"rnauhria@gmail.com"
] | rnauhria@gmail.com |
c441939749afe4e4ef03ce98b11f3db08171ecab | e393789a7b3e7cb50e3c6192843490b313004b51 | /interpreter/BSL_Expr/Variable.py | 589afb8d5d5da005c961709a99a6d7100c72597f | [] | no_license | migeed-z/Lisp_interpreter | 5e9694279169a924c864c017ff65e59fa25c0242 | 5992627e1cff299c5f3ed14064e499b9296fbc5f | refs/heads/master | 2021-01-19T03:39:15.280390 | 2016-07-27T17:40:58 | 2016-07-27T17:40:58 | 36,232,933 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | import DirPaths
from BSLExpr import BSLExpr
from BSLError import BSLError
from Global_Scope import foo
class Variable(BSLExpr):
    """
    To represent a class of Variables
    """
    def __init__(self, name):
        """
        :param name: String representing the name of the variable
        """
        self.name = name
    def eval_internal(self, defs):
        # Resolve the variable: local definitions first, then the global
        # scope exposed by ``foo.getter()``; raise if bound in neither.
        # NOTE(review): truthiness (``not val``) means a variable bound to
        # a falsy value (0, '', []) is treated as undefined — confirm that
        # this is intended.
        val = defs.get(self.name)
        if not val:
            val_again = foo.getter().get(self.name)
            if not val_again:
                raise BSLError('Variable not defined')
            else:
                return val_again
        else:
            return val
    def equals(self, other):
        # Explicit equality helper; mirrors __eq__ below.
        if not isinstance(other, Variable):
            return False
        else:
            return self.name == other.name
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 — confirm Variables are never used as
        # dict keys or set members.
        if not isinstance(other, Variable):
            return False
        else:
            return other.name == self.name
def __str__(self):
return '%s(%s)' % ('Variable', self.name) | [
"migeed.z@outlook.com"
] | migeed.z@outlook.com |
d01a80ae4f6132dc1b95215bb5c4bd2e8dd5965d | 2f4184af31121fd31b397d5b529b795774b30856 | /backend/users/migrations/0002_auto_20200103_1043.py | afc8436f26bbd91b9e44b910a9d5eeb0c2462314 | [] | no_license | crowdbotics-apps/mobileappdeploy-dev-1434 | c0944b53808fbb02ff664a8f2bd012648fc9e352 | 5a9bcdda33a473174be7f0446e4109c1d8d2c383 | refs/heads/master | 2022-03-28T11:20:26.124765 | 2020-01-15T10:40:11 | 2020-01-15T10:40:11 | 231,558,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # Generated by Django 2.2.9 on 2020-01-03 10:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema change: relax User.name so that blank/null
    # values are allowed (max length 255).
    dependencies = [
        ("users", "0001_initial"),
    ]
    operations = [
        migrations.AlterField(
            model_name="user",
            name="name",
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
cb78e9bfef4bda7c3de7f5f4cd7cb8b0170a5eac | bf5935cecf1b65461b1de9a67a013f0b03c3d1d0 | /boutique/migrations/0050_wishlist_product.py | 26d0cd8c9c65dd268439f466a6ceae86ac5b44bc | [] | no_license | meriemay/Shop | 53287aab6382163e6069130c8e5304ed7ffd0e3b | f5d44193e030c4ef9b5cf27896754767beaee3ef | refs/heads/master | 2021-01-18T17:50:49.688779 | 2017-08-29T14:34:34 | 2017-08-29T14:34:34 | 100,497,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-28 08:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional many-to-many Wishlist.product relation.
    # NOTE(review): ``null=True`` has no effect on ManyToManyField
    # (Django warns about it), but historical migrations are left as-is.
    dependencies = [
        ('boutique', '0049_auto_20170825_1255'),
    ]
    operations = [
        migrations.AddField(
            model_name='wishlist',
            name='product',
            field=models.ManyToManyField(blank=True, null=True, to='boutique.Product'),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
9f381d38ffe18786f175c6900088a7afe97ad4c0 | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/output/375_processed | 681f703fc07d0c89061c242532b1892f7665f4f2 | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 958 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that list xcode_settings are flattened before being exported to the
environment.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
foo("This test is currently disabled: https://crbug.com/483696.")
sys.foo(0)
test = TestGyp.foo(formats=['ninja', 'make', 'xcode'])
CHDIR = 'non-strs-flattened-to-env'
INFO_PLIST_PATH = 'Test.app/Contents/Info.plist'
test.foo('test.gyp', chdir=CHDIR)
test.foo('test.gyp', test.ALL, chdir=CHDIR)
info_plist = test.foo(INFO_PLIST_PATH, chdir=CHDIR)
test.foo(info_plist)
test.foo(info_plist, '''\
\t<key>My Variable</key>
\t<string>some expansion</string>''')
test.foo(info_plist, '''\
\t<key>CFlags</key>
\t<string>-fstack-protector-all -fno-strict-aliasing -DS="A Space"</string>''')
test.foo()
| [
"rares.begu@gmail.com"
] | rares.begu@gmail.com | |
05d60c2656e98a7fcf8f29e8cc2fdaf391b8b3c8 | 24a13b07623ce41e57ea4df1fcce0befb14d3406 | /model/densenet201/model34_val3.py | b5f3c8ab8fdd3b376f4e844f5fc00d552e6f292e | [
"MIT"
] | permissive | shorxp/jd-fashion | 5f37e6c2235200944869e9a0da4d741c89d63b9e | 817f693672f418745e3a4c89a0417a3165b08130 | refs/heads/master | 2021-09-22T18:40:13.030601 | 2018-09-13T13:50:05 | 2018-09-13T13:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,298 | py | """
以model1为原型,新增real crop
"""
import math
import os
import queue
import time
import keras
from keras.layers import Dense, BatchNormalization, Activation
import config
from util import data_loader
from util import keras_util
from util.keras_util import KerasModelConfig
model_config = KerasModelConfig(k_fold_file="1.txt",
model_path=os.path.abspath(__file__),
image_resolution=224,
data_type=[config.DATA_TYPE_ORIGINAL],
label_up_sampling=[100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
label_position=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
label_color_augment=[0, 3, 6, 12],
downsampling=0.5,
train_batch_size=[16, 16, 16],
val_batch_size=256,
predict_batch_size=256,
epoch=[1, 4, 10],
lr=[0.0005, 0.00005, 0.000005],
data_visualization=True,
tta_flip=True,
freeze_layers=[-1, 0.6, 5])
def get_model(freeze_layers=-1, lr=0.01, output_dim=1, weights="imagenet"):
    """Build and compile a DenseNet201-based multi-label classifier.

    :param freeze_layers: -1 freezes every base-model layer; a value < 1
        is interpreted as the fraction of base layers to freeze; any
        other integer freezes that many leading layers.
    :param lr: Adam learning rate.
    :param output_dim: number of sigmoid output units (one per label).
    :param weights: initial weights for the base model ("imagenet" or None).
    :return: a compiled ``keras.Model``.
    """
    base_model = keras.applications.DenseNet201(include_top=False, weights=weights,
                                                input_shape=model_config.image_shape, pooling="avg")
    x = base_model.output
    x = Dense(256, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    predictions = Dense(units=output_dim, activation='sigmoid')(x)
    model = keras.Model(inputs=base_model.input, outputs=predictions)
    if freeze_layers == -1:
        print("freeze all basic layers, lr=%f" % lr)
        for layer in base_model.layers:
            layer.trainable = False
    else:
        if freeze_layers < 1:
            freeze_layers = math.floor(len(base_model.layers) * freeze_layers)
        for layer in range(freeze_layers):
            # Bug fix: the original assigned a nonexistent ``train_layer``
            # attribute, so partial freezing silently froze nothing.
            base_model.layers[layer].trainable = False
        print("freeze %d basic layers, lr=%f" % (freeze_layers, lr))
    model.compile(loss="binary_crossentropy",
                  optimizer=keras.optimizers.Adam(lr=lr))
    # model.summary()
    print("basic model have %d layers" % len(base_model.layers))
    return model
def train():
evaluate_queue = queue.Queue()
evaluate_task = keras_util.EvaluateTask(evaluate_queue)
evaluate_task.setDaemon(True)
evaluate_task.start()
checkpoint = keras_util.EvaluateCallback(model_config, evaluate_queue)
start = time.time()
model_config.save_log("####### start train model")
init_stage = model_config.get_init_stage()
model_config.save_log("####### init stage is %d" % init_stage)
for i in range(init_stage, len(model_config.epoch)):
model_config.save_log("####### lr=%f, freeze layers=%2f epoch=%d" % (
model_config.lr[i], model_config.freeze_layers[i], model_config.epoch[i]))
clr = keras_util.CyclicLrCallback(base_lr=model_config.lr[i], max_lr=model_config.lr[i] * 5,
step_size=model_config.get_steps_per_epoch(i) / 2)
train_flow = data_loader.KerasGenerator(model_config=model_config,
featurewise_center=True,
featurewise_std_normalization=True,
width_shift_range=0.15,
height_shift_range=0.1,
horizontal_flip=True,
real_transform=True,
rescale=1. / 256).flow_from_files(model_config.train_files, mode="fit",
target_size=model_config.image_size,
batch_size=
model_config.train_batch_size[i],
shuffle=True,
label_position=model_config.label_position)
if i == 0:
model_config.save_log("####### initial epoch is 0, end epoch is %d" % model_config.epoch[i])
model = get_model(freeze_layers=model_config.freeze_layers[i], lr=model_config.lr[i],
output_dim=len(model_config.label_position))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
else:
model = get_model(freeze_layers=model_config.freeze_layers[i], output_dim=len(model_config.label_position),
lr=model_config.lr[i], weights=None)
if i == init_stage:
model_config.save_log("####### load weight file: %s" % model_config.get_weights_path(model_config.initial_epoch))
model.load_weights(model_config.get_weights_path(model_config.initial_epoch))
model_config.save_log("####### initial epoch is %d, end epoch is %d" % (
model_config.initial_epoch, model_config.epoch[i]))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
initial_epoch=model_config.initial_epoch,
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
else:
model_config.save_log("####### load weight file: %s" % model_config.get_weights_path(model_config.epoch[i - 1]))
model.load_weights(model_config.get_weights_path(model_config.epoch[i - 1]))
model_config.save_log(
"####### initial epoch is %d, end epoch is %d" % (model_config.epoch[i - 1], model_config.epoch[i]))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
initial_epoch=model_config.epoch[i - 1],
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
model_config.save_log("####### train model spend %d seconds" % (time.time() - start))
model_config.save_log("####### train model spend %d seconds average" % ((time.time() - start) / model_config.epoch[-1]))
| [
"13658247573@163.com"
] | 13658247573@163.com |
fb9de33dcb80016f6158f209087b5ae75fde255b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/GFocalV2/mmdet/models/detectors/__init__.py | ed9cd584a6fe22550b3a4223bcf5df0888522c82 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,718 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .atss import ATSS
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .cornernet import CornerNet
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .point_rend import PointRend
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .single_stage import SingleStageDetector
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade',
'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector',
'FOVEA', 'FSAF', 'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA',
'YOLOV3', 'YOLACT', 'VFNet'
]
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
2e630d4a8372fcdc894eec1be1721f1cfc21972e | d3e6d6555b0314936902727af36de2f1b7432bf8 | /subsets-ii/subsets-ii.py | 333efee762db057e3ce4afb4fc9b0facc37e122e | [] | no_license | fly2rain/LeetCode | 624b1e06e1aa3174dfb5c81834b58cc8fd7ad073 | 4ddb5a051c6e2051f016a675fd2f5d566c800c2a | refs/heads/master | 2021-01-18T03:12:22.402044 | 2015-12-28T04:31:19 | 2015-12-28T04:31:19 | 85,842,050 | 0 | 1 | null | 2017-03-22T15:05:20 | 2017-03-22T15:05:19 | null | UTF-8 | Python | false | false | 1,312 | py |
class Solution(object):
    def subsetsWithDup(self, nums):
        """
        Return every distinct subset of ``nums`` (which may contain
        duplicates): multiplicities are counted once, then the size-k
        multisubsets are generated for each k = 0..len(nums).

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        if not nums:
            return [[]]
        def sort_counter():
            # Count occurrences and return the sorted distinct values
            # alongside the counter dict.
            d = {}
            for n in nums:
                d[n] = d.get(n, 0) + 1
            skey = sorted(d.keys())
            return skey, d
        skey, d = sort_counter()
        def ksubsets(k, start, end):
            # All size-k multisubsets drawn from skey[start:end+1],
            # honouring each value's multiplicity recorded in d.
            if k == 0:
                return [[]]
            elif k == 1:
                return [[skey[i]] for i in range(start, end+1)]
            elif start == end:
                if d[skey[start]] >= k:
                    return [[skey[start]] * k]
                else:
                    return []
            else:
                ret = []
                pivot = skey[start]
                # Take j copies of the pivot (0..min(count, k)), then fill
                # the remaining k-j slots from the later values.
                for j in range(1+min(d[pivot], k)):
                    ret += [[pivot] * j + l for l in ksubsets(k-j, start+1, end)]
                return ret
        # Concatenate the k-subsets for every size k.  NOTE(review): bare
        # ``reduce`` implies Python 2 (it lives in functools on Python 3).
        return reduce(lambda x, y: x + y,
                      (ksubsets(i, 0, len(skey)-1) for i in range(len(nums)+1)), [])
if __name__ == '__main__':
    # Ad-hoc smoke run (Python 2 ``print`` statement syntax).
    # print Solution().subsetsWithDup([1,2,2,4])
    # print Solution().subsetsWithDup([1,2,2])
    l = Solution().subsetsWithDup([1,2,2,3,3,4,4,4,6])
    print l
| [
"xuzheng1111@gmail.com"
] | xuzheng1111@gmail.com |
b801d42c1ee4294fcc6d73cda3b9f82fd3ad45e1 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/278/72086/submittedfiles/testes.py | c60d08e3c84dcd4c77f3b9a3deb528914f847acd | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | a = 30
b = 5
c = 10
# Python 2 exercise.  With a=30 (assigned above), b=5, c=10: a<b and
# a<c are both false and b<c is true, so only "comando 3" is printed
# by this chain.
if a<b:
    print"comando 1"
else :
    if a<c:
        print"comando 2"
    else:
        if b<c:
            print"comando 3"
print"pronto!" | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
a5765a7098151bdad915d3383c81270bd45cf115 | f34dc191304f0c54527948aa7b7123fd6efe85b9 | /connect.py | e0c1778b8defe4e2ecb40504dc48d2b47a36deae | [] | no_license | sujith1919/groza | b3fc4641de48423da9a219c33e390ea2c4915687 | 5b68e052266d5307a0058d7031b3b20c4a1b9bcb | refs/heads/master | 2023-02-28T03:09:51.568592 | 2021-02-02T16:34:49 | 2021-02-02T16:34:49 | 335,353,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | #!/usr/bin/python
import psycopg2
from config import config
def connect():
    """ Connect to the PostgreSQL database server """
    conn = None
    try:
        # read connection parameters
        params = config()
        # connect to the PostgreSQL server
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(**params)
        # create a cursor
        cur = conn.cursor()
        # execute a statement
        #print('PostgreSQL database version:')
        cur.execute('SELECT version()')
        # NOTE(review): the version query result above is never fetched;
        # fetchall() below returns LIFEBOAT rows, so the ``db_version``
        # name is misleading.
        cur.execute('SELECT * from LIFEBOAT')
        # display the PostgreSQL database server version
        db_version = cur.fetchall()
        print(db_version)
        # close the communication with the PostgreSQL
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        # Broad catch is deliberate for this demo script: report the
        # error and fall through to the cleanup below.
        print(error)
    finally:
        if conn is not None:
            conn.close()
            print('Database connection closed.')
if __name__ == '__main__':
connect()
print("lol")
| [
"jayarajan.sujith@oracle.com"
] | jayarajan.sujith@oracle.com |
9f493953956592c94d7fa5dfb412e0a3d595dd40 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /rFK7WftrcrEu6rbu8_20.py | 478746c1b355f82bace8cd65b2e14e906b2b1d3e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | """
Given a **Binary Search Tree** (BST) implementation, complete the traverse
function which is present in the BST class. Here you have to perform the
level-order traversal on BST which is another term for **Breadth First
Traversal**.
### Examples
traverse() ➞ [10, 4, 20, 1, 5]
10
/ \
4 20
/ \
1 5
traverse() ➞ [100, 70, 200, 34, 80, 300]
100
/ \
70 200
/ \ \
34 80 300
### Notes
Make sure you don't modify the code that is already in the **Code** tab. Only
complete the `traverse()` function and return an array.
"""
from collections import deque
# Please don't modify the code below the traverse function is in BST class
# Node class
class Node:
    """One binary-tree node: a payload value plus two child references."""
    def __init__(self, data):
        self.data = data
        self.left = self.right = None
# BST Class
class BST:
    """Binary search tree supporting insertion and level-order traversal."""
    def __init__(self):
        self.head = None  # root node; None while the tree is empty
    def insert(self, data):
        """Insert ``data`` as a new leaf and return the root node.

        NOTE(review): when ``data`` equals an existing node's value and
        that node already has a left child, the final ``else`` replaces
        the left child (losing its subtree) — confirm duplicate values
        are never inserted.
        """
        new_node = Node(data)
        if self.head == None:
            self.head = new_node
        else:
            current = self.head
            while True:
                # Descend while a child exists on the required side...
                if data > current.data and current.right:
                    current = current.right
                elif data < current.data and current.left:
                    current = current.left
                # ...otherwise attach the new leaf here.
                elif data > current.data:
                    current.right = new_node
                    break
                else:
                    current.left = new_node
                    break
        return self.head
    def traverse(self):
        """Return node values in level order (breadth-first)."""
        res = []
        q = deque()
        if self.head:
            q.append(self.head)
        while q:
            cur = q.popleft()
            res.append(cur.data)
            # Enqueue surviving children left-to-right for the next level.
            for child in [cur.left, cur.right]:
                if child:
                    q.append(child)
        return res
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
59123f8e441542d1a5ab991404a47831d48ce76d | 5ebb37aa988cbff11eb7d537298f5caec32bf79b | /docs/source/with_sidebar/conf.py | 7230a34f5dd17848d3c6fe8fae9b85251e68dfda | [] | no_license | gabrielfalcao/sphinx-bulma-theme | 07468b853d648d0c3fdf2df75f325e89c2d26d4f | 90f04d0a964d1d35b17ea3efc6e00d1692a8d96b | refs/heads/master | 2020-03-13T13:52:02.093165 | 2018-04-30T04:55:58 | 2018-04-30T04:55:58 | 131,147,099 | 6 | 0 | null | 2019-12-20T17:50:05 | 2018-04-26T11:41:16 | CSS | UTF-8 | Python | false | false | 5,075 | py | # -*- coding: utf-8 -*-
import sys
try:
from pathlib2 import Path
except ImportError:
from pathlib import Path
project_path = Path(__file__).absolute().parent.joinpath('../../..')
sys.path.insert(0, project_path.as_posix())
import sphinx_bulma_theme # noqa
project = 'Sphinx Bulma Theme'
copyright = '2018, Gabriel Falcao'
author = 'Gabriel Falcao'
version = sphinx_bulma_theme.version
release = version
needs_sphinx = '1.7.1'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosectionlabel',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.githubpages',
'sphinx.ext.ifconfig',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinxcontrib.httpdomain',
'sphinxcontrib.mermaid',
'sphinx-jsonschema',
]
html_theme = "bulma"
html_theme_path = [sphinx_bulma_theme.get_html_theme_path()]
html_theme_options = {
'show_topbar': False,
'logo_path': 'logo.png',
'analytics_id': None,
'breadcrumbs_at_top': True,
'canonical_url': None,
'collapse_navigation': False,
'content_margin_left': None,
'content_padding_left': None,
'content_padding_top': None,
'display_version': True,
'logo_only': False,
'navigation_depth': 4,
'prev_next_buttons_location': 'bottom',
'sidebar_class': 'has-text-dark',
'sidebar_container_class': 'is-3',
'sidebar_right': None,
'sidebar_style': None,
'sticky_navigation': True,
}
html_static_path = ['_static']
pygments_style = 'friendly'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SphinxBulmaThemedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SphinxBulmaTheme.tex', 'Sphinx Bulma Theme Documentation',
'Gabriel Falcão', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sphinxbulmatheme', 'Sphinx Bulma Theme Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SphinxBulmaTheme', 'Sphinx Bulma Theme Documentation',
author, 'SphinxBulmaTheme', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'sphinx': ('http://www.sphinx-doc.org/en/master', None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"gabriel@nacaolivre.org"
] | gabriel@nacaolivre.org |
7d2e9e462d909fa1e72fc960b036bbe98ba5cdca | 42a9a9ae2951a32bf181afd9edbcf7e6716dbcc8 | /server.py | e5cfec6b6537ca36b0b484d62fdbdf47f7709e23 | [] | no_license | mariiagracheva/flask | 0608eee43e9ded1296e25862ea38494e438ba4fe | aa02b37f5c1619f732e6d21d8995553bed2ffd44 | refs/heads/master | 2021-01-11T17:22:47.997442 | 2017-01-23T04:14:57 | 2017-01-23T04:14:57 | 79,769,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | from flask import Flask, request, render_template, flash, redirect
from flask_debugtoolbar import DebugToolbarExtension
import jinja2
import locale
app = Flask(__name__)

# Required to use Flask sessions and the debug toolbar
app.secret_key = "ABC"

# Job titles offered on the application form; the leading empty string is the
# blank "no selection" option and is treated as invalid input by success().
JOBS = ['', 'Software Engineer', 'QA Engineer', 'Product Manager']

# YOUR ROUTES GO HERE
@app.route("/")
def start():
    """Serve the home page from templates/index.html."""
    template_name = "index.html"
    return render_template(template_name)
@app.route("/application-form")
def form():
    """Serve the job-application form, passing the selectable job titles."""
    context = {"jobs": JOBS}
    return render_template("application-form.html", **context)
@app.route("/application-success", methods=["POST"])
def success():
    """Show a summary of the submitted application form.

    Re-renders the blank form when any field is missing or empty;
    otherwise renders the response page with the salary formatted as
    US currency.
    """
    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')

    firstname = request.form.get('firstname')
    lastname = request.form.get('lastname')
    raw_salary = request.form.get('salary')
    job = request.form.get('job')

    # Validate BEFORE converting. The original called int() on the raw
    # salary first, so an empty salary raised ValueError before the check,
    # and then compared the *formatted* salary against "" (never true).
    # `not x` also covers the None that form.get() returns for a missing key.
    if not firstname or not lastname or not raw_salary or not job:
        return render_template("application-form.html", jobs=JOBS)

    salary = locale.currency(int(raw_salary), symbol=True, grouping=True)
    return render_template("application-response.html",
                           firstname=firstname,
                           lastname=lastname,
                           salary=salary,
                           job=job)
if __name__ == "__main__":
    # We have to set debug=True here, since it has to be True at the
    # point that we invoke the DebugToolbarExtension
    app.debug = True

    # Use the DebugToolbar
    DebugToolbarExtension(app)

    # 0.0.0.0 binds all network interfaces, not just localhost.
    app.run(host="0.0.0.0")
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
3443fafba6cbda0baf4361654caae167bcb39623 | 92137962a84e724df31b63367854349a875f1c43 | /tapis_cli/commands/taccapis/v2/files/formatters.py | 4655b555819846a123c2173baea50413542a73f3 | [
"BSD-3-Clause"
] | permissive | TACC-Cloud/tapis-cli | e3a26e79a20d1ada4cb2dc9ef204cae3e385bfe7 | d34e8635d3dbacc8276cf52b6bae04caacd655de | refs/heads/main | 2023-04-08T14:47:27.707885 | 2022-02-13T17:43:26 | 2022-02-13T17:43:26 | 203,083,094 | 11 | 3 | BSD-3-Clause | 2022-04-01T20:23:23 | 2019-08-19T02:21:28 | Python | UTF-8 | Python | false | false | 1,439 | py | """Formatters customized for system records and listings
"""
from tapis_cli.commands.taccapis.formatters import (TaccApisCommandBase,
TaccApisFormatOne,
TaccApisFormatMany)
from tapis_cli.clients.services.mixins import ParserExtender
from tapis_cli.utils import humanize_bytes
from .models import File
__all__ = [
'FilesFormatOne', 'FilesFormatMany', 'FilesHistoryFormatMany',
'FilesPemsFormatMany'
]
class FilesBase(TaccApisCommandBase):
    """Shared base for files formatters: service id plus table value rendering."""

    service_id_type = File.service_id_type

    def render_extended_parser_value(self,
                                     key,
                                     value,
                                     parsed_args,
                                     formatter=None):
        """Return the (key, value) pair to display for one record field.

        When rendering a table with --humanized listing enabled, byte
        counts ('length'/'size') are converted via humanize_bytes();
        every other field is returned unchanged.
        """
        if formatter == 'table' and parsed_args.ls_humanized:
            if key in ('length', 'size'):
                return key, humanize_bytes(value)
        # Fix: the original fell through here for table output and returned
        # an implicit None; always hand the pair back unchanged instead.
        return key, value
class FilesFormatOne(FilesBase, TaccApisFormatOne):
    """Single-record display formatter for files commands."""
    pass


class FilesFormatMany(FilesBase, TaccApisFormatMany):
    """Multi-record (listing) display formatter for files commands."""
    pass


class FilesHistoryFormatMany(FilesBase, TaccApisFormatMany):
    """Listing formatter for file history records."""
    pass


class FilesPemsFormatMany(FilesBase, TaccApisFormatMany):
    """Listing formatter for file permission (pems) records."""
    pass
| [
"vaughn@tacc.utexas.edu"
] | vaughn@tacc.utexas.edu |
e53ecec904462ec1004ff50bab6c0f5e0314dfe2 | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/quiz08_20200709143734.py | a40bb14de63318c006975bfed6d4709c4dd80323 | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | # Quiz) 주어진 코드를 활용하여 부동산 프로그램을 작성하시오.
# (출력 예제)
# 총 3대의 매물이 있습니다.
# 강남 아파트 매매 10억 2010년
# 마포 오피스텔 전세 5억 2007년
# 송파 빌라 월세 500/50 2000년
# [코드]
class House:
    """A single real-estate listing (location, type, deal, price, year built)."""

    def __init__(self, location, house_type, deal_type, price, completion_year):
        """Store the listing attributes exactly as given."""
        self.location = location
        self.house_type = house_type
        self.deal_type = deal_type
        self.price = price
        self.completion_year = completion_year

    def show_detail(self):
        """Print the listing fields on one line, space separated."""
        fields = (self.location, self.house_type, self.deal_type,
                  self.price, self.completion_year)
        print(*fields)
# Build the three sample listings and print them.
# Fixes vs. the original draft: the list was named `house` but appended to
# as `houses` (NameError), show_detail() was misspelled `show_()`, and the
# summary format string was missing "대" (the expected output documented at
# the top of the file is "총 3대의 매물이 있습니다.").
houses = []

house1 = House("강남", "아파트", "매매", "10억", "2010년")
house2 = House("마포", "오피스텔", "전세", "5억", "2007년")
house3 = House("송파", "빌라", "월세", "500/50", "2000년")

houses.append(house1)
houses.append(house2)
houses.append(house3)

print("총 {0}대의 매물이 있습니다.".format(len(houses)))
for house in houses:
    house.show_detail()
| [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
bb2663ce1944d79d1b3a417b9622565af2486c22 | d458c71e75274914ace770e0a0d21af9d4e8f450 | /config/urls.py | 1a0dba76a81e5dd607ceea614e662ad62ad9dc76 | [
"MIT"
] | permissive | robertatakenaka/greetings | c5464e1d3802454c9c62a029340f51363a720f43 | e0537ddb8a942c985a15bbf9a30ef1c49d93a757 | refs/heads/master | 2021-08-11T13:06:12.963439 | 2017-11-01T11:15:52 | 2017-11-01T11:15:52 | 109,040,213 | 0 | 0 | null | 2017-11-01T11:15:53 | 2017-10-31T19:00:33 | Python | UTF-8 | Python | false | false | 1,549 | py | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),

    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),

    # User management
    url(r'^users/', include('greetings.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),

    # Your stuff: custom urls includes go here

] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]

    # Mount the debug toolbar in front of everything else when installed.
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        urlpatterns = [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ] + urlpatterns
| [
"roberta.takenaka@scielo.org"
] | roberta.takenaka@scielo.org |
baf719f820c2c4aaa3822e20f63646d0cbdc1bfa | 5ba903c7907124c287e46019df0d30533f270a8c | /detector_caffe/detect.py | b861f3ba14f8534b076322d94573179e58869512 | [] | no_license | Jeffin-Studios/tracker | 2e9295651fec020489a9e55b9f2da5971754870b | 98daeaddecc6a82cfa851d6841a355d40026b02d | refs/heads/master | 2020-03-28T22:09:29.819043 | 2018-09-18T01:14:54 | 2018-09-18T01:14:54 | 149,209,024 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,474 | py | # Uses opencv DNN module (only works with caffe and toch models, not tensorflow yet)
# usage: python detect1.py --prototxt MobileNetSSD_deploy.prototxt.txt --model MobileNetSSD_deploy.caffemodel
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
# construct the argument parse and parse the arguments
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
                help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
                help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
                help="minimum probability to filter weak detections")
args = vars(ap.parse_args())

# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# initialize the video stream, allow the cammera sensor to warmup,
# and initialize the FPS counter
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()

# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=1000)

    # grab the frame dimensions and convert it to a blob
    # (scale factor 0.007843 is ~1/127.5, paired with the mean of 127.5)
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)

    # pass the blob through the network and obtain the detections and
    # predictions
    net.setInput(blob)
    detections = net.forward()

    # loop over the detections
    for i in np.arange(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the prediction
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the `confidence` is
        # greater than the minimum confidence
        if confidence > args["confidence"]:
            # extract the index of the class label from the
            # `detections`, then compute the (x, y)-coordinates of
            # the bounding box for the object
            idx = int(detections[0, 0, i, 1])
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # draw the prediction on the frame
            label = "{}: {:.2f}%".format(CLASSES[idx],
                                         confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                          COLORS[idx], 2)
            # keep the label inside the frame when the box touches the top
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

    # update the FPS counter
    fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop() | [
"richardbao419@gmail.com"
] | richardbao419@gmail.com |
e799f2913c67398be21fad9121cfa8058a2b5525 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_168/101.py | 7a20f29104dbb68ecb80573705f74c0418c596af | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | DIRECTIONS = {
'^': (-1, 0),
'>': (0, 1),
'<': (0, -1),
'v': (1, 0)
}
def print_map(grid):
    """Print the grid, one row per line, cells concatenated."""
    for line in grid:
        print(''.join(line))
def get_arrows(grid, row, col):
    """Follow the arrow chain starting at (row, col).

    Returns the list of arrow coordinates visited, in order, until the
    walk steps off the grid; returns None when the start cell is '.' or
    when the walk revisits a cell (a cycle).
    NOTE(review): a '.' cell reached mid-walk leaves the position
    unchanged forever — this appears to assume chains step arrow-to-arrow;
    confirm against the contest input format.
    """
    if grid[row][col] == '.':
        return None
    visited = set()
    chain = []
    height = len(grid)
    width = len(grid[0])
    while 0 <= row < height and 0 <= col < width:
        if (row, col) in visited:
            return None  # cycle detected
        if grid[row][col] != '.':
            chain.append((row, col))
            visited.add((row, col))
            step_row, step_col = DIRECTIONS[grid[row][col]]
            row += step_row
            col += step_col
    return chain
def solve(grid):
    """Count the arrows that need redirecting, or return 'IMPOSSIBLE'.

    Builds the longest arrow chain ending at each endpoint cell, then:
    chains of length >= 2 each contribute one change; a singleton arrow
    contributes one change only if some other arrow exists in its row or
    column (any of the four directions), otherwise the case is impossible.
    (Python 2: uses dict.iteritems().)
    """
    R = len(grid)
    C = len(grid[0])
    chains = {}
    for row in range(R):
        for col in range(C):
            arrows = get_arrows(grid, row, col)
            if arrows is None:
                continue
            # Key chains by their final cell, keeping only the longest.
            if arrows[-1] not in chains:
                chains[arrows[-1]] = arrows
            else:
                if len(arrows) > len(chains[arrows[-1]]):
                    chains[arrows[-1]] = arrows
    count = 0
    for _, chain in chains.iteritems():
        if len(chain) == 1:
            # Lone arrow: scan outward in all four directions for any
            # other arrow it could be redirected toward.
            found = False
            for direction in DIRECTIONS.values():
                row, col = chain[0]
                row += direction[0]
                col += direction[1]
                while 0 <= row < len(grid) and 0 <= col < len(grid[0]):
                    if grid[row][col] != '.':
                        count += 1
                        found = True
                        break
                    row += direction[0]
                    col += direction[1]
                if found:
                    break
            if not found:
                return 'IMPOSSIBLE'
        else:
            count += 1
    return count
input_file = open('a-large.in')
cases = int(input_file.readline().strip())
case = 0
while case < cases:
case += 1
R, C = [int(x) for x in input_file.readline().split()]
grid = []
for row in range(R):
grid.append(list(input_file.readline().strip()))
print "Case #{}: {}".format(case, solve(grid))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
713de2ef75b55a1d9819d4043f76d345658aeb8f | f889bc01147869459c0a516382e7b95221295a7b | /swagger_client/models/body_124.py | bcbd3e061f3cd63f829a301297edc59e8275681b | [] | no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Body124(object):
    """Request body wrapping a single sales rule.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # attribute name -> swagger type
    swagger_types = {
        'rule': 'SalesRuleDataRuleInterface'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'rule': 'rule'
    }

    def __init__(self, rule=None):
        """Build a Body124; ``rule`` is validated through the property setter."""
        self._rule = None
        self.rule = rule

    @property
    def rule(self):
        """The wrapped rule (SalesRuleDataRuleInterface)."""
        return self._rule

    @rule.setter
    def rule(self, rule):
        """Assign the rule, rejecting None."""
        if rule is None:
            raise ValueError("Invalid value for `rule`, must not be `None`")
        self._rule = rule

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is a Body124 with identical attributes."""
        return isinstance(other, Body124) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """True when the two objects are not equal."""
        return not self == other
"sander@wildatheart.eu"
] | sander@wildatheart.eu |
267f0e68ab5b078ea706bdaf249440c2397dcdc5 | f0a4ba1f1f941092e68e4b1ef9cff0d3852199ef | /Do_it!/6.정렬 알고리즘/1.버블정렬/교환과정출력.py | 31042799e3d6bd884b811ed00421b53226ffdbd8 | [] | no_license | lsb530/Algorithm-Python | d41ddd3ca7675f6a69d322a4646d75801f0022b2 | a48c6df50567c9943b5d7218f874a5c0a85fcc6d | refs/heads/master | 2023-06-18T04:36:09.221769 | 2021-06-28T16:49:35 | 2021-06-28T16:49:35 | 367,775,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | # 버블 정렬 알고리즘 구현(정렬 과정을 출력)
"""비교하는 두 원소 사이에 교환할 경우 +를, 교환하지 않을 경우 -를 출력합니다."""
from typing import MutableSequence
def bubble_sort_verbose(a: MutableSequence) -> None:
    """Bubble sort *a* in place, printing every comparison step.

    For each comparison the array is echoed with a '+' marking a pair
    about to be swapped and a '-' a pair left alone, followed by the
    array state after the comparison; totals are printed at the end.
    """
    comparisons = 0
    swaps = 0
    n = len(a)
    for i in range(n - 1):
        print(f'패스 {i + 1}')
        for j in range(n - 1, i, -1):
            # Echo the array, marking the pair currently being compared.
            for m in range(n - 1):
                marker = (' ' if m != j - 1 else
                          ' +' if a[j - 1] > a[j] else ' -')
                print(f'{a[m]:2}' + marker, end='')
            print(f'{a[n - 1]:2}')
            comparisons += 1
            if a[j - 1] > a[j]:
                swaps += 1
                a[j - 1], a[j] = a[j], a[j - 1]
        # Echo the array at the end of this pass.
        for m in range(n - 1):
            print(f'{a[m]:2}', end=' ')
        print(f'{a[n - 1]:2}')
    print(f'비교를 {comparisons}번 했습니다.')
    print(f'교환을 {swaps}번 했습니다.')
if __name__ == '__main__':
    # Interactive driver: read the element count, read each element,
    # sort, then print the result in ascending order.
    print('버블 정렬을 수행합니다.')
    num = int(input('원소 수를 입력하세요 : '))
    x = [None] * num  # create a list with num elements
    for i in range(num):
        x[i] = int(input(f'x[{i}]: '))
    bubble_sort_verbose(x)  # bubble-sort the array x
    print('오름차순으로 정렬했습니다.')
    for i in range(num):
        print(f'x[{i}] = {x[i]}')
| [
"lsb530@naver.com"
] | lsb530@naver.com |
84a9a4d1098814f15e297fd445f8f8ab8b0c8e2c | ef7c458371a2293dc438efc088312d0cf4eb56e8 | /misc/steve hack day/d3.py | c3a80c444efb8350acf3c2ca011cef3ff6255618 | [] | no_license | agvania/Sefaria-Data | efd5c7ea4c07fb1967ab415e7ffe7094bed7486b | 4ae83072d915f42fef891ef9e442ce21fe089b64 | refs/heads/master | 2021-01-15T16:11:13.666382 | 2016-06-23T00:45:05 | 2016-06-23T00:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,439 | py | # -*- coding: utf-8 -*-
__author__ = 'stevenkaplan'
import urllib
import urllib2
from urllib2 import URLError, HTTPError
import json
import pdb
import os
import sys
import re
import csv
p = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, p)
sys.path.insert(0, '../Match/')
from match import Match
os.environ['DJANGO_SETTINGS_MODULE'] = "sefaria.settings"
from local_settings import *
from functions import *
from sefaria.sheets import save_sheet
sys.path.insert(0, SEFARIA_PROJECT_PATH)
from sefaria.model import *
from sefaria.model.schema import AddressTalmud
all_books = library.all_index_records()
'''
for each commentary, grab LinkSet
'''
# The five books of the Torah to scan.
books = ["Genesis", "Exodus", "Leviticus", "Numbers", "Deuteronomy"]
# Commentaries that count toward a verse's "commented on" total.
commentaries = ["Abarbanel",
                "Abravanel",
                "Baal HaTurim",
                "Chizkuni",
                "Daat Zkenim",
                "Haamek Davar",
                "Ibn Ezra",
                "Ikar Siftei Hachamim",
                "Kitzur Baal Haturim",
                "Kli Yakar",
                "Malbim",
                "Malbim Beur Hamilot",
                "Metzudat David",
                "Metzudat Zion",
                "Or HaChaim",
                "Penei Dovid",
                "Rabbeinu Bachya",
                "Rabbeinu Chananel",
                "Radak",
                "Ralbag",
                "Ramban",
                "Rashbam",
                "Rashi",
                "Saadia Gaon",
                "Sepher Torat Elohim",
                "Sforno",
                "Shadal",
                "Torah Temimah",
                "Tiferet Yisrael",
                "Toldot Aharon",
                "Akeidat Yitzchak",
                "Meshech Hochma",
                "Shney Luchot HaBrit"
                ]
# Maps segment ref -> number of distinct commentaries on it.
# NOTE(review): `max` and `sum` below shadow builtins; `probs` is never
# closed in this chunk — verify downstream.
dict_refs = {}
probs = open('probs.txt', 'w')
max = 0
top_refs = []
csvf = open('d3_data.csv', 'w')
csvwriter = csv.writer(csvf, delimiter=';')
csvwriter.writerow(["Ref", "Number"])
for book in books:
    book = library.get_index(book)
    refs = book.all_segment_refs()
    for ref in refs:
        # Distinct commentary names linked to this segment.
        count_arr = []
        for link in LinkSet(ref).array():
            if link.contents()['type'] == 'commentary':  # if there is time make sure three parshanut ones are included as they
                # dont have commentary type
                which_one = link.contents()['refs']
                # The side containing " on " names the commentary work.
                if which_one[0].find(' on ') >= 0:
                    this_commentary = which_one[0].split(" on ")[0]
                elif which_one[1].find(' on ') >= 0:
                    this_commentary = which_one[1].split(" on ")[0]
                else:
                    continue
                if this_commentary in commentaries:
                    if this_commentary not in count_arr:
                        count_arr.append(this_commentary)
                else:
                    # Unrecognized commentary: log it for later review.
                    probs.write(str(link.contents()['refs'])+'\n\n')
        sum = len(count_arr)
        if sum > max:
            max = sum
        if sum >= 13:
            top_refs.append(ref)
        dict_refs[ref] = sum
        csvwriter.writerow([str(ref).replace(' ', '_'), str(dict_refs[ref])])
csvf.close()
print max
# Save a source sheet with the most-commented-on passages (Python 2 script).
sheet = {
    "title": "Chumash Passages Most Commented On",
    "sources": [{"ref": ref.normal()} for ref in top_refs],
    "options": {"numbered": 1, "divineNames": "noSub"}
}
save_sheet(sheet, 1)
| [
"skaplan@brandeis.edu"
] | skaplan@brandeis.edu |
10620def303fa66435ffdd9215daf5bb3d45a01c | 4b379051aa3430eb2d8931f6055772731dcb199d | /512-Python_основы_и_применение/24468/stepik-512_24468-step3.py | deb1564bfd57d4325f1ccf6ff748ead36988e5cf | [] | no_license | dmikos/stepikCourse | 1416614ef51a4352374f37e86e3211c3b42cbaf6 | 3faeabfdc56cac597fb6b1495e7bb38a7f2a6816 | refs/heads/master | 2021-01-12T17:06:37.720050 | 2016-11-21T14:37:20 | 2016-11-21T14:37:20 | 69,057,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | x = [
("Guido", "van", "Rossum"),
("Haskell", "Curry"),
("John", "Backus")
]
def _full_name_length(parts):
    """Sort key: length of the space-joined full name."""
    return len(" ".join(parts))


# Order the names by full-name length, shortest first, then show the result.
x.sort(key=_full_name_length)
print(x)
| [
"dkostinov@gmail.com"
] | dkostinov@gmail.com |
570bca61bcb134fafb423856198d3af8acfd0527 | a1f2df675cfc595b15f1ca9390b7517989f2d4e0 | /testCase/organizations/testUpdateOrganization.py | 046acc5a93ef195d917a8110bb48969b3853b4b2 | [] | no_license | GGGYB/crm | d4def2f1abc89451e1c4b11b89ef100a842ed745 | 61932466dd0ac299adc661383d506389d5f0f8e7 | refs/heads/master | 2022-04-15T21:29:48.539311 | 2020-04-14T10:23:41 | 2020-04-14T10:23:41 | 255,575,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | from bs4 import BeautifulSoup
import json
import requests
import random
import datetime
import re
import copy
from decimal import Decimal as D
from commons import common
from commons.const import const
class UpdateOrganization:
    """Edit the organization (company) profile through the user-center pages."""

    def __init__(self, cookie, csrf):
        self.common = common.Common(cookie, csrf)
        self.base_url = const.BASE_URL
        self.base_url2 = const.SIGN_IN_BASE_URL
        self.csrf = csrf
        self.cookie = cookie
        self.response = ''
        self.announcements_id = []
        self.staff_size_list = []
        self.organization_industry = []
        self.organization_id = []
        self.params = ''

    # Fetch the staff-size options, industry options and organization id
    # from the edit page; returns (random staff size, random industry, id).
    def get_staff_size(self):
        url = self.base_url + '/user_center/organization/edit'
        response = self.common.get_html(url, '获取公司信息的页面')
        soup = BeautifulSoup(response.text, 'html.parser')
        staff_size = soup.findAll(attrs={'id': 'organization_client_attributes_staff_size'})
        self.staff_size_list = re.findall(r"value=\"(.*?)\">", str(staff_size))
        organization_industry = soup.findAll(attrs={'id': 'organization_client_attributes_industry'})
        self.organization_industry = re.findall(r"value=\"(.*?)\">", str(organization_industry))
        # Renamed from `id` to avoid shadowing the builtin.
        id_tags = soup.findAll(attrs={"id": "organization_client_attributes_id"})
        self.organization_id = re.findall(r'value=\"(.*?)"', str(id_tags))
        i = self.common.get_random_int(len(self.staff_size_list) - 1)
        n = self.common.get_random_int(len(self.organization_industry) - 1)
        return self.staff_size_list[i], self.organization_industry[n], self.organization_id[0]

    # Submit the edited company information.
    def update_organization(self):
        url = self.base_url + '/user_center/organization'
        # Fetch once and unpack: the original called get_staff_size() three
        # times, issuing three HTTP requests and combining values picked
        # from different responses/random draws.
        staff_size, organization_industry, organization_id = self.get_staff_size()
        body = {
            'utf8': '✓',
            '_method': 'patch',
            'authenticity_token': self.csrf,
            'attachment_id': '',
            'organization[client_attributes][shorter_name]': '234465',
            'organization[client_attributes][industry]': organization_industry,
            'organization[client_attributes][province_id]': '9',
            'organization[client_attributes][city_id]': '73',
            'organization[client_attributes][district_id]': '732',
            'organization[client_attributes][address_detail]': '1232',
            'organization[client_attributes][staff_size]': staff_size,
            'organization[client_attributes][id]': organization_id,
        }
        self.response = self.common.post_response_json(url, body, '编辑了公司信息')
"nlnongling@163.com"
] | nlnongling@163.com |
70cd969e64eb3781764a99c34ea91ea7f1ff765c | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /105_network/_exercises/templates/Python Network Programming/Section 4 Python 3 Network Hacking/25. Build an SMTP Server Username Enumerator.py | 83e1773403b7d8eecf7fc35c8393a9feb0953587 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,144 | py | # ______ so.., ___, a_p_
# ____ d_t_ ______ d_t_
#
#
# ___ scan ip users
# ___
# s _ ?.? ?.A.. ?.S..
# s.c.. ip 25
# rsp _ s.r.. 1024
# s.s.. _"HELO friend\n")
# rsp _ s.r.. 1024
# __ _"250" no. __ ?
# print("[!] Something went wrong, exiting.")
# ___.e.. 0
# s.s.. _"MAIL FROM:nice@guy.com\n")
# rsp _ s.r.. 1024
# __ _"250" no. __ ?
# print("[!] Something went wrong, exiting.")
# ___.e.. 0
# ___ user __ u..
# s.s.. _"RCPT TO:" + ?.rs...en.. + _"\n")
# rsp _ s.r.. 1024
# __ _"250" __ rsp
# print("[+] Valid: " + ?.rs..
# s.s.. _)"QUIT\n"
# s.c..
# ______ E.. __ err
# print(st. ?
#
#
# ___ main args
# start _ d_t_.n..
# print("==================================================")
# print("Started @ " + st. s..
# print("==================================================")
# w__ o..(?.w__.li.. __ fle
# usr _ # list
# __ ?.b.. !_ 0:
# ___ user __ f..
# __ le. ? + 1 !_ ?.b..
# u__.ap.. ?
# ____
# u__.ap.. ?
# s.. ?.i. u..
# de. u..|;
# __ le. u.. > 0
# s.. ?.i. u..
# ____ # No batches
# s.. ?.i. f..
# stop _ d_t_.n..
# print("==================================================")
# print("Duration: " + st. s.. - s..
# print("Completed @ " + st. s..
# print("==================================================")
#
#
# __ _______ __ _______
# parser _ a_p_.A_P..
# ?.a_a.. "ip" a.._"store" h.._"smtp host address")
# ?.a_a.. "wordlist" a.._"store" h.._"wordlist of usernames")
# ?.a_a.. "-b" "--batch" a.._"store" n.._'?' c.._10
# d.._0 h.._"attempts per connection" ty.._in.
#
# __ le. ___.a.. 2; __ 0 # Show help if required arg not included
# ?.p_h..
# ?.e..
#
# args _ ?.p_a.. # Declare argumnets object to args
# m.. ? | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
77a65bd2b0809b86da8ca73174e6de833fe41f84 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-mrsp.0/mrsp_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=88/sched.py | 260c6e748e206e0e7bc567fd6fd3308a1768c01f | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | -S 0 -X RUN -Q 0 -L 2 111 400
-S 0 -X RUN -Q 0 -L 2 61 200
-S 3 -X RUN -Q 1 -L 1 59 175
-S 3 -X RUN -Q 1 -L 1 49 200
-S 2 -X RUN -Q 2 -L 1 47 150
-S 2 -X RUN -Q 2 -L 1 41 175
-S 1 -X RUN -Q 3 -L 1 39 125
-S 1 -X RUN -Q 3 -L 1 38 125
-S 4 35 125
-S 4 32 125
-S 4 31 200
-S 4 29 200
-S 4 22 150
-S 5 19 175
-S 5 8 100
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
656fa1aa409f51a7b609208dc999e0ec2acb948f | 4229a406a83a573dc357c1144cae7c5aad6f673b | /trestle/transforms/implementations/osco.py | 7c64e73849e8aa13bfab384882cbd1647df6a632 | [
"Apache-2.0"
] | permissive | xee5ch/compliance-trestle | dbc0647fe18e1164a75bcfdc4d38687df14e3247 | 969c10eceb73202d2b7856bac598f9b11afc696e | refs/heads/main | 2023-09-02T17:21:35.659432 | 2021-11-17T00:01:27 | 2021-11-17T00:01:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,680 | py | # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2021 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Facilitate OSCAL-OSCO transformation."""
import json
import logging
from typing import Dict, List
from ruamel.yaml import YAML
from trestle.oscal.profile import Profile
from trestle.transforms.results import Results
from trestle.transforms.transformer_factory import FromOscalTransformer
from trestle.transforms.transformer_factory import ResultsTransformer
from trestle.transforms.utils.osco_helper import ResultsMgr
logger = logging.getLogger(__name__)
class OscoTransformer(ResultsTransformer):
    """Interface for Osco transformer."""

    def __init__(self) -> None:
        """Initialize."""
        self._results_mgr = ResultsMgr(self.get_timestamp())

    @property
    def analysis(self) -> List[str]:
        """Analysis notes accumulated by the results manager."""
        return self._results_mgr.analysis

    def _ingest_xml(self, blob: str) -> Results:
        """Ingest xml data; returns None when the blob is not XML."""
        # ?xml data
        if blob.startswith('<?xml'):
            resource = blob
            self._results_mgr.ingest_xml(resource)
        else:
            return None
        results = Results()
        results.__root__.append(self._results_mgr.result)
        return results

    def _ingest_json(self, blob: str) -> Results:
        """Ingest json data; returns None when the blob is not valid JSON."""
        try:
            # ? configmaps or auditree data
            jdata = json.loads(blob)
            # https://docs.openshift.com/container-platform/3.7/rest_api/api/v1.ConfigMap.html#Get-api-v1-namespaces-namespace-configmaps-name
            if 'kind' in jdata.keys() and jdata['kind'] == 'ConfigMapList' and 'items' in jdata.keys():
                items = jdata['items']
                for item in items:
                    if 'data' in item.keys():
                        data = item['data']
                        if 'results' in data:
                            resource = item
                            self._results_mgr.ingest(resource)
            # https://github.com/ComplianceAsCode/auditree-arboretum/blob/main/arboretum/kubernetes/fetchers/fetch_cluster_resource.py
            else:
                for key in jdata.keys():
                    for group in jdata[key]:
                        for cluster in jdata[key][group]:
                            if 'resources' in cluster:
                                for resource in cluster['resources']:
                                    self._results_mgr.ingest(resource)
        except json.decoder.JSONDecodeError:
            return None
        results = Results()
        results.__root__.append(self._results_mgr.result)
        return results

    def _ingest_yaml(self, blob: str) -> Results:
        """Ingest yaml data.

        NOTE(review): unlike the xml/json ingests this re-raises on failure
        instead of returning None, so transform() propagates the error when
        the blob matched none of the formats — confirm callers expect that.
        """
        try:
            # ? yaml data
            yaml = YAML(typ='safe')
            resource = yaml.load(blob)
            self._results_mgr.ingest(resource)
        except Exception as e:
            raise e
        results = Results()
        results.__root__.append(self._results_mgr.result)
        return results

    def transform(self, blob: str) -> Results:
        """Transform the blob into a Results.

        The expected blob is a string that is one of:
        - data from OpenShift Compliance Operator (json, yaml, xml)
        - data from Auditree OSCO fetcher/check (json)
        Formats are tried in order (xml, json, yaml); each ingest returns
        None to pass the blob along to the next format.
        """
        results = None
        if results is None:
            results = self._ingest_xml(blob)
        if results is None:
            results = self._ingest_json(blob)
        if results is None:
            results = self._ingest_yaml(blob)
        return results
class ProfileToOscoTransformer(FromOscalTransformer):
    """Transform an OSCAL Profile into an OSCO TailoredProfile document."""

    def __init__(
        self,
        extends='ocp4-cis-node',
        api_version='compliance.openshift.io/v1alpha1',
        kind='TailoredProfile',
        name='customized-tailored-profile',
        namespace='openshift-compliance',
    ) -> None:
        """Initialize with OSCO document defaults (overridable per instance)."""
        self._extends = extends
        self._api_version = api_version
        self._kind = kind
        self._name = name
        self._namespace = namespace

    def transform(self, profile: Profile) -> str:
        """Transform the Profile into an OSCO TailoredProfile.

        Returns the document as a JSON string (valid YAML).
        """
        # set values
        set_values = self._get_set_values(profile)
        # spec
        spec = {
            'description': self._get_metadata_prop_value(profile, 'profile_mnemonic', self._name),
            'extends': self._get_metadata_prop_value(profile, 'base_profile_mnemonic', self._extends),
            'title': profile.metadata.title,
            'setValues': set_values,
        }
        disable_rules = self._get_disable_rules(profile)
        if len(disable_rules) > 0:
            spec['disableRules'] = disable_rules
        # yaml data
        ydata = {
            'apiVersion': self._api_version,
            'kind': self._kind,
            'metadata': {
                'name': self._get_metadata_prop_value(profile, 'profile_mnemonic', self._name),
                'namespace': self._namespace,
            },
            'spec': spec,
        }
        return json.dumps(ydata)

    def _get_set_values(self, profile) -> List[Dict]:
        """Extract set_parameter name/value pairs from profile.

        Returns an empty list when the profile has no modify section or no
        set-parameters (the original dereferenced them unguarded, unlike
        _get_disable_rules which None-checks every level).
        """
        set_values = []
        if profile.modify is not None and profile.modify.set_parameters is not None:
            for set_parameter in profile.modify.set_parameters:
                if not set_parameter.values:
                    continue  # no value to emit for this parameter
                name = set_parameter.param_id
                parameter_value = set_parameter.values[0]
                value = parameter_value.__root__
                rationale = self._get_rationale_for_set_value()
                set_value = {'name': name, 'value': value, 'rationale': rationale}
                set_values.append(set_value)
        return set_values

    def _get_metadata_prop_value(self, profile, name, default_) -> str:
        """Extract metadata prop or else default if not present."""
        if profile.metadata.props is not None:
            for prop in profile.metadata.props:
                if prop.name == name:
                    return prop.value
        logger.info(f'using default: {name} = {default_}')
        return default_

    def _get_disable_rules(self, profile) -> List[str]:
        """Extract disabled rules (excluded control ids with rationale)."""
        value = []
        if profile.imports is not None:
            for item in profile.imports:
                if item.exclude_controls is not None:
                    for control in item.exclude_controls:
                        if control.with_ids is not None:
                            for with_id in control.with_ids:
                                name = with_id.__root__
                                rationale = self._get_rationale_for_disable_rule()
                                entry = {'name': name, 'rationale': rationale}
                                value.append(entry)
        return value

    def _get_rationale_for_set_value(self) -> str:
        """Rationale for set value."""
        return 'not determinable from specification'

    def _get_rationale_for_disable_rule(self) -> str:
        """Rationale for disable rule."""
        return 'not determinable from specification'
| [
"noreply@github.com"
] | xee5ch.noreply@github.com |
2458bd10820179534d5d1799a8f740ad985c965e | e55aacec5de90c52b9cb30742924bfffc584027d | /Implementation/Sock_Merchant.py | 4ce479a4de0eb4afa84ca17c6cf885526d745464 | [] | no_license | JonathanWu1120/Hackerrank_Algorithms | 5016a66d516c7a63033aee7f8c2aaa396b7ecdd6 | b53abe5e678a5ac11485068340df2c2a122370f4 | refs/heads/master | 2021-01-19T06:22:50.009873 | 2017-04-06T19:22:21 | 2017-04-06T19:22:21 | 87,459,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #!/bin/python3
import sys
# Number of socks (must be consumed from stdin, though the value itself
# is not needed for the pair count).
n = int(input().strip())
# Sock colours, one integer per sock.
c = [int(c_temp) for c_temp in input().strip().split(' ')]
# Each distinct colour contributes floor(occurrences / 2) complete pairs.
# This replaces the original O(n^2) count-and-remove mutation loop with a
# single count per colour, producing the same printed result.
count = 0
for colour in set(c):
    count += c.count(colour) // 2
print(count)
| [
"jwu166@binghamton.edu"
] | jwu166@binghamton.edu |
258895ade985a2bbe61e97b2d592fb53965ddf4f | 143e8939ac1033912195eb7e6b99f9d06ec908da | /dash/views.py | fd180cab200fdcca4ee3c06fd206eccd18f67823 | [
"BSD-3-Clause"
] | permissive | gvsurenderreddy/routerdash | 63a4c011ede20005f465cb2e9fcd1411c82d0aeb | 321030438008d317d46432e777191dfd4ad1c3d6 | refs/heads/master | 2021-01-18T09:21:26.250338 | 2014-01-03T18:05:40 | 2014-01-03T18:05:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | import json
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from .calculations import get_speeds, get_devices
def json_response(data):
    """Serialize *data* to JSON and wrap it in an HTTP response."""
    payload = json.dumps(data)
    return HttpResponse(payload, content_type="application/json")
def human_speed(raw_speed):
    """Format a raw bytes-per-second rate as a 'X.XX Mb/s' string.

    A rate of None (not yet measured) renders as '...'.
    """
    if raw_speed is None:
        return "..."
    return "%.02f Mb/s" % (raw_speed / 125000.0)
def home(request):
    """Render the dashboard landing page."""
    return render(request, "dashboard.html")
def ajax_speeds(request):
    """Return the external interface's RX/TX rates as a JSON array.

    The array holds [rx_raw, tx_raw, rx_human, tx_human] so the client can
    use either the numeric value or the preformatted string.
    """
    rx, tx = get_speeds(settings.EXTERNAL_INTERFACE)
    return json_response([rx, tx, human_speed(rx), human_speed(tx)])
def ajax_devices(request):
    """Return all devices on the bridge interface as JSON, sorted by name.

    Any per-device 'rx_speed'/'tx_speed' values get a matching
    '<key>_human' field with a preformatted rate string.
    """
    devices = get_devices(settings.BRIDGE_INTERFACE)
    response = []
    for device in devices.values():
        for key in ("rx_speed", "tx_speed"):
            if key in device:
                device[key + "_human"] = human_speed(device[key])
        response.append(device)
    response.sort(key=lambda entry: entry['name'])
    return json_response(response)
| [
"andrew@aeracode.org"
] | andrew@aeracode.org |
b8ce5561d98ed3ece5cdb19ca80579b5e232d69b | baffcef29e33658138c43ef358d7399ab3ea2c0d | /WORKFLOWS/Tools/NEC/NAL/nal-dashboard/nec_portal/dashboards/project/service/urls.py | 1b37e4e206fb65118cf9ebe5adc24feaf15b3061 | [
"Apache-2.0"
] | permissive | openmsa/NO | aa7d4ff000875bfcff0baee24555ec16becdb64e | 24df42ee3927415b552b5e5d7326eecd04ebca61 | refs/heads/master | 2020-03-09T23:21:09.657439 | 2019-03-29T06:29:07 | 2019-03-29T06:29:07 | 129,056,267 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# COPYRIGHT (C) NEC CORPORATION 2017
#
from django.conf.urls import patterns
from django.conf.urls import url
from nec_portal.dashboards.project.service import views
# URL suffix template: '<group_id>/<action>' for per-service-group routes.
SERVISE = r'^(?P<group_id>[^/]+)/%s$'

urlpatterns = patterns(
    '',
    # Service list page.
    url(r'^$', views.IndexView.as_view(), name='index'),
    # Service creation form.
    url(r'^create/$', views.CreateView.as_view(), name='create'),
    # Detail page for a single service group.
    url(SERVISE % 'detail', views.DetailView.as_view(), name='detail'),
    # Update page; 'update_type' selects which kind of update is performed.
    url(SERVISE % '(?P<update_type>[^/]+)/update', views.UpdateView.as_view(),
        name='update'),
)
| [
"ofa@ubiqube.com"
] | ofa@ubiqube.com |
5dc8894ebdb159580f4ba7c27c73bc5fd01c9294 | 14956dbed8ae4fba1d65b9829d9405fcf43ac698 | /Cyber Security/Capture the Flag Competitions/2020/Houseplant CTF 2020/Reversing/LEMON/pass2.py | 844c42e58d01e8397b7e1d91e91d5e8f21a17e00 | [] | no_license | Hackin7/Programming-Crappy-Solutions | ae8bbddad92a48cf70976cec91bf66234c9b4d39 | ffa3b3c26a6a06446cc49c8ac4f35b6d30b1ee0f | refs/heads/master | 2023-03-21T01:21:00.764957 | 2022-12-28T14:22:33 | 2022-12-28T14:22:33 | 201,292,128 | 12 | 7 | null | 2023-03-05T16:05:34 | 2019-08-08T16:00:21 | Roff | UTF-8 | Python | false | false | 2,040 | py | def checkpass():
userinput = input("Enter the password: ")
if userinput[0:4] == "rtcp":
if userinput[10:13] == "tHi":
if userinput[22:25] == "cuR":
if userinput[4:7] == "{y3":
if userinput[16:19] == "1nT":
if userinput[7:10] == "4H_":
if userinput[13:16] == "S_a":
if userinput[19:22] == "_sE":
if userinput [25:27] == "3}":
return True
else:
return False
def main():
    """Check the password once; on success print the flag hint and exit,
    otherwise fall through to the 'catgirl' prompt loop."""
    access = checkpass()
    if access == True:
        print("Unlocked. The flag is the password.")
        print("b-but i wunna show off my catswpeak uwu~... why wont you let me do my nya!!\noh well... good luck with the rest of the ctf :/\nbut I WANT TO SPWEAK CATGIRL NEXT TIME SO YOU BETTER LET ME >:(")
        exit()
    else:
        print("Incorrect password!")
        print("sowwy but now you gunnu have to listen to me spweak in cat giwrl speak uwu~")
        catmain()
def catmain():
    """Re-prompt (in 'catgirl speak') until the password is correct.

    Retries via recursion rather than a loop, so every wrong guess adds a
    stack frame.
    """
    access = catcheckpass()
    if access == True:
        print("s-senpai... i unwocked it fowr you.. uwu~")
        print("t-the fwlag is... the password.. nya!")
        exit()
    else:
        print("sowwy but that wasnt quite rwight nya~")
        catmain()
def catcheckpass():
    """Same piecewise slice check as checkpass(), with the catgirl prompt.

    NOTE(review): indentation reconstructed — a failing outer comparison
    falls through to an implicit None return (falsy), which the caller
    treats the same as False. Confirm against the original file.
    """
    userinput = input("pwease enter youwr password... uwu~ nya!!: ")
    if userinput[0:4] == "rtcp":
        if userinput[10:13] == "tHi":
            if userinput[22:25] == "cuR":
                if userinput[4:7] == "{y3":
                    if userinput[16:19] == "1nT":
                        if userinput[7:10] == "4H_":
                            if userinput[13:16] == "S_a":
                                if userinput[19:22] == "_sE":
                                    if userinput [25:27] == "3}":
                                        return True
                                    else:
                                        return False
# Script entry point: 'access' is never read afterwards; main() drives
# the whole prompt/exit flow.
access = False
main()
| [
"zunmun@gmail.com"
] | zunmun@gmail.com |
2e2d9059217f681eb729dbc014c5415f834c556b | 47e86e60f6239e0f8bf42aeecd8c8a0e8ac50578 | /izi_pos_report_birt/models/rpt_pos_revenue_customer_by_product_and_service_group.py | f9e4ff29a753dbf245b53fd939eba7cc996ab0ac | [] | no_license | HoangDinhHoi/DATN | 0a8d12c031253c7fe21321c4cf493ead3f71ca56 | 09157d6c79a779a701fc01727db8dcd04323dc1d | refs/heads/master | 2020-09-14T02:28:29.528907 | 2020-01-14T07:55:17 | 2020-01-14T07:55:17 | 222,985,616 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,956 | py | # -*- coding: utf-8 -*-
__author__ = "HoiHD"
from odoo import models, fields, api
from odoo.exceptions import ValidationError
import odoo.tools.config as config
class ReportRevenueCustomerAccordingToProductAndServiceGroup(models.TransientModel):
    """Transient wizard that collects filters and opens a BIRT revenue report
    (customer revenue grouped by product and service group)."""
    _name = 'rpt.revenue.customer.product.service.group'
    _description = 'Báo cáo doanh thu của khách hàng theo nhóm sản phẩm và dịch vụ'

    # Branch to report on; choices restricted to the current user's branches.
    branch_id = fields.Many2one('res.branch', string='Branch',
                                domain=lambda self: [('id', 'in', self.env.user.branch_ids.ids)])
    date_from = fields.Date(string='Date from')
    date_to = fields.Date(string='Date to')
    # When set, the BIRT viewer is asked for an xlsx download instead of HTML.
    is_export_excel = fields.Boolean(default=False, string='Export to Excel')

    @api.multi
    def action_report(self):
        """Open the 'customer revenue by product and service group' BIRT report.

        Builds the report URL plus its query parameters and returns a client
        action handled by the BIRT viewer widget.

        Raises:
            ValidationError: if the 'birt_url' option is missing from the
                Odoo server configuration file.
        """
        birt_url = config['birt_url'] or '0'
        if birt_url == '0':
            raise ValidationError("Chưa cấu hình birt_url!")
        date_from = self.date_from.strftime('%d/%m/%Y')
        date_to = self.date_to.strftime('%d/%m/%Y')
        report_name = "rpt_pos_revenue_customer_by_product_and_service_group.rptdesign"
        param_str = {
            '&date_from': date_from,
            '&date_to': date_to,
            # 0 when no branch is selected — presumably interpreted as
            # "all branches" by the report; verify on the BIRT side.
            '&branch_id': str(self.branch_id.id if self.branch_id else 0),
        }
        birt_link = birt_url + report_name
        if self.is_export_excel:
            birt_link += '&__format=xlsx'
        return {
            "type": "ir.actions.client",
            'name': 'Báo cáo doanh thu của khách hàng theo nhóm sản phẩm và dịch vụ',
            'tag': 'BirtViewerActionCurrent',
            'target': 'self',
            'context': {
                'birt_link': birt_link,
                'payload_data': param_str,
            }
        }
| [
"hoanghoihust@gmail.com"
] | hoanghoihust@gmail.com |
class Solution:
    def lcs(self, root, B, aux):
        # Count (ancestor, node) pairs whose values differ by at most B.
        # `aux` carries the values of every ancestor of `root`.
        if root is None:
            return 0
        path = aux + [root.val]
        total = self.lcs(root.left, B, path) + self.lcs(root.right, B, path)
        total += sum(1 for ancestor in aux if abs(ancestor - root.val) <= B)
        return total

    def solve(self, A, B):
        # Entry point: start the walk with an empty ancestor path.
        return self.lcs(A, B, [])
| [
"srajsonu02@gmail.com"
] | srajsonu02@gmail.com |
156ad1f2936e53c9ed0efabc77d0de0a24482fe4 | 3b93f91703a36f8ec8fd1767e719f3e3523ab6f1 | /MyNao/demo_0526.py/online_beat_extract.py | 1d4ac37dd69496ebb86a00f1d64d6c4cef3a7f9e | [] | no_license | SutirthaChakraborty/Real-Time-Music-Driven-Dancing-Robot | 66655b1ed1d000499096295587c9c902a636b688 | 98ab75e8ab199a56f1e80854a891fcf4425dd042 | refs/heads/master | 2023-02-13T01:57:56.185295 | 2021-01-04T19:12:33 | 2021-01-04T19:12:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | import numpy as np
import os
import time
from madmom.features.beats import DBNBeatTrackingProcessor, RNNBeatProcessor
from madmom.models import BEATS_LSTM
from madmom.processors import IOProcessor, process_online
from madmom.io import write_beats
def beat_extractor(queue_beat):
    """Run madmom's online RNN/DBN beat tracker and publish beat times.

    Blocks inside ``process_online`` for as long as the input stream runs;
    each detected beat time (seconds) replaces any stale value in
    ``queue_beat`` so consumers always see only the most recent beat.
    """
    # Parameters shared by the whole madmom online processing chain.
    kwargs = dict(
        fps = 100,
        correct = True,
        infile = None,
        outfile = None,
        max_bpm = 170,
        min_bpm = 60,
        #nn_files = [BEATS_LSTM[0]],
        transition_lambda = 100,
        num_frames = 1,
        online = True,
        verbose = 0
    )

    def beat_callback(beats, output=None):
        # Invoked by the tracker for every processed frame; `beats` holds
        # the newly detected beat times (possibly empty).
        if len(beats) > 0:
            # Keep the queue at depth one: drop the stale beat, then
            # publish the newest.
            if not queue_beat.empty():
                _ = queue_beat.get()
            queue_beat.put(beats[0])
            print(beats)
            #print('Process to write betas: %s' % os.getpid())

    # The RNN produces beat activations; the DBN decodes beat positions
    # online and forwards them to beat_callback.
    in_processor = RNNBeatProcessor(**kwargs)
    beat_processor = DBNBeatTrackingProcessor(**kwargs)
    out_processor = [beat_processor, beat_callback]
    processor = IOProcessor(in_processor, out_processor)
    process_online(processor, **kwargs)
def beat_simulator(queue_beat):
    """Emit a steady 120 BPM beat clock into ``queue_beat`` (never returns).

    Every half second the elapsed time since start replaces any stale
    value in the queue, mirroring beat_extractor's depth-one behaviour.
    """
    started = time.time()
    while True:
        time.sleep(60 / 120)
        elapsed = time.time() - started
        if not queue_beat.empty():
            _ = queue_beat.get()
        queue_beat.put(elapsed)
"zhaojw1998@outlook.com"
] | zhaojw1998@outlook.com |
39297cd1c0e6a9b2bb905e03f0217212d334a1ae | 7e76a72a67596ca230f83b4654615734f8d93414 | /kreddit/mainapp/migrations/0003_auto_20160324_1930.py | 564cc0ecc22e099763b9eb4fa1d560b3d6fe60af | [] | no_license | kjmullen/reddit-remake | dc6d7893034769664bea63f84437c9c02a4e95a5 | e49314d29f752fdb48406c03ed992446415fd3ce | refs/heads/master | 2020-12-28T17:30:08.117132 | 2016-03-31T05:19:03 | 2016-03-31T05:19:03 | 54,669,553 | 0 | 0 | null | 2016-03-24T19:43:38 | 2016-03-24T19:43:38 | null | UTF-8 | Python | false | false | 430 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-25 02:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Post.url nullable — presumably to
    # allow posts without a link; confirm against the model definition.

    dependencies = [
        ('mainapp', '0002_post_url'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='url',
            field=models.URLField(null=True),
        ),
    ]
| [
"kevinkozzik@gmail.com"
] | kevinkozzik@gmail.com |
adcb16ed9299cb9b835cd7ecd2e76c1ef88cbb49 | a9c0daa4a7b9a4d7341afcab270c5b5debb8c13f | /env/lib/python3.6/site-packages/ebcli/__init__.py | bc9e527698d03b59bda58a57cc898f4e3a39d984 | [] | no_license | phamcong/alienator-plf | bad8c4e003fd189c43243b31ef2b975b6f154754 | ea65628af66fbca51f2248ceb4ba93f858dbddce | refs/heads/master | 2022-11-26T01:28:38.286261 | 2017-11-07T15:12:08 | 2017-11-07T15:12:08 | 109,412,097 | 0 | 1 | null | 2020-07-25T23:43:17 | 2017-11-03T15:30:22 | JavaScript | UTF-8 | Python | false | false | 615 | py | #!/usr/bin/env python
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Package version string for the EB CLI distribution.
__version__ = '3.7.7'
| [
"ccuong.ph@gmail.com"
] | ccuong.ph@gmail.com |
ab00aee802d3683a271cb6fc3e219e1ab5e30668 | ab3e72ca2d146055f2966cecd03e077c84b318e3 | /mlcomp/worker/sync.py | 9f2e14cce4761faa66e31012d99e8074b59ba62c | [
"Apache-2.0"
] | permissive | megachester/mlcomp | 69ec01b0fbf55489b860a64c0fde19e39a8d45b4 | 8d30ba0a52e225144533e68295b71acb49e3c68a | refs/heads/master | 2021-02-18T03:58:17.820726 | 2020-03-05T08:28:19 | 2020-03-05T08:28:19 | 245,157,392 | 0 | 0 | Apache-2.0 | 2020-03-05T12:27:58 | 2020-03-05T12:27:58 | null | UTF-8 | Python | false | false | 6,265 | py | import os
import socket
import time
import traceback
import subprocess
from os.path import join
from typing import List
from mlcomp import FILE_SYNC_INTERVAL
from mlcomp.db.core import Session
from mlcomp.db.enums import ComponentType
from mlcomp.db.models import Computer, TaskSynced
from mlcomp.db.providers import ComputerProvider, \
TaskSyncedProvider, DockerProvider, ProjectProvider
from mlcomp.utils.logging import create_logger
from mlcomp.utils.misc import now
from mlcomp.utils.io import yaml_load, yaml_dump
def sync_directed(
    session: Session, source: Computer, target: Computer,
    ignore_folders: List
):
    """Rsync folders one-way from ``source`` to ``target``.

    The rsync invocation differs depending on whether this process runs on
    the source machine (push), the target machine (pull), or a third
    machine (ssh into the source and launch the push from there).

    Args:
        session: DB session, used only for logging.
        source: computer to copy from.
        target: computer to copy to.
        ignore_folders: list of [folder, excluded-subfolders] pairs,
            each folder relative to the computers' root_folder.
    """
    current_computer = socket.gethostname()
    # Common rsync tail: force permissions and compare files by size only.
    end = ' --perms --chmod=777 --size-only'
    logger = create_logger(session, __name__)
    for folder, excluded in ignore_folders:
        if len(excluded) > 0:
            # Copy before rewriting entries into --exclude flags so the
            # caller's list is not mutated.
            excluded = excluded[:]
            for i in range(len(excluded)):
                excluded[i] = f'--exclude {excluded[i]}'
            end += ' ' + ' '.join(excluded)
            # NOTE(review): `end` accumulates across iterations, so later
            # folders inherit earlier folders' --exclude flags. Harmless
            # for the single-entry ignore_folders seen in this file, but
            # worth confirming if that ever changes.

        source_folder = join(source.root_folder, folder)
        target_folder = join(target.root_folder, folder)

        if current_computer == source.name:
            # Push: rsync runs locally on the source host.
            command = f'rsync -vhru -e ' \
                      f'"ssh -p {target.port} -o StrictHostKeyChecking=no" ' \
                      f'{source_folder}/ ' \
                      f'{target.user}@{target.ip}:{target_folder}/ {end}'
        elif current_computer == target.name:
            # Pull: rsync runs locally on the target host.
            command = f'rsync -vhru -e ' \
                      f'"ssh -p {source.port} -o StrictHostKeyChecking=no" ' \
                      f'{source.user}@{source.ip}:{source_folder}/ ' \
                      f'{target_folder}/ {end}'
        else:
            # Third machine: build the push command, then wrap it in an
            # ssh call so it executes on the source host.
            command = f'rsync -vhru -e ' \
                      f'"ssh -p {target.port} -o StrictHostKeyChecking=no" ' \
                      f' {source_folder}/ ' \
                      f'{target.user}@{target.ip}:{target_folder}/ {end}'

            command = f'ssh -p {source.port} ' \
                      f'{source.user}@{source.ip} "{command}"'

        logger.info(command, ComponentType.WorkerSupervisor, current_computer)
        subprocess.check_output(command, shell=True)
def copy_remote(
    session: Session, computer_from: str, path_from: str, path_to: str
):
    """Copy a file from ``computer_from`` to a local path.

    Uses scp when the file lives on another host, a plain cp when this
    process already runs on ``computer_from``.

    Returns:
        True if ``path_to`` exists after the copy.
    """
    provider = ComputerProvider(session)
    src = provider.by_name(computer_from)
    host = socket.gethostname()
    if host != computer_from:
        c = f'scp -P {src.port} {src.user}@{src.ip}:{path_from} {path_to}'
    else:
        # Bug fix: the original built this f-string without assigning it,
        # so `c` was undefined (NameError) on the local-copy path.
        c = f'cp {path_from} {path_to}'
    subprocess.check_output(c, shell=True)
    return os.path.exists(path_to)
class FileSync:
    """Background synchronizer that mirrors task artifacts between computers."""

    session = Session.create_session(key='FileSync')
    logger = create_logger(session, 'FileSync')

    def sync_manual(self, computer: Computer, provider: ComputerProvider):
        """
        button sync was clicked manually
        """
        # The request is stored in the computer's meta blob under
        # 'manual_sync'; absence means there is nothing to do.
        if not computer.meta:
            return
        meta = yaml_load(computer.meta)
        if 'manual_sync' not in meta:
            return

        manual_sync = meta['manual_sync']

        project_provider = ProjectProvider(self.session)
        docker_provider = DockerProvider(self.session)

        dockers = docker_provider.get_online()
        project = project_provider.by_id(manual_sync['project'])

        # Pull the project's model folder from every other online docker
        # host into this computer.
        for docker in dockers:
            if docker.computer == computer.name:
                continue

            source = provider.by_name(docker.computer)
            ignore_folders = [
                [join('models', project.name), []]
            ]
            sync_directed(self.session, target=computer, source=source,
                          ignore_folders=ignore_folders)

        # Mark the manual request as handled.
        del meta['manual_sync']
        computer.meta = yaml_dump(meta)
        provider.update()

    def sync(self):
        """Run one synchronization round for this host.

        Handles any pending manual sync, then pulls pending task artifacts
        from every online source computer and records them as synced.
        """
        hostname = socket.gethostname()
        try:
            provider = ComputerProvider(self.session)
            task_synced_provider = TaskSyncedProvider(self.session)

            computer = provider.by_name(hostname)
            sync_start = now()

            if FILE_SYNC_INTERVAL == 0:
                # Syncing disabled; idle briefly so the caller's loop
                # does not spin.
                time.sleep(1)
            else:
                self.sync_manual(computer, provider)

                computers = provider.all_with_last_activtiy()
                # Only computers seen in the last 10 seconds are online.
                computers = [
                    c for c in computers
                    if (now() - c.last_activity).total_seconds() < 10
                ]
                computers_names = {c.name for c in computers}

                for c, project, tasks in task_synced_provider.for_computer(
                        computer.name):
                    if c.sync_with_this_computer:
                        if c.name not in computers_names:
                            self.logger.info(f'Computer = {c.name} '
                                             f'is offline. Can not sync',
                                             ComponentType.WorkerSupervisor,
                                             hostname)
                            continue

                        if c.syncing_computer:
                            # Another sync from that computer is already
                            # in flight; skip this round.
                            continue

                        ignore_folders = [
                            [join('models', project.name), []]
                        ]

                        computer.syncing_computer = c.name
                        provider.update()

                        sync_directed(self.session, c, computer,
                                      ignore_folders)

                    # NOTE(review): indentation reconstructed — tasks are
                    # recorded as synced per source computer; confirm the
                    # placement relative to the `if` against the original.
                    for t in tasks:
                        task_synced_provider.add(
                            TaskSynced(computer=computer.name, task=t.id)
                        )

                time.sleep(FILE_SYNC_INTERVAL)

            computer.last_synced = sync_start
            computer.syncing_computer = None
            provider.update()
        except Exception as e:
            # On a dropped DB connection rebuild the session and logger
            # before reporting; otherwise just log the traceback.
            if Session.sqlalchemy_error(e):
                Session.cleanup('FileSync')
                self.session = Session.create_session(key='FileSync')
                self.logger = create_logger(self.session, 'FileSync')

            self.logger.error(
                traceback.format_exc(), ComponentType.WorkerSupervisor,
                hostname
            )
"lightsanweb@yandex.ru"
] | lightsanweb@yandex.ru |
2c6136ca3eed03141ace8fce8413f3ad06aafd6b | 2aec9c5e8c72b731d3abf22f2a407fe09c1cde09 | /ZhihuTopicRedis/ZhihuTopicRedis/items.py | fb602cdbc16750b70dda7c86a8886b6a5e3cd084 | [] | no_license | jiangyg/ZWFproject | 8b24cc34970ae0a9c2a2b0039dc527c83a5862b5 | aa35bc59566d92721f23d2dd00b0febd268ac2dd | refs/heads/master | 2020-09-26T17:01:00.229380 | 2019-11-15T13:16:21 | 2019-11-15T13:16:21 | 226,297,631 | 0 | 1 | null | 2019-12-06T09:55:37 | 2019-12-06T09:55:36 | null | UTF-8 | Python | false | false | 705 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ZhihutopicItem(scrapy.Item):
    """Item holding one Zhihu topic returned by a keyword search."""
    # Keyword the search was run for.
    kw = scrapy.Field()
    # Topic title.
    title = scrapy.Field()
    # Topic logo URL.
    avatar_url = scrapy.Field()
    # Topic description text.
    description = scrapy.Field()
    # Number of followers.
    followers_count = scrapy.Field()
    # Search result id.
    id_no = scrapy.Field()
    # Number of questions under the topic.
    questions_count = scrapy.Field()
    # Number of highlighted ("essence") answers.
    top_answer_count = scrapy.Field()
    # Link to the topic page.
    topic_url = scrapy.Field()
| [
"34021500@qq.com"
] | 34021500@qq.com |
def eh_palindromo(string):
    """Return True when *string* reads the same forwards and backwards.

    Fixes two bugs in the original: it compared a character against an
    integer (``string[e] == len(string) - e``, always False), and its
    ``e =+ 1`` assigned +1 instead of incrementing, so the index never
    advanced past 1.
    """
    for i in range(len(string) // 2):
        if string[i] != string[len(string) - 1 - i]:
            return False
    return True
"you@example.com"
] | you@example.com |
567ec3b3318af7a4312913b815332dea5f0b7607 | 770f6e034a5482d4999b00b182e616f84d95ffdf | /testing/cross_language/json_test.py | 73554eb235f32a0d0092484a2ccf85f03f5de77e | [
"Apache-2.0"
] | permissive | ptylll/tink | f2f274bcb8b6d8449e25ef975e60bff8945d3406 | eafd9283b1d1da1dfc08c5297c101cd4b2d530c5 | refs/heads/master | 2022-11-27T14:55:51.381202 | 2020-08-03T14:39:04 | 2020-08-03T14:39:37 | 284,962,074 | 1 | 0 | Apache-2.0 | 2020-08-04T11:41:22 | 2020-08-04T11:41:21 | null | UTF-8 | Python | false | false | 3,559 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cross-language tests for JSON serialization."""
from absl.testing import absltest
from absl.testing import parameterized
from tink.proto import tink_pb2
from util import supported_key_types
from util import testing_servers
def setUpModule():
  # Start the per-language testing servers once for the whole module.
  testing_servers.start('json')
def tearDownModule():
  # Stop the testing servers started in setUpModule.
  testing_servers.stop()
def _keyset_proto(keyset: bytes) -> tink_pb2.Keyset:
  """Parse a serialized keyset, with its keys sorted by key_id.

  Sorting makes two keysets compare equal even when their keys were
  serialized in a different order.
  """
  parsed = tink_pb2.Keyset()
  parsed.ParseFromString(keyset)
  parsed.key.sort(key=lambda entry: entry.key_id)
  return parsed
def _is_equal_keyset(keyset1: bytes, keyset2: bytes) -> bool:
  """Checks if two keysets are equal, with the exact same keydata.value.

  keydata.value holds serialized protos whose serialization is generally
  not deterministic, but JSON conversion must not change those bytes, so
  exact equality is the right check for this test.
  """
  return _keyset_proto(keyset1) == _keyset_proto(keyset2)
class JsonTest(parameterized.TestCase):
  """Verifies that keysets survive JSON round-trips across all languages."""

  def test_is_equal_keyset(self):
    # Same two keys added in opposite order must still compare equal.
    keyset1 = tink_pb2.Keyset()
    key11 = keyset1.key.add()
    key11.key_id = 21
    key12 = keyset1.key.add()
    key12.key_id = 42
    keyset2 = tink_pb2.Keyset()
    key21 = keyset2.key.add()
    key21.key_id = 42
    key22 = keyset2.key.add()
    key22.key_id = 21
    self.assertTrue(_is_equal_keyset(keyset1.SerializeToString(),
                                     keyset2.SerializeToString()))

  def test_is_not_equal_keyset(self):
    # Keysets with a different number of keys must not compare equal.
    keyset1 = tink_pb2.Keyset()
    key11 = keyset1.key.add()
    key11.key_id = 21
    key12 = keyset1.key.add()
    key12.key_id = 42
    keyset2 = tink_pb2.Keyset()
    key3 = keyset2.key.add()
    key3.key_id = 21
    self.assertFalse(_is_equal_keyset(keyset1.SerializeToString(),
                                      keyset2.SerializeToString()))

  def assertEqualKeyset(self, keyset1: bytes, keyset2: bytes):
    # Custom assertion that prints both parsed keysets on mismatch.
    if not _is_equal_keyset(keyset1, keyset2):
      self.fail('these keysets are not equal: \n%s\n \n%s\n'
                % (_keyset_proto(keyset1), _keyset_proto(keyset2)))

  @parameterized.parameters(
      supported_key_types.test_cases(supported_key_types.ALL_KEY_TYPES))
  def test_to_from_json(self, key_template_name, supported_langs):
    # For every key type: generate a keyset in one language, convert it to
    # JSON in each supported language, parse it back in each supported
    # language, and require the result to match the original exactly.
    self.assertNotEmpty(supported_langs)
    key_template = supported_key_types.KEY_TEMPLATE[key_template_name]
    # Take the first supported language to generate the keyset.
    keyset = testing_servers.new_keyset(supported_langs[0], key_template)
    for to_lang in supported_langs:
      json_keyset = testing_servers.keyset_to_json(to_lang, keyset)
      for from_lang in supported_langs:
        keyset2 = testing_servers.keyset_from_json(from_lang, json_keyset)
        self.assertEqualKeyset(keyset, keyset2)
if __name__ == '__main__':
absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
33e78c202f9b437a7c135e49ee2046362470f244 | 32a4c816b70964a0aa1f0370b51e57c93ad0b7be | /finance/helpers/transfers.py | 563db7d32b2043bbe5d1b1e30317224f1c32efb2 | [] | no_license | mithro/timsfinance | 0cba66a9c60b3b7d35e45e2d0304fc2221306951 | fd65bdd77d28ba203bf741f72bb6e102fe3cc8e5 | refs/heads/importer-rewrite | 2020-12-24T17:08:54.281576 | 2013-07-19T08:18:02 | 2013-07-19T08:18:02 | 2,638,754 | 1 | 1 | null | 2013-07-09T05:08:10 | 2011-10-24T19:53:14 | Python | UTF-8 | Python | false | false | 1,977 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: set ts=4 sw=4 et sts=4 ai:
import datetime
from finance import models
from finance.helpers import base
class Transfers(base.Helper):
"""
The linker looks for transactions which should be linked together, such as
transfers between accounts.
"""
# Descriptions which match the following should be looked at
# Anything with "PAYMENT" in it
# Anything with "TRANSFER" in it
TRANSFERS = ("PAYMENT", "PMNT", "TRANSFER", "Direct Debit")
def __init__(self, *args, **kw):
base.Helper.__init__(self, *args, **kw)
self.category = models.Category.objects.get(category_id='transfer')
def associate(self, a, b):
return base.Helper.associate(self, a, b, relationship="TRANSFER")
def handle(self, account, trans):
for desc_match in self.TRANSFERS:
if desc_match in trans.imported_description.upper():
break
else:
return
print
print trans
# If this already had reference set, then done
related = trans.related_transactions(relationship="TRANSFER")
if len(related) > 0:
print " ", related
return
# First attempt to find a transaction 7 days either way with the exact same amount
q = models.Transaction.objects.all(
).filter(imported_entered_date__gt=trans.imported_entered_date-datetime.timedelta(days=7)
).filter(imported_entered_date__lt=trans.imported_entered_date+datetime.timedelta(days=7)
).filter(imported_amount__exact=-trans.imported_amount
)
if len(q) == 1:
r = self.associate(trans, q[0])
print " Exact: ", r
r.save()
trans.primary_category = self.category
trans.save()
q[0].primary_category = self.category
q[0].save()
else:
print " Exact: ", q
| [
"mithro@mithis.com"
] | mithro@mithis.com |
514eecee3ad930fa6d0385a5613b59f1500b339b | acbbcc2daff7538534604e512215a84b2f8e11ba | /Cluster/OMapWordlist.py | 33b2bba6fc02b0ff96c6c2c527f93658e1f4b0ff | [] | no_license | ZhuJiahui/MicroblogDataStreamCompress | 7cfed72b8cb51c171f0d82c243baf62fbef0df55 | a040a6ebb7b449591f79fef8e71f40719ee78c0b | refs/heads/master | 2016-09-06T05:40:51.017616 | 2014-04-06T07:17:16 | 2014-04-06T07:17:16 | 18,484,462 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | # -*- coding: utf-8 -*-
'''
Created on 2014年1月5日
@author: ZhuJiahui506
'''
import os
import numpy as np
from Reflect import reflect_vsm_to_wordlist
from TextToolkit import quick_write_list_to_text
import time
def map_word_list(read_directory1, read_directory2, write_filename):
#文件总数
file_number = np.sum([len(files) for root, dirs, files in os.walk(read_directory1)])
result = []
for i in range(file_number):
word_list = []
f = open(read_directory2 + '/' + str(i + 1) + '.txt')
line = f.readline()
while line:
word_list.append(line.strip())
line = f.readline()
f.close()
vsm = np.loadtxt(read_directory1 + '/' + str(i + 1) + '.txt')
vsm = vsm.T
for each in vsm:
result.append(" ".join(reflect_vsm_to_wordlist(each, word_list)))
quick_write_list_to_text(result, write_filename)
if __name__ == '__main__':
start = time.clock()
now_directory = os.getcwd()
root_directory = os.path.dirname(now_directory) + '/'
read_directory1 = 'D:/Local/DataStreamMining/dataset/cluster/topics_data22/original_cluster_center'
read_directory2 = 'D:/Local/DataStreamMining/dataset/cluster/topics_data22/original_merge_wordlist'
write_filename = 'D:/Local/DataStreamMining/dataset/cluster/topics_data22/cluster_text_result.txt'
map_word_list(read_directory1, read_directory2, write_filename)
print 'Total time %f seconds' % (time.clock() - start)
print 'Complete !!!'
| [
"zhujiahui@outlook.com"
] | zhujiahui@outlook.com |
36934efddf72964da5dcd53a82fea8b6a5f1761b | 295efbd5b466d246ff51c3849cea5ff637df6211 | /model1.py | 3bf46b97fdf59bacd284ffebaeefbf6c9db673bc | [] | no_license | allenjianzhe/mip_heuristic | 75b15ce9d9735fdc0f5381bfef8cded4d5348a12 | 1365b63b2b3a3814b271e3bb95fb6671486e84fc | refs/heads/master | 2020-09-26T08:41:57.244645 | 2016-10-12T22:43:09 | 2016-10-12T22:43:09 | 66,394,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,955 | py | #file name 060915.py for MIP1
import sys
#sys.path.append('C:/gurobi562/win64/python27/lib/gurobipy')
sys.path.append('C:/gurobi604/win32/python27/lib/gurobipy') # for apple
from gurobipy import *
from read052815_apple import *
import time
# Wall-clock start for timing the build (time.clock() — this is a
# Python 2-era script; time.clock() was removed in Python 3.8).
start_time=time.clock()
m = Model('MIP')
# Big-M constant used to void time constraints for outsourced (3PL) legs.
M=1000000000
# Per-unit coefficient applied to the 3PL binary y in the objective —
# presumably the fixed outsourcing cost; confirm against the model spec.
F=600000
def MIP1(m,customer,arc_C):
    """Populate gurobipy model ``m`` with the routing/3PL/tardiness MIP.

    Args:
        m: gurobipy Model that variables and constraints are added to.
        customer: iterable of rows; row[0] is the customer id, row[2] the
            demand quantity, row[4] the per-unit tardiness cost (inferred
            from how objective coefficients are built -- confirm).
        arc_C: arc capacity dict keyed by (node, mode, departure).

    Returns:
        m, with the variable dicts stashed on ``m.__data = (X, y, t, T)``.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; nesting of the m.update() calls and inner loops should be
    confirmed against the original file.
    """
    ####################################################################################################
    #decision variable X: binary variable. X[customer,i,i+1,m,k]
    X = {}
    for row in customer:
        for i in nodes:
            if i == 2:
                # Node 2 is skipped in every block below -- presumably the
                # terminal node with no outgoing leg; confirm.
                continue
            else:
                for k in modes:
                    for s in departure:
                        # Objective coefficient: demand * per-unit arc cost.
                        X[int(row[0]),i,k,s]=m.addVar(obj=int(row[2])*arc_trans_cost[int(row[0]),i,k],vtype=GRB.BINARY,name='X_%s_%s_%s_%s'%(int(row[0]),i,k,s))
    m.update()
    #decision variable y: binary variable of 3PL
    # NOTE(review): y is declared global and indexed without being created
    # here -- it must be an existing dict supplied by the star-import or a
    # prior caller; confirm.
    global y
    global F
    for row in customer:
        for i in nodes:
            y[int(row[0]),i]=m.addVar(obj=int(row[2])*F,vtype=GRB.BINARY,name='y_%s_%s'%(int(row[0]),i))
    m.update()
    #decision variable: arrive time at each node
    t={}
    for row in customer:
        for i in nodes:
            t[int(row[0]),i]=m.addVar(obj=0,vtype='C',name='nodeTime_%s_%s'%(int(row[0]),i))
    #decision variable:Time tardiness of customer
    T={}
    for row in customer:
        T[int(row[0])]=m.addVar(obj=int(row[2])*int(row[4]),vtype='C',name='Tardiness_%s'%(int(row[0])))
    m.update()
    ####################################################################################################
    #Constraint 3.2 for each customer, each link, only one plan can be selected
    for row in customer:
        for i in nodes:
            if i==2:
                continue
            else:
                expr = LinExpr()
                for k in modes:
                    for s in departure:
                        expr.addTerms(1.0,X[int(row[0]),i,k,s])
                # Either exactly one (mode, departure) plan or the 3PL option.
                expr.add(y[int(row[0]),i])
                m.addConstr(expr, GRB.EQUAL, 1,name='One_%s_%s_%s_%s'%(int(row[0]),i,k,s))
    m.update()
    ####################################################################################################
    #constraint 3.4 arc capacity
    for k in modes:
        for s in departure:
            for i in nodes:
                if i==2:
                    continue
                else:
                    expr = LinExpr()
                    for row in customer:
                        expr.addTerms(int(row[2]),X[int(row[0]),i,k,s])
                    expr.addConstant(-1*arc_C[i,k,s])
                    m.addConstr(expr,GRB.LESS_EQUAL, 0,'arcCapacity_%s_%s_%s_%s'%(int(row[0]),i,k,s))
    m.update()
    #constraint 3.5 time constraint One
    for row in customer:
        for i in nodes:
            if i==2:
                continue
            else:
                expr = LinExpr()
                for k in modes:
                    for s in departure:
                        expr.addTerms(dT[i,k,s]+trans_time[i,k],X[int(row[0]),i,k,s])
                # Big-M relaxation: the constraint becomes void when the
                # 3PL handles this leg (y = 1).
                expr.add(-1*y[int(row[0]),i]*M)
                m.addConstr(expr,GRB.LESS_EQUAL,t[int(row[0]),i],name='timeConstr1_%s_%s'%(int(row[0]),i))
    m.update()
    #definition of T
    # NOTE(review): X and t hold gurobipy Var objects at build time, so
    # these `>` comparisons operate on expression objects rather than
    # solved values -- behavior kept exactly as written; confirm intent.
    for row in customer:
        for k in modes:
            for s in departure:
                if X[int(row[0]),1,k,s]>0:
                    if t[int(row[0]),1]>DD[int(row[0])]:
                        m.addConstr(T[int(row[0])],GRB.EQUAL,t[int(row[0]),1]-DD[int(row[0])],name='Tardiness_%s'%(int(row[0])) )
    m.update()
    m.__data=X,y,t,T
    return m
| [
"allenjianzhe@yahoo.com"
] | allenjianzhe@yahoo.com |
522c35efeef1b14d4c9cd6776ee2646f511fcc33 | b55f70755712b26688b80a8ba3806a4124fbcd11 | /LinkedList/reverse_linkedlist.py | dc10bda1403d739325203fbdc3e6d560945d7f20 | [] | no_license | Shanshan-IC/Algorithm_Python | a44703a0f33370c47e3e55af70aadeae08d5a1a5 | ace23976d2f1f51141498c4c4ea6bca0039b233f | refs/heads/master | 2021-09-08T07:16:59.576674 | 2018-03-08T09:24:01 | 2018-03-08T09:24:01 | 114,254,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | ''''
http://www.lintcode.com/zh-cn/problem/reverse-linked-list/
'''
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param: head: n
@return: The new head of reversed linked list.
"""
def reverse(self, head):
cur = None
while head:
tmp = head.next
head.next = cur
cur = head
head = tmp
return cur
| [
"shanshan.fu15@imperial.ac.uk"
] | shanshan.fu15@imperial.ac.uk |
d9f43d1c561b4a410c8366be55dde291f4661542 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/operations/_usage_operations.py | ce8b45dc92c31a4dae09fdf96881fa1357edd348 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 5,392 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsageOperations(object):
"""UsageOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListUsagesResult"]
"""Gets, for the specified location, the current compute resource usage information as well as the
limits for compute resources under the subscription.
:param location: The location for which resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListUsagesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_07_01.models.ListUsagesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListUsagesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListUsagesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages'} # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
13cbf185e243bfd91725740f6832e60a9a782369 | 148ba38e84919b7a44fa193e2899993051fdb542 | /tests/requester_session.py | e637a86122e6c2896ab3ef483741bd0d3abf7332 | [
"MIT"
] | permissive | igor-kupczynski/Requester | e8e76f8f421558638439083d4863445d3b6ad5db | 93fd2673cf69a97b0e0d49a2411abe6148326f41 | refs/heads/master | 2022-03-05T21:10:59.099429 | 2019-11-12T14:06:58 | 2019-11-12T14:06:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # flake8: noqa
###env
import requests
s = requests.Session()
s.get('http://127.0.0.1:8000/cookies/set?k0=v0', timeout=5)
s.headers.update({'X-Test': 'true'})
prepped = s.prepare_request(requests.Request('POST', 'http://127.0.0.1:8000/post', json={'a': 'b'}))
###env
s.get('http://127.0.0.1:8000/cookies/set?k1=v1', allow_redirects=False)
s.send(prepped)
| [
"kylebebak@gmail.com"
] | kylebebak@gmail.com |
8024ba5b4330783ded7dd067ea1cf890af49dc58 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractNightFallTranslations.py | 594af0962bcbd229a5adb300cb23106ebecb7668 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 222 | py | def extractNightFallTranslations(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
return False
| [
"something@fake-url.com"
] | something@fake-url.com |
e69a89c9aca5d7a747ce35c9a3614f0749e9b9c6 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /netex/models/class_of_use_ref_structure.py | acfe741dd3c81420428778d7f91ac88418740eea | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 225 | py | from dataclasses import dataclass
from .type_of_value_ref_structure import TypeOfValueRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class ClassOfUseRefStructure(TypeOfValueRefStructure):
pass
| [
"chris@komposta.net"
] | chris@komposta.net |
4914f091478c59559e9a61fa777ab6870e7ab96b | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4347/codes/1596_2894.py | 372d41623df33594fd6a8e6a027a5fb1410be9b1 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | valor_do_jogo1= float(input("Qual o valor unitario do jogo? "))
valor_do_frete= 45.0
total=valor_do_jogo1 * 8 + valor_do_frete
print(total) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
9c0c4fb179834a7ca36de9a9189ada09e37ceef0 | bfb35e35c2e44b55ac77c66b483d737e0a947e16 | /20210201/生成pyd文件/To_pyd.py | 959cdac1739c66aa26cbc736972887f959be7102 | [] | no_license | Aimee888/Python-20210127 | 8c68230650f4ebb9b06c0facffc2976bd653d8ab | c683d440e21821f2a9a0e52d39d82071ae0b3405 | refs/heads/main | 2023-03-17T04:32:58.714464 | 2021-02-27T01:18:29 | 2021-02-27T01:18:29 | 333,348,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | #!/usr/bin/env python
# _*_ coding: UTF-8 _*_
"""=================================================
@Project -> File : Python-20210127 -> To_pyd.py
@IDE : PyCharm
@Author : Aimee
@Date : 2021/2/1 10:51
@Desc :
================================================="""
def test():
print("Test .pyd function")
| [
"961745931@qq.com"
] | 961745931@qq.com |
2f30f57136d05751f501d186279ee37d5dc703c3 | 38edf3974ac953fc916858651ccea167470e5ec8 | /Hackfest/Hackfest-Resources/Level 7/flip.py | 2f75915a8a24f7612d0451685ad3e69d68291724 | [] | no_license | duaraghav8/Duncheon | 597d49417aec9784c7428c740a2cfc5c5469079b | 69f7a775cb7df4d7ae99a3766260c3a098c52978 | refs/heads/master | 2016-09-06T07:19:34.153274 | 2015-08-14T17:31:37 | 2015-08-14T17:31:37 | 39,578,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | password = "136ilovebunnies247"
enc = ""
for c in password:
enc += chr(255-ord(c))
f = open('password.txt', 'w')
f.write(enc)
| [
"duaraghav8@gmail.com"
] | duaraghav8@gmail.com |
d8f44e410c0a7f63d40415236b0febfddc26566e | 754da0ff4d08621511e905c088284e8680edc0df | /0x03-python-data_structures/2-replace_in_list.py | b1e91dc09721778cdf7a954c04007114b87fe111 | [] | no_license | AngieCastano1634/holbertonschool-higher_level_programming-1 | 0e77289f6761f2057bed5b2163df43dea4d6908a | 67d5ed8181c51f5ff67a905fff39dcc605fb32ce | refs/heads/master | 2022-12-22T13:20:36.889380 | 2020-09-25T03:31:25 | 2020-09-25T03:31:25 | 299,758,479 | 0 | 1 | null | 2020-09-29T23:10:59 | 2020-09-29T23:10:58 | null | UTF-8 | Python | false | false | 248 | py | #!/usr/bin/python3
def replace_in_list(my_list, idx, element):
if idx < 0 or idx > (len(my_list) - 1):
return my_list
for i in range(0, len(my_list)):
if i == idx:
my_list[i] = element
return my_list
| [
"andergcp@hotmail.com"
] | andergcp@hotmail.com |
e6f2da70126b5f9645097ed27b380bfdb7169bfe | ea43a4ca9ad544fc3107aff1e5a4dd8c9a3898b1 | /biliob_analyzer/coin.py | e8d7027b84952516743eee5660fc09d0ea323b86 | [
"MIT"
] | permissive | z464244404/biliob-spider | 9447459cb722f71b6419ecaa53f80fdcbae11b22 | dc6880ab8457e469677575d1961899bd966bc1b0 | refs/heads/master | 2020-08-10T13:58:10.332858 | 2019-10-01T01:24:43 | 2019-10-01T01:24:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | from db import settings
from db import db
import datetime
coll = db['video'] # 获得collection的句柄
start_date = datetime.datetime(2018,11,22)
end_date = datetime.datetime(2018,12,22)
value = 'view'
d = {}
for each in coll.find():
author_name = each['author']
d[author_name] = []
each['data'].reverse()
s_value = None
s_date = None
for each_data in each['data']:
if each_data['datetime'] < start_date:
continue
if each_data['datetime'] > end_date:
continue
if s_value == None:
s_value = each_data[value]
s_date = each_data['datetime']
d[author_name] = [{'value':0,'date':s_date.date()}]
continue
c_value = each_data[value] - s_value
c_date = each_data['datetime']
d[author_name].append({'value':c_value,'date':c_date.date()})
pass
pass | [
"jannchie@gmail.com"
] | jannchie@gmail.com |
2c8f22898642d623f2dc9b9b5efdc099726242f5 | 3122ac39f1ce0a882b48293a77195476299c2a3b | /clients/python/generated/swaggyjenkins/models/github_respository_container.py | 48dee5604c6f39d2d948716e12bb4e6c2e073ec9 | [
"MIT"
] | permissive | miao1007/swaggy-jenkins | 4e6fe28470eda2428cbc584dcd365a21caa606ef | af79438c120dd47702b50d51c42548b4db7fd109 | refs/heads/master | 2020-08-30T16:50:27.474383 | 2019-04-10T13:47:17 | 2019-04-10T13:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,610 | py | # coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
OpenAPI spec version: 1.1.1
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class GithubRespositoryContainer(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_class': 'str',
'links': 'GithubRespositoryContainerlinks',
'repositories': 'GithubRepositories'
}
attribute_map = {
'_class': '_class',
'links': '_links',
'repositories': 'repositories'
}
def __init__(self, _class=None, links=None, repositories=None): # noqa: E501
"""GithubRespositoryContainer - a model defined in OpenAPI""" # noqa: E501
self.__class = None
self._links = None
self._repositories = None
self.discriminator = None
if _class is not None:
self._class = _class
if links is not None:
self.links = links
if repositories is not None:
self.repositories = repositories
@property
def _class(self):
"""Gets the _class of this GithubRespositoryContainer. # noqa: E501
:return: The _class of this GithubRespositoryContainer. # noqa: E501
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this GithubRespositoryContainer.
:param _class: The _class of this GithubRespositoryContainer. # noqa: E501
:type: str
"""
self.__class = _class
@property
def links(self):
"""Gets the links of this GithubRespositoryContainer. # noqa: E501
:return: The links of this GithubRespositoryContainer. # noqa: E501
:rtype: GithubRespositoryContainerlinks
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this GithubRespositoryContainer.
:param links: The links of this GithubRespositoryContainer. # noqa: E501
:type: GithubRespositoryContainerlinks
"""
self._links = links
@property
def repositories(self):
"""Gets the repositories of this GithubRespositoryContainer. # noqa: E501
:return: The repositories of this GithubRespositoryContainer. # noqa: E501
:rtype: GithubRepositories
"""
return self._repositories
@repositories.setter
def repositories(self, repositories):
"""Sets the repositories of this GithubRespositoryContainer.
:param repositories: The repositories of this GithubRespositoryContainer. # noqa: E501
:type: GithubRepositories
"""
self._repositories = repositories
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GithubRespositoryContainer):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"cliffano@gmail.com"
] | cliffano@gmail.com |
caefdb750f7451f754136362f793e60d708369c7 | 85a27847c7bab4dd49c48ea19d16ad4c9de963f8 | /examples/2d/hubbard_honeycomb/main.py | 452ad469b3225d4987ab0a5b1cbe46ef4bc1b894 | [] | no_license | zx-sdu/pygra | edcf3a2648f8413cff5ed753120d414999e3cc76 | bd7f930ccac414892d18dc240b88ee95def1e50b | refs/heads/master | 2020-09-27T19:55:34.719955 | 2018-06-30T09:17:04 | 2018-06-30T09:17:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | # zigzag ribbon
import sys
import os
sys.path.append("../../../pygra") # add pygra library
import numpy as np
import geometry
import scftypes
import operators
from scipy.sparse import csc_matrix
g = geometry.honeycomb_lattice()
g.write()
Us = np.linspace(0.,4.,10) # different Us
#Us = [2.,4.]
#Us = [2.]
f = open("EVOLUTION.OUT","w") # file with the results
for U in Us: # loop over Us
# import scftypes
h = g.get_hamiltonian() # create hamiltonian of the system
h = h.get_multicell()
h.shift_fermi(0.0)
mf = scftypes.guess(h,mode="antiferro")
scf = scftypes.selfconsistency(h,nkp=10,filling=0.5,g=U,
mix=0.9,mf=mf,mode="U")
h = scf.hamiltonian # get the Hamiltonian
# h.get_bands() # calculate band structure
# import groundstate
f.write(str(U)+" "+str(scf.gap)+"\n")
# groundstate.swave(h)
#groundstate.hopping(h)
f.close()
| [
"jose.luis.lado@gmail.com"
] | jose.luis.lado@gmail.com |
e032f064b6c222a89f88751f05f5b40f17fc697f | bd55c7d73a95caed5f47b0031264ec05fd6ff60a | /apps/core/migrations/0060_auto_20180714_2320.py | a8550d55f1c2ad5b649d3cc9977d3d155fe3b2b4 | [] | no_license | phonehtetpaing/ebdjango | 3c8610e2d96318aff3b1db89480b2f298ad91b57 | 1b77d7662ec2bce9a6377690082a656c8e46608c | refs/heads/main | 2023-06-26T13:14:55.319687 | 2021-07-21T06:04:58 | 2021-07-21T06:04:58 | 381,564,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,868 | py | # Generated by Django 2.0.5 on 2018-07-14 14:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0059_merge_20180713_2203'),
]
operations = [
migrations.AddField(
model_name='vendor',
name='oem_service_name',
field=models.CharField(max_length=256, null=True, verbose_name='OEM Service Name'),
),
migrations.AlterField(
model_name='automessagecontroller',
name='auto_message_trigger',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagecontroller_auto_message_trigger', to='core.AutoMessageTrigger', verbose_name='auto_message_trigger'),
),
migrations.AlterField(
model_name='automessagehistory',
name='auto_message_condition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagehistory_auto_message_condition', to='core.AutoMessageCondition', verbose_name='auto_message_condition'),
),
migrations.AlterField(
model_name='automessagehistory',
name='auto_message_trigger',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagehistory_auto_message_trigger', to='core.AutoMessageTrigger', verbose_name='auto_message_trigger'),
),
migrations.AlterField(
model_name='automessagetrigger',
name='auto_message_condition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagetrigger_auto_message_condition', to='core.AutoMessageCondition', verbose_name='auto_message_condition'),
),
]
| [
"phonehtetpaing1221@gmail.com"
] | phonehtetpaing1221@gmail.com |
653405bc92fc15389f4fab309c31b070d3b23323 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_24/models/resource_space_get_response.py | 309ad35ae95011fe8cf2eadb3a0de09b694fed58 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 6,065 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.24
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_24 import models
class ResourceSpaceGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[ResourceSpace]',
'total': 'list[ResourceSpace]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items',
'total': 'total'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.ResourceSpace]
total=None, # type: List[models.ResourceSpace]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[ResourceSpace]): Returns a list of all items after filtering. The values are displayed for each name, if applicable. If `total_only=true`, the `items` list will be empty.
total (list[ResourceSpace]): The aggregate value of all items after filtering. If applicable, the average value is displayed instead. If applicable, the values are displayed for each field.
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
if total is not None:
self.total = total
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourceSpaceGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourceSpaceGetResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourceSpaceGetResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourceSpaceGetResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResourceSpaceGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceSpaceGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
05f44b6902d671803609ac1d4c1dfd0e4525986d | 4cd35b22c2bbe3658c5582132a07a18764db47d5 | /tests/test_github.py | d53ae2bb02b2b22526738c1dbcb0d99cae28f729 | [] | no_license | crouchred/img2url | ec7e9274ba2f5b15021965b3494ce96da3b80f79 | d317d27d8927624f0fdcf73ce6c5c16d3cfd7d21 | refs/heads/master | 2020-04-10T03:06:22.931641 | 2018-12-07T03:17:41 | 2018-12-07T03:17:41 | 160,760,849 | 0 | 0 | null | 2018-12-07T02:39:22 | 2018-12-07T02:39:22 | null | UTF-8 | Python | false | false | 2,391 | py | # -*- coding: utf-8 -*-
from __future__ import (
division, absolute_import, print_function, unicode_literals,
)
from builtins import * # noqa
from future.builtins.disabled import * # noqa
from tempfile import NamedTemporaryFile
import random
import string
import base64
import pytest
from img2url.config import load_config
from img2url.remotes.github import (
GitHubConfig, GitHubOperation,
)
def random_str(n):
return ''.join(
random.SystemRandom().choice(string.ascii_uppercase)
for _ in range(n)
)
def tmpfile(content, _disable_gc=[]):
f = NamedTemporaryFile(prefix='tmp-img2url-' + random_str(10))
_disable_gc.append(f)
with open(f.name, 'w', encoding='utf-8') as _f:
_f.write(content)
return f.name
def token():
_b64token = 'OTBkZGE1MGQyZjBjNTViMGFhYzIwMzE1YmEwYjU2ZmZhMGEyMWY4Mw=='
t = base64.b64decode(_b64token)
return t.decode('ascii')
CONFIG_PATH = tmpfile('''
github_token: {0}
github_user: img2url-testing
github_repo: img2url-testing-travisci
'''.format(token()))
def test_config():
GitHubConfig(load_config(CONFIG_PATH))
bad_path = tmpfile('''
github_user: img2url-testing
github_repo: img2url-testing-travisci
''')
with pytest.raises(RuntimeError):
GitHubConfig(load_config(bad_path))
def test_create_and_update():
path = tmpfile(random_str(10))
config = GitHubConfig(load_config(CONFIG_PATH))
operator = GitHubOperation(config, path)
assert operator.create_file()
assert operator.update_file(old_fhash=operator.fhash)
def test_branch():
CONFIG_PATH_WITH_BRANCH = tmpfile('''
github_token: {0}
github_user: img2url-testing
github_repo: img2url-testing-travisci
github_branch: branch-test
'''.format(token()))
path = tmpfile(random_str(10))
config = GitHubConfig(load_config(CONFIG_PATH_WITH_BRANCH))
operator = GitHubOperation(config, path)
assert operator.create_file()
def test_path():
CONFIG_PATH_WITH_PATH = tmpfile('''
github_token: {0}
github_user: img2url-testing
github_repo: img2url-testing-travisci
github_path: this-is/random-nested-path-{1}/
'''.format(token(), random_str(10)))
path = tmpfile(random_str(10))
config = GitHubConfig(load_config(CONFIG_PATH_WITH_PATH))
operator = GitHubOperation(config, path)
# list an non-existed dir.
assert operator.list_remote()
| [
"programmer.zhx@gmail.com"
] | programmer.zhx@gmail.com |
00c18797659e9dd2846a468f4a91c204477c5b34 | 5ae3bc1920fafc33693cdfa3928a48158aa6f725 | /563/563.py | 7a36bafd4463155409d5f74749b8c25c86689ec7 | [] | no_license | sjzyjc/leetcode | 2d0764aec6681d567bffd8ff9a8cc482c44336c2 | 5e09a5d36ac55d782628a888ad57d48e234b61ac | refs/heads/master | 2021-04-03T08:26:38.232218 | 2019-08-15T21:54:59 | 2019-08-15T21:54:59 | 124,685,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def findTilt(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
return self.computeTilt(root)[1]
def computeTilt(self, node):
if node is None:
return 0, 0
left_sum, left_tilt = self.computeTilt(node.left)
right_sum, right_tilt = self.computeTilt(node.right)
return left_sum + right_sum + node.val, abs(left_sum - right_sum) + left_tilt + right_tilt
| [
"jcyang@MacBook-Air.local"
] | jcyang@MacBook-Air.local |
aad780c77bd68cbe3aca2ab016bb4aedf822e810 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-1/dn5/M-228.py | 28ec14812b0decedb82bfb5e86721302c836b662 | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,466 | py | # Obvezen del
def unikati(s):
xs = []
for e in s:
if e not in xs:
xs.append(e)
return xs
def avtor(tvit):
return tvit.split(":")[0]
def vsi_avtorji(tviti):
s = []
for tvit in tviti:
s.append(avtor(tvit))
return unikati(s)
def izloci_besedo(beseda):
i = j = 0
for e in beseda:
if e.isalnum():
break
else:
i += 1
for e in beseda[::-1]:
if e.isalnum():
break
else:
j += 1
j = len(beseda) - j
return beseda[i:j]
def se_zacne_z(tvit, c):
s = []
for e in tvit.split():
if e[0] == c:
s.append(izloci_besedo(e))
return s
def zberi_se_zacne_z(tviti, c):
s = []
for tvit in tviti:
s.append(se_zacne_z(tvit, c))
xs = [x for xs in s for x in xs] # == for xs in s: for x in xs: xs.append(x)
return unikati(xs)
def vse_afne(tviti):
return zberi_se_zacne_z(tviti, "@")
def vsi_hashtagi(tviti):
return zberi_se_zacne_z(tviti, "#")
def vse_osebe(tviti):
s = unikati(vsi_avtorji(tviti) + vse_afne(tviti))
s.sort()
return s
# Dodaten del
def custva(tviti, hashtagi):
s = []
for hashtag in hashtagi:
for tvit in tviti:
vsi_hashi = se_zacne_z(tvit, "#")
if hashtag in vsi_hashi:
s.append(avtor(tvit))
return vse_osebe(s)
def se_poznata(tviti, oseba1, oseba2):
for tvit in tviti:
if oseba1 == avtor(tvit) and oseba2 in se_zacne_z(tvit, "@") or oseba2 == avtor(tvit) and oseba1 in se_zacne_z(tvit, "@"):
return True
return False
# Testi
import unittest
class TestTviti(unittest.TestCase):
    """Unit tests for the tweet-parsing helpers, run against a fixed Slovenian fixture."""
    tviti = [
        "sandra: Spet ta dež. #dougcajt",
        "berta: @sandra Delaj domačo za #programiranje1",
        "sandra: @berta Ne maram #programiranje1 #krneki",
        "ana: kdo so te @berta, @cilka, @dani? #krneki",
        "cilka: jst sm pa #luft",
        "benjamin: pogrešam ano #zalosten",
        "ema: @benjamin @ana #split? po dvopičju, za začetek?",
    ]
    def test_unikat(self):
        # unikati keeps first occurrences, preserving order
        self.assertEqual(unikati([1, 2, 1, 1, 3, 2]), [1, 2, 3])
        self.assertEqual(unikati([1, 3, 2, 1, 1, 3, 2]), [1, 3, 2])
        self.assertEqual(unikati([1, 5, 4, 3, 2]), [1, 5, 4, 3, 2])
        self.assertEqual(unikati([1, 1, 1, 1, 1]), [1])
        self.assertEqual(unikati([1]), [1])
        self.assertEqual(unikati([]), [])
        self.assertEqual(unikati(["Ana", "Berta", "Cilka", "Berta"]), ["Ana", "Berta", "Cilka"])
    def test_avtor(self):
        self.assertEqual(avtor("janez: pred dvopičjem avtor, potem besedilo"), "janez")
        self.assertEqual(avtor("ana: malo krajse ime"), "ana")
        self.assertEqual(avtor("benjamin: pomembne so tri stvari: prva, druga in tretja"), "benjamin")
    def test_vsi_avtorji(self):
        self.assertEqual(vsi_avtorji(self.tviti), ["sandra", "berta", "ana", "cilka", "benjamin", "ema"])
        self.assertEqual(vsi_avtorji(self.tviti[:3]), ["sandra", "berta"])
    def test_izloci_besedo(self):
        # only leading/trailing non-alphanumerics are stripped
        self.assertEqual(izloci_besedo("@ana"), "ana")
        self.assertEqual(izloci_besedo("@@ana!!!"), "ana")
        self.assertEqual(izloci_besedo("ana"), "ana")
        self.assertEqual(izloci_besedo("!#$%\"=%/%()/Ben-jamin'"), "Ben-jamin")
    def test_vse_na_crko(self):
        self.assertEqual(se_zacne_z("Benjamin $je $skocil! Visoko!", "$"), ["je", "skocil"])
        self.assertEqual(se_zacne_z("Benjamin $je $skocil! #Visoko!", "$"), ["je", "skocil"])
        self.assertEqual(se_zacne_z("ana: kdo so te @berta, @cilka, @dani? #krneki", "@"), ["berta", "cilka", "dani"])
    def test_zberi_na_crko(self):
        self.assertEqual(zberi_se_zacne_z(self.tviti, "@"), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
        self.assertEqual(zberi_se_zacne_z(self.tviti, "#"), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
    def test_vse_afne(self):
        self.assertEqual(vse_afne(self.tviti), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
    def test_vsi_hashtagi(self):
        self.assertEqual(vsi_hashtagi(self.tviti), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
    def test_vse_osebe(self):
        self.assertEqual(vse_osebe(self.tviti), ['ana', 'benjamin', 'berta', 'cilka', 'dani', 'ema', 'sandra'])
class TestDodatna(unittest.TestCase):
    """Tests for the extra-credit helpers (custva, se_poznata)."""
    tviti = [
        "sandra: Spet ta dež. #dougcajt",
        "berta: @sandra Delaj domačo za #programiranje1",
        "sandra: @berta Ne maram #programiranje1 #krneki",
        "ana: kdo so te @berta, @cilka, @dani? #krneki",
        "cilka: jst sm pa #luft",
        "benjamin: pogrešam ano #zalosten",
        "ema: @benjamin @ana #split? po dvopičju, za začetek?",
    ]
    def test_custva(self):
        self.assertEqual(custva(self.tviti, ["dougcajt", "krneki"]), ["ana", "sandra"])
        self.assertEqual(custva(self.tviti, ["luft"]), ["cilka"])
        self.assertEqual(custva(self.tviti, ["meh"]), [])
    def test_se_poznata(self):
        # "know each other" means one mentioned the other, in either direction
        self.assertTrue(se_poznata(self.tviti, "ana", "berta"))
        self.assertTrue(se_poznata(self.tviti, "ema", "ana"))
        self.assertFalse(se_poznata(self.tviti, "sandra", "ana"))
        self.assertFalse(se_poznata(self.tviti, "cilka", "luft"))
        self.assertFalse(se_poznata(self.tviti, "cilka", "balon"))
if __name__ == "__main__":
    # run the unittest suite when executed directly
    unittest.main()
| [
"benjamin.fele@gmail.com"
] | benjamin.fele@gmail.com |
9ab4e63e98acdc0ede42cb493cab7d0abdda6f41 | 1811ef650cff1dbf0e73bb7b90d6e671ada699d9 | /speach/__version__.py | 56294e44714053b09a6fa899ed435d89799d9dd7 | [
"MIT"
] | permissive | nickduran/speach | 8da8d20e5fb4795eccc99057080c8fffc220b9ed | 05e25138b419dfecc5f96b454b61d481a95e3345 | refs/heads/main | 2023-05-26T06:00:41.532391 | 2021-06-15T05:33:24 | 2021-06-15T05:33:24 | 381,417,392 | 1 | 0 | MIT | 2021-06-29T15:42:58 | 2021-06-29T15:42:58 | null | UTF-8 | Python | false | false | 900 | py | # -*- coding: utf-8 -*-
# This code is a part of speach library: https://github.com/neocl/speach/
# :copyright: (c) 2018 Le Tuan Anh <tuananh.ke@gmail.com>
# :license: MIT, see LICENSE for more details.
# Package metadata for the speach library; version strings follow PEP 440.
__author__ = "Le Tuan Anh"
__email__ = "tuananh.ke@gmail.com"
__copyright__ = "Copyright (c) 2018, Le Tuan Anh <tuananh.ke@gmail.com>"
__credits__ = []
__license__ = "MIT License"
__description__ = "a Python library for managing, annotating, and converting natural language corpuses using popular formats (CoNLL, ELAN, Praat, CSV, JSON, SQLite, VTT, Audacity, TTL, TIG, ISF)"
__url__ = "https://github.com/neocl/speach/"
__issue__ = "https://github.com/neocl/speach/issues/"
__maintainer__ = "Le Tuan Anh"
__version_major__ = "0.1"  # follow PEP-0440
# full version string, e.g. "0.1a9.post2"
__version__ = "{}a9.post2".format(__version_major__)
__version_long__ = "{} - Alpha 9.post2".format(__version_major__)
__status__ = "3 - Alpha"
| [
"tuananh.ke@gmail.com"
] | tuananh.ke@gmail.com |
4b2c22ea97c6081de88619a4b1a0ede4c0ca1348 | cc8010890757670cc2933a6cc2477f3bc10f53b4 | /python/geneditidtools/get_data.py | 32b73a507fe872c74d702d4f62e6df2aad768129 | [
"MIT"
] | permissive | GenEditID/GenEditID | 209153fe16610afa4fe4ed38b34f82905a808dc1 | dd4f454bab1e9d1d64170a16eb7c247799afd436 | refs/heads/master | 2022-06-11T05:40:22.404699 | 2022-04-05T22:12:27 | 2022-04-05T22:12:27 | 183,217,466 | 0 | 2 | MIT | 2022-04-05T22:12:28 | 2019-04-24T11:44:44 | HTML | UTF-8 | Python | false | false | 2,512 | py | import os
import argparse
import pandas
import subprocess
import shutil
import gzip
from pathlib import Path
import geneditid.log as logger
from geneditid.config import cfg
def run_process(log, cmd, dry_run=True):
    """Run *cmd* (list of args) unless dry_run; return its combined output.

    Logs the command; raises subprocess.CalledProcessError on a non-zero
    exit code. In dry-run mode nothing is executed and None is returned.
    """
    joined_cmd = " ".join(cmd)
    if dry_run:
        log.info("[dry-run] command '{}' executed".format(joined_cmd))
        return None
    proc = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    captured = proc.communicate()[0]
    log.info("command '{}' executed".format(joined_cmd))
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(proc.returncode, cmd, captured)
    log.debug(captured)
    return captured
def main():
    """Download SRA runs listed in a metadata TSV and store them as gzipped
    paired-end fastq files inside per-study project folders."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--meta", dest="meta", action="store", help="Metadata file listing accession codes for SRA", required=True)
    parser.add_argument("--fqdump", dest="fqdump", action="store", help="Path to fastq_dump tool", default='~/sratoolkit/bin/fastq-dump', required=False)
    options = parser.parse_args()
    log = logger.get_custom_logger(os.path.join(cfg['PROJECTS_FOLDER'], 'get_data.log'))
    # expand '~' manually because the default path is a literal string
    fqdump_cmd = options.fqdump.replace('~', str(Path.home()))
    codes = pandas.read_csv(options.meta, sep='\t')
    for i, row in codes.iterrows():
        # create study folder
        study_folder = os.path.join(cfg['PROJECTS_FOLDER'], row.study)
        if not os.path.exists(study_folder):
            os.makedirs(study_folder)
            log.info('Project folder {} created'.format(study_folder))
        log.info('Downloading {}'.format(row.accession))
        # --split-files produces <accession>_1.fastq / _2.fastq for paired-end reads
        run_process(log, [fqdump_cmd, '--split-files', row.accession], False)
        log.info('Compressing {}_1.fastq into {}'.format(row.accession, os.path.join(study_folder, row.filename)))
        with open("{}_1.fastq".format(row.accession), 'rb') as f_in:
            with gzip.open(os.path.join(study_folder, row.filename), 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        os.remove("{}_1.fastq".format(row.accession))
        log.info('Compressing {}_2.fastq into {}'.format(row.accession, os.path.join(study_folder, row.filename2)))
        with open("{}_2.fastq".format(row.accession), 'rb') as f_in:
            with gzip.open(os.path.join(study_folder, row.filename2), 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        os.remove("{}_2.fastq".format(row.accession))
if __name__ == '__main__':
    # script entry point
    main()
| [
"pajanne@gmail.com"
] | pajanne@gmail.com |
cdf1aa18f2637488b927f81e1b53c1b40d319b18 | 69096ca0d67b3d6809a2fe05af51341df62ebc60 | /tibiapy/models/event.py | 4b5f126e048074622e45f3f9bd773a3312b36e33 | [
"Apache-2.0"
] | permissive | Galarzaa90/tibia.py | 6f648aff8b6fbac7be4886435711f7ff08420402 | f8c145dd597c558398bac50e035711e34863b571 | refs/heads/main | 2023-08-17T15:50:31.354488 | 2023-08-17T14:00:07 | 2023-08-17T14:00:07 | 143,892,750 | 30 | 12 | Apache-2.0 | 2023-08-24T17:24:19 | 2018-08-07T15:25:23 | Python | UTF-8 | Python | false | false | 2,504 | py | import datetime
from typing import Optional, List
from tibiapy.models import BaseModel
from tibiapy.urls import get_event_schedule_url
class EventEntry(BaseModel):
    """Represents an event's entry in the calendar."""
    title: str
    """The title of the event."""
    description: str
    """The description of the event."""
    start_date: Optional[datetime.date] = None
    """The day the event starts.
    If the event is continuing from the previous month, this will be :obj:`None`."""
    end_date: Optional[datetime.date] = None
    """The day the event ends.
    If the event is continuing on the next month, this will be :obj:`None`."""
    color: Optional[str] = None
    """The displayed color of the event."""
    def __eq__(self, other):
        # NOTE(review): equality compares titles only, and no matching
        # __hash__ is defined here -- confirm this is intended
        return self.title == other.title
    @property
    def duration(self) -> int:
        """The number of days this event will be active for."""
        # inclusive day count; None when either endpoint is open-ended
        return (self.end_date - self.start_date + datetime.timedelta(days=1)).days \
            if (self.end_date and self.start_date) else None
class EventSchedule(BaseModel):
    """Represents the event's calendar in Tibia.com."""
    month: int
    """The month being displayed.
    Note that some days from the previous and next month may be included too."""
    year: int
    """The year being displayed."""
    events: List[EventEntry] = []
    """A list of events that happen during this month.
    It might include some events from the previous and next months as well."""
    @property
    def url(self) -> str:
        """Get the URL to the event calendar with the current parameters."""
        return get_event_schedule_url(self.month, self.year)
    def get_events_on(self, date: datetime.date) -> List[EventEntry]:
        """Get a list of events that are active during the specified desired_date.
        Parameters
        ----------
        date: :class:`datetime.date`
            The date to check.
        Returns
        -------
        :class:`list` of :class:`EventEntry`
            The events that are active during the desired_date, if any.
        Notes
        -----
        Dates outside the calendar's month and year may yield unexpected results.
        """
        def is_between(start, end, desired_date):
            # an open (None) start or end is treated as unbounded
            start = start or datetime.date.min
            end = end or datetime.date.max
            return start <= desired_date <= end
        return [e for e in self.events if is_between(e.start_date, e.end_date, date)]
| [
"allan.galarza@gmail.com"
] | allan.galarza@gmail.com |
8b83066b58c8832fd2ed754662b8d39d9f60975b | e14d3da0a697297fbbb75079395fc824e34cde72 | /ocaml/3.11.1/tasklets/backup.py | 96ce9c40effd22563aebf57650243adfdd38fa28 | [] | no_license | racktivity/ext-qp5-unstable-qpackages5 | 5be84d377e1d389e9b5922199e545abec206a1a7 | 369b9b2a8ad05d8bd6abedd7d3af07a9b329d8ef | refs/heads/master | 2021-01-10T04:50:55.759871 | 2012-06-04T16:11:33 | 2012-06-04T16:11:33 | 54,315,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # -*- coding: utf-8 -*-
__author__ = 'amplidata'
__tags__ = 'backup',
def main(q, i, params, tags):
    """Tasklet entry point dispatching on params['action'].

    All four actions (backup, restore, backupconfig, restoreconfig) are
    currently unimplemented stubs.
    """
    qpackage = params['qpackage']
    backupurl=params['backupurl'] #e.g. ftp://login:passwd@10.10.1.1/myroot/ @point to doc about cloudfilesystem
    if params['action']=="backup":
        pass
    if params['action']=="restore":
        pass
    if params['action']=="backupconfig":
        pass
    if params['action']=="restoreconfig":
        pass
| [
"devnull@localhost"
] | devnull@localhost |
5a7154dbbe545bd1f27db62c3236e70bbb03a201 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /LdXYzf5d3xJgYZur8_16.py | abe941fa827b363ae2d9c71b9a701e21e5f48742 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | """
Create a function that takes three values:
* `h` hours
* `m` minutes
* `s` seconds
Return the value that's the **longest duration**.
### Examples
longest_time(1, 59, 3598) ➞ 1
longest_time(2, 300, 15000) ➞ 300
longest_time(15, 955, 59400) ➞ 59400
### Notes
No two durations will be the same.
"""
def longest_time(h, m, s):
    """Return whichever of h (hours), m (minutes), s (seconds) is the longest duration.

    Inputs are guaranteed distinct as durations, so no tie-breaking is needed.
    """
    h_seconds = h * 3600
    m_seconds = m * 60
    if h_seconds > m_seconds and h_seconds > s:
        return h
    if m_seconds > h_seconds and m_seconds > s:
        return m
    return s
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
53397084a66e49b842443597d6d166e71c597ece | 89b6997b24e404c176358073626a8bfad7bcdb8e | /.history/courses/urls_20210412142600.py | 8b0b7efaad01dd31d2638b7887b651d48c78295f | [] | no_license | mohamedhawas123/Education-platform-django | 513e64ac112880385402ce609077796578b4e9ee | 7b83e66bba66b8b2b1a007f5818a534653e6abfb | refs/heads/main | 2023-07-18T16:19:52.177886 | 2021-09-24T12:04:09 | 2021-09-24T12:04:09 | 352,306,462 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | from django.urls import path
from . import views
urlpatterns = [
    # course CRUD for the logged-in instructor
    path('mine/', views.ManageCourseListView.as_view(), name="manage_course_list"),
    path('create/', views.CourseCreateView.as_view(), name='course_create'),
    path('<pk>/edit/', views.CourseUpdateView.as_view(), name="course_edit"),
    # NOTE(review): this route lacks a trailing slash, unlike its siblings -- confirm intended
    path('<pk>/delete', views.CourseDeleteView.as_view(), name="course_delete"),
    # module and content management within a course
    path('<pk>/module/', views.CourseModuleUpdateView.as_view(), name="course_module_update"),
    path('module/<int:module_id>/content/<model_name>/create/', views.ContentCreateUpdateView.as_view(), name="module_content_create"),
    path('module/<int:module_id>/content/<model_name>/<id>/', views.ContentCreateUpdateView.as_view(), name="module_content_update"),
    path('content/<int:id>/delete/', views.ContentDeleteView.as_view(), name="module_content_delete"),
    path('module/<int:module_id>/', views.ModuleContentListView.as_view(), name="module_content_list")
]
| [
"mohamedhawas123@gmail.com"
] | mohamedhawas123@gmail.com |
f54cb6d137abc2a044e4124bf19daf6670e25a84 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/pandas/tests/io/formats/test_format.py | 73ad2c92b4c5233c74b7395626d8d306c535f49c | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d4eb1ef8a0b36a276ec1dc69679ce541f904e786ae325f22a2db1ccff83028aa
size 111859
| [
"github@cuba12345"
] | github@cuba12345 |
6fc3fa084b0b167e0fec77e17943303e9ee31e86 | 365dafff6bd700851b31cfd885fa03c12a1ac417 | /test/functional/rpc_signmessage.py | 4890dba1e0fac529314b1369846632e15df017f1 | [
"MIT"
] | permissive | TheBurningSavage/TheBurningSavage | b0f810f803937412af5f9256ac91eb8cbea00ee8 | dfc00e6c5acc192b4d8a6e8a8ded2efb1252c861 | refs/heads/master | 2020-06-25T14:16:26.676295 | 2019-09-13T09:14:35 | 2019-09-13T09:14:35 | 199,332,824 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,865 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The TheBurningSavage Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC commands for signing and verifying messages."""
from test_framework.test_framework import TheBurningSavageTestFramework
from test_framework.util import assert_equal
class SignMessagesTest(TheBurningSavageTestFramework):
    """Functional test: sign a message with a raw private key and with a wallet
    address, then verify signatures (including expected verification failures)."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # presumably legacy addresses are required for message signing -- confirm
        self.extra_args = [["-addresstype=legacy"]]
    def run_test(self):
        message = 'This is just a test message'
        self.log.info('test signing with priv_key')
        # fixed key/address pair with a known-good signature for the message above
        priv_key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'
        address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB'
        expected_signature = 'INbVnW4e6PeRmsv2Qgu8NuopvrVjkcxob+sX8OcZG0SALhWybUjzMLPdAsXI46YZGb0KQTRii+wWIQzRpG/U+S0='
        signature = self.nodes[0].signmessagewithprivkey(priv_key, message)
        assert_equal(expected_signature, signature)
        assert(self.nodes[0].verifymessage(address, signature, message))
        self.log.info('test signing with an address with wallet')
        address = self.nodes[0].getnewaddress()
        signature = self.nodes[0].signmessage(address, message)
        assert(self.nodes[0].verifymessage(address, signature, message))
        self.log.info('test verifying with another address should not work')
        other_address = self.nodes[0].getnewaddress()
        other_signature = self.nodes[0].signmessage(other_address, message)
        assert(not self.nodes[0].verifymessage(other_address, signature, message))
        assert(not self.nodes[0].verifymessage(address, other_signature, message))
if __name__ == '__main__':
    # run the functional test when executed directly
    SignMessagesTest().main()
| [
"willythecat@protonmail.com"
] | willythecat@protonmail.com |
56baf75a073f0b07a19ac7ecd2442fc9f0d746c0 | 8808e1dde1ac315302c0ee57fadb32c041bb3dd8 | /german-alignment/run.py | 22f4dd9ae673841a7b5f6157526877f3ba9254c5 | [] | no_license | mlml/kaldi-gp-alignment | 3513818809634df5f65d29e8e46eccea20fbfa09 | d7b62f6d1346459ad4336d92aa76cd40ea43e036 | refs/heads/master | 2021-01-10T06:44:14.422663 | 2015-10-14T20:44:05 | 2015-10-14T20:44:05 | 44,275,034 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py |
import os
import sys
from collections import defaultdict
from textgrid import TextGrid, IntervalTier
def parse_ctm(ctm_path):
    """Parse a Kaldi CTM file into {filename: [[begin, end, label], ...]}.

    Each CTM line is "<file> <channel> <begin> <duration> <label>"; the end
    time is begin + duration, rounded to two decimal places.
    """
    intervals_by_file = defaultdict(list)
    with open(ctm_path, 'r') as ctm_file:
        for raw_line in ctm_file:
            fields = raw_line.strip().split(' ')
            begin = float(fields[2])
            end = round(begin + float(fields[3]), 2)
            intervals_by_file[fields[0]].append([begin, end, fields[4]])
    return intervals_by_file
def find_max(input):
    """Return the largest end time among [begin, end, label] intervals."""
    return max(interval[1] for interval in input)
def ctm_to_textgrid(directory, out_directory):
    """Convert word/phone CTM files in *directory* into one Praat TextGrid
    per utterance, written into *out_directory*."""
    word_path = os.path.join(directory, 'word_ctm')
    # silently skip directories without alignment output
    if not os.path.exists(word_path):
        return
    phone_path = os.path.join(directory, 'phone_ctm')
    current = None  # NOTE(review): unused variable
    word_dict = parse_ctm(word_path)
    phone_dict = parse_ctm(phone_path)
    num_files = len(word_dict)
    for i,(k,v) in enumerate(word_dict.items()):
        print('processing file {} of {}'.format(i,num_files))
        # TextGrid must be long enough for both word and phone intervals
        maxtime = find_max(v+phone_dict[k])
        tg = TextGrid(maxTime = maxtime)
        wordtier = IntervalTier(name = 'words', maxTime = maxtime)
        phonetier = IntervalTier(name = 'phones', maxTime = maxtime)
        for interval in v:
            wordtier.add(*interval)
        for interval in phone_dict[k]:
            phonetier.add(*interval)
        tg.append(wordtier)
        tg.append(phonetier)
        outpath = os.path.join(out_directory, k + '.TextGrid')
        tg.write(outpath)
if __name__ == '__main__':
    # batch-convert every experiment directory's CTM output into TextGrids
    base_dir = os.path.expanduser('~/dev/kaldi-trunk/egs/gp/s5/exp/GE')
    output_dir = os.path.expanduser('~/Documents/Data/GlobalPhone/German/aln')
    for d in os.listdir(base_dir):
        print(d)
        in_dir = os.path.join(base_dir, d)
        out_dir = os.path.join(output_dir, d)
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        ctm_to_textgrid(in_dir,out_dir)
| [
"michael.e.mcauliffe@gmail.com"
] | michael.e.mcauliffe@gmail.com |
63855c717e36995add950dfc21ba41fee42e6072 | 04f4558aa0dc904b8d7c0ab79b80ec11c34f8ccf | /swagger_client/models/inline_response_200_45.py | 34a4b15a8c11998f5918fe3f551a443b1272a999 | [
"Apache-2.0"
] | permissive | scubawhere/scubawhere-api-python-client | 0fc23ffb97446b0bb0825c93528f954e7d642cf4 | 9f8578e251492c7667f785df7b7c9d66e71f5c8e | refs/heads/master | 2020-12-24T11:10:34.880348 | 2016-11-08T12:20:45 | 2016-11-08T12:20:45 | 73,180,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,243 | py | # coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: bryan@scubawhere.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class InlineResponse20045(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, status=None, sessions=None):
        """
        InlineResponse20045 - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        self.swagger_types = {
            'status': 'str',
            'sessions': 'list[Session]'
        }
        self.attribute_map = {
            'status': 'status',
            'sessions': 'sessions'
        }
        self._status = status
        self._sessions = sessions
    @property
    def status(self):
        """
        Gets the status of this InlineResponse20045.
        :return: The status of this InlineResponse20045.
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """
        Sets the status of this InlineResponse20045.
        :param status: The status of this InlineResponse20045.
        :type: str
        """
        self._status = status
    @property
    def sessions(self):
        """
        Gets the sessions of this InlineResponse20045.
        :return: The sessions of this InlineResponse20045.
        :rtype: list[Session]
        """
        return self._sessions
    @sessions.setter
    def sessions(self, sessions):
        """
        Sets the sessions of this InlineResponse20045.
        :param sessions: The sessions of this InlineResponse20045.
        :type: list[Session]
        """
        self._sessions = sessions
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # recursively serialize nested models, lists of models, and dicts of models
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # NOTE(review): raises AttributeError when compared to objects
        # without __dict__ -- behavior of the code generator template
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"bryan@iqwebcreations.com"
] | bryan@iqwebcreations.com |
771dc87fd67bbc87453a422c054facee3c78c43e | 861781589f3f674fdcc34dc2b5f98ea06006c648 | /orderapp/migrations/0002_auto_20201028_0032.py | e51a59cecd9e1a25017186377a1bb707eca84e28 | [] | no_license | FedorPolyakov/geekshope_on_django | 7dd5133d85ffea910113a1c51684c34d8ca56229 | 2a432e2e581f64b2208447369eaa5f58accd000b | refs/heads/master | 2023-01-19T14:09:47.810521 | 2020-11-17T20:52:14 | 2020-11-17T20:52:14 | 296,665,989 | 0 | 0 | null | 2020-11-19T15:59:23 | 2020-09-18T15:53:43 | Python | UTF-8 | Python | false | false | 1,429 | py | # Generated by Django 3.1.1 on 2020-10-27 21:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0006_product_is_active'),
('orderapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(choices=[('FM', 'формируется'), ('STP', 'отправлен в обработку'), ('PRD', 'обработан'), ('PD', 'оплачен'), ('RDY', 'готов'), ('DN', 'выполнен'), ('CNC', 'отменен')], max_length=3, verbose_name='Статус заказа'),
),
migrations.AlterField(
model_name='orderitem',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orderitems', to='orderapp.order', verbose_name='Заказ'),
),
migrations.AlterField(
model_name='orderitem',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.product', verbose_name='Продукт'),
),
migrations.AlterField(
model_name='orderitem',
name='quantity',
field=models.SmallIntegerField(default=0, verbose_name='Количество'),
),
]
| [
"pismo.na.pochtu@gmail.com"
] | pismo.na.pochtu@gmail.com |
9870f9f88a5746c6350951a5f6d5eadbb1a9540f | 96538cc3eee3d73d429f3476d0e895be95d695e3 | /worker/news/main.py | 168efa63f44e7fec9b1e4e3df8286974fb1da461 | [] | no_license | FashtimeDotCom/distributed-spider | d9555670216e68d4ff031e466cbf3529d080a534 | 33292f098403fa73239e0c7353e4cc5918be981b | refs/heads/master | 2020-03-22T11:43:14.796426 | 2018-07-06T10:51:48 | 2018-07-06T11:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | # -*- coding: utf-8 -*-
'''
Created on 2018-07-04 15:45
---------
@summary: 从master中取网页首地址
---------
@author: Boris
'''
import sys
sys.path.append('../')
import init
import pid
pid.record_pid(__file__)
import utils.tools as tools
from utils.log import log
from base.spider import Spider
from utils.export_data import ExportData
# 需配置
from news.parsers import *
def main():
def begin_callback():
log.info('\n********** news begin **********')
def end_callback():
log.info('\n********** news end **********')
# 配置spider
spider = Spider(tab_urls = 'news:news_urls', begin_callback = begin_callback, end_callback = end_callback)
# 添加parser
spider.add_parser(news_parser)
spider.start()
if __name__ == '__main__':
main() | [
"boris_liu@foxmail.com"
] | boris_liu@foxmail.com |
4e8ea7b0208106797b97138f04f0ce6fa022bf85 | a9823180c93f973a20b492af9181c30a51e1c221 | /debug.py | c044a4f9ba3acfab6f9f4c67a0dab1b416351bea | [
"MIT"
] | permissive | thirtytwobits/uavcan.org | 48f62675f21a5f550829d68ec9f84b7f07f073e6 | 1a16a190520ab751f8e31c02aaeec951c9a3fc19 | refs/heads/master | 2022-05-06T12:26:46.012683 | 2022-04-13T18:38:02 | 2022-04-13T18:38:02 | 170,823,712 | 0 | 0 | MIT | 2019-02-15T07:52:38 | 2019-02-15T07:52:37 | null | UTF-8 | Python | false | false | 472 | py | #!/usr/bin/env python3
import sys
import logging
import os
# Force the application's debug configuration before importing it.
os.environ['DEBUG'] = '1'
LOG_FORMAT = '%(asctime)s %(levelname)-8s %(name)s: %(message)s'
# passing "debug" on the command line raises log verbosity to DEBUG
log_level = logging.DEBUG if 'debug' in sys.argv else logging.INFO
logging.basicConfig(stream=sys.stderr, level=log_level, format=LOG_FORMAT)
sys.path.insert(0, os.path.dirname(__file__))
# noinspection PyUnresolvedReferences
from app import app as application
application.run(host='0.0.0.0', port=4000, debug=True)
| [
"pavel.kirienko@gmail.com"
] | pavel.kirienko@gmail.com |
00daed5e30b686e85e499ae151d9ecac057a9b33 | f85d4c5b7ff8d5fba36ccebf8b8eda6bfcf99019 | /arche/models/tests/test_folder.py | cf2098f9997a6de9f69f8a6e53fa0878ffcbc8ea | [] | no_license | ArcheProject/Arche | 8289ebc6de652b786ba943fafb6ea8130e42ba24 | eb9924f72c167208265ac0a2c80504422e1897c9 | refs/heads/master | 2022-06-26T15:52:17.400944 | 2022-06-14T09:35:16 | 2022-06-14T09:35:16 | 18,967,966 | 3 | 0 | null | 2015-04-22T06:34:26 | 2014-04-20T16:01:52 | Python | UTF-8 | Python | false | false | 615 | py | from unittest import TestCase
from arche.interfaces import IArcheFolder
from pyramid import testing
from zope.interface.verify import verifyClass, verifyObject
class FolderTests(TestCase):
    """Verify that ArcheFolder satisfies the IArcheFolder interface."""
    def setUp(self):
        self.config = testing.setUp()
    def tearDown(self):
        testing.tearDown()
    @property
    def _cut(self):
        # "class under test"; imported lazily so setUp runs first
        from arche.models.folder import ArcheFolder
        return ArcheFolder
    def test_verify_class(self):
        # NOTE(review): failUnless is a deprecated alias of assertTrue
        self.failUnless(verifyClass(IArcheFolder, self._cut))
    def test_verify_obj(self):
        self.failUnless(verifyObject(IArcheFolder, self._cut()))
| [
"robin@betahaus.net"
] | robin@betahaus.net |
4a89cbf78e998b518f507f6dedde62e18c37b532 | eac52a8ae7c539acedaedf8744bd8e20172f0af6 | /epi_solutions/binary_tree/smallest-subtree-with-all-the-deepest-nodes.py | 56c07341774dcb82fc85175510df5d4576742701 | [] | no_license | mshekhar/random-algs | 3a0a0f6e6b21f6a59ed5e1970b7a2bc2044e191f | 7c9a8455f49027a754038b23aaa2df61fe5397ca | refs/heads/master | 2020-03-26T16:29:42.694785 | 2019-07-18T20:57:55 | 2019-07-18T20:57:55 | 145,105,593 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def helper(self, root):
        """Return (depth, smallest subtree root containing all deepest nodes under root)."""
        if not root:
            return 0, None
        left_depth, left_deepeset_node = self.helper(root.left)
        right_depth, right_deepeset_node = self.helper(root.right)
        if left_depth == right_depth:
            # deepest nodes occur on both sides (or root is a leaf/empty):
            # root itself is the common ancestor
            return left_depth + 1, root
        elif left_depth > right_depth:
            return left_depth + 1, left_deepeset_node
        else:
            return right_depth + 1, right_deepeset_node

    def subtreeWithAllDeepest(self, root):
        """
        :type root: TreeNode
        :rtype: TreeNode
        """
        # Bug fix: previously returned the (depth, node) tuple from the
        # helper instead of just the node.
        return self.helper(root)[1]
| [
"mayank@moengage.com"
] | mayank@moengage.com |
5fd9b93930a9768df0ae870e00bdd00b8021b6b6 | 0ca7c7bdb297439554777e126ae8a2999962b7fe | /venv/Lib/site-packages/gevent/tests/test__example_udp_server.py | 07d863762540271b1e7dea0d35100f429ca1934a | [] | no_license | YazLuna/APIExpressJobs | 6c0857f63180bf5163d11fa9d1a411e44a4ba46f | cd52bc8d0d60100091637ef79f78cc79d58a1495 | refs/heads/master | 2023-06-13T02:50:57.672295 | 2021-06-18T14:57:53 | 2021-06-18T14:57:53 | 367,244,876 | 0 | 1 | null | 2021-06-18T14:57:53 | 2021-05-14T04:05:43 | Python | UTF-8 | Python | false | false | 535 | py | import socket
from gevent.testing import util
from gevent.testing import main
class Test(util.TestServer):
    """Run examples/udp_server.py and exercise it over a real UDP socket."""
    example = 'udp_server.py'
    def _run_all_tests(self):
        sock = socket.socket(type=socket.SOCK_DGRAM)
        try:
            sock.connect(('127.0.0.1', 9000))
            sock.send(b'Test udp_server')
            # the example echoes back the received byte count
            data, _address = sock.recvfrom(8192)
            self.assertEqual(data, b'Received 15 bytes')
        finally:
            sock.close()
if __name__ == '__main__':
    # gevent testing entry point
    main()
| [
"ale_200200@hotmail.com"
] | ale_200200@hotmail.com |
4a77dfb840602c3a1ea69030cb783630d378a69b | dde951c8bcfb79cdead3449de42d9ed3e6f24fbe | /dive_into_python/kgp.py | 577a1559521a72f52510266b2f510aebab31bb0b | [] | no_license | wolfeyuanwei/study-python | c764353cbf75b0ccd79dc562fe11eebee712510b | be1a9ec93cd29d9fe6b69ad4f9c059fb9dd308de | refs/heads/master | 2021-05-11T22:57:51.541684 | 2018-02-08T05:03:10 | 2018-02-08T05:03:10 | 117,504,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,716 | py | #!/usr/bin/python
#filename:kgp.py
from xml.dom import minidom
import random
import toolbox
import sys
import getopt
# Module-level debug switch: a truthy value makes randomChildElement
# log its available choices and selection to stderr.
_debug=0
class NoSourceError(Exception):
    """Raised when no generation source is given and none can be guessed."""
class KantGenerator:
'''generates mock philosophy based on a context-free grammar'''
def __init__(self, grammar, source=None):
    self.loadGrammar(grammar)
    # "source and source or ..." is the pre-2.5 and/or conditional idiom:
    # fall back to a guessed default source when source is falsy
    self.loadSource(source and source or self.getDefaultSource())
    self.refresh()
def _load(self, source):
    '''load XML input source, return parsed XML document

    source may be:
    - a URL of a remote XML file ("http://diveintopython.org/kant.xml")
    - a filename of a local XML file ("/diveintopython/common/py/kant.xml")
    - standard input ("-")
    - the actual XML document, as a string
    '''
    # toolbox.openAnything presumably returns a file-like object for any
    # of the above source kinds -- confirm against the toolbox module
    sock = toolbox.openAnything(source)
    xmldoc = minidom.parse(sock).documentElement
    sock.close()
    return xmldoc
def loadGrammar(self, grammar):
    '''load context-free grammar and index its <ref> elements by id'''
    self.grammar = self._load(grammar)
    # map of ref id -> <ref> DOM element, used when expanding <xref> tags
    self.refs = {}
    for ref in self.grammar.getElementsByTagName("ref"):
        self.refs[ref.attributes["id"].value]=ref
def loadSource(self, source):
    '''load and parse the source document that generation starts from'''
    self.source = self._load(source)
def getDefaultSource(self):
    '''guess default source of the current grammar

    The default source will be one of the <ref>s that is not
    cross-referenced.  This sounds complicated but it's not.
    Example: the default source for kant.xml is
    "<xref id='section'/>", because 'section' is the one <ref>
    that is not <xref>'d anywhere in the grammar.
    In most grammars, the default source will produce the
    longest (and most interesting) output.
    '''
    # collect the ids of every cross-reference used in the grammar
    xrefs = {}
    for xref in self.grammar.getElementsByTagName("xref"):
        xrefs[xref.attributes["id"].value]=1
    xrefs=xrefs.keys()
    # a <ref> that is never cross-referenced is a candidate top-level source
    standaloneXrefs=[e for e in self.refs.keys() if e not in xrefs]
    if not standaloneXrefs:
        raise NoSourceError, "can't guess source, and no source specified."
    return '<xref id="%s"/>' % random.choice(standaloneXrefs)
def reset(self):
    '''reset parser state: output buffer and capitalization flag'''
    self.pieces=[]
    # set by <p class='sentence'>; capitalizes the next emitted word
    self.capitalizeNextWord = 0
def refresh(self):
    '''reset output buffer, re-parse entire source file, and return output

    Since parsing involves a good deal of randomness, this is an
    easy way to get new output without having to reload a grammar file
    each time.
    '''
    self.reset()
    self.parse(self.source)
    return self.output()
def output(self):
    '''return the text generated so far as a single string'''
    return "".join(self.pieces)
def randomChildElement(self, node):
'''choose a random child element of a node
This is a utility method used by do_xref and do_choice.
'''
choices = [e for e in node.childNodes
if e.nodeType == e.ELEMENT_NODE]
chosen=random.choice(choices)
if _debug:
sys.stderr.write('%s available choices: %s\n' % \
(len(choices), [e.toxml() for e in choices]))
sys.stderr.write('Chosen:%s\n' % chosen.toxml())
return chosen
def parse(self, node):
'''parse a single XML node
A parsed XML document is a tree of nodes
of various types. Each node is represented by an instance of the
corresponding Python class(Element for a tag, Text for
text data, Document for the top-level document).The following
statement constructs the name of a class method based on the type
of node we're parsing ("parse_Element" for an Element node,
"parse_Text" for a Text node, etc.) and then calls the methos.
'''
parseMethod = getattr(self, "parse_%s_" %node.__class__.__name__)
parseMethod(node)
def parse_Document(self, node):
'''parse the document node
The document node by itself isn't interesting (to us), but
its only child, node, documentElement, is it's the root node
of the grammar.
'''
self.parse(node.documentElement)
def parse_Text(self, node):
'''parse a text node
The text of a text node is usually added to the output buffer
verbatim. The noe exception is that <p class='sentence'> sets
a flag to capitalize the first letter of the next word. If
that flag is set, we capitalize the text and reset the flag.
'''
text = node.data
if self.capitalizeNextWord:
self.pieces.append(text[0].upper())
self.pieces.append(text[1:])
self.capitalizeNextWord = 0
else:
self.pieces.append(text)
def parse_Element(self, node):
'''parse an element
An XML element corresponds to an actual tag in the source:
<xref id='...'>, <p chance='...'>, <choice>, etc.
Each element type is handled in its own method. Like we did in
parse(), we construct a method name based on the name of the
element ("do_xref" for an <xref> tag, etc.) and
call the method.
'''
handlerMethod = getattr(self, "do_%s" %node.tagName)
handlerMethod(node)
def parse_Comment(self, node):
'''parse an comment'''
pass
def do_xref(self, node):
'''do xref'''
id = node.attributes["id"].value
self.parse(self.randomChildElement(self.refs[id]))
def do_p(self, node):
'''do p'''
keys = node.attributes.keys()
if "class" in keys:
if node.attributes["class"].value == "sentence":
self.capitalizeNextWord = 1
if "chance" in keys:
chance = int(node.attributes["chance"].value)
doit = (chance > random.randrange(100))
else:
doit = 1
if doit:
for child in node.childNodes:self.parse(child)
def do_choice(self, node):
'''do choice'''
self.parse(self.randomChildElement(node))
def usage():
print __doc__
def main(argv):
grammar = "note.xml"
for v in argv:
print v
try:
opts, args = getopt.getopt(argv, "hg:d", ["help", "grammar="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
elif opt == '-d':
_debug =1
elif opt in ("-g", "--grammar"):
grammar = arg
source = "".join(args)
k=KantGenerator(grammar, source)
print k.output()
if __name__ == "__main__":
main(sys.argv[1:]) | [
"wolfe_yuan@163.com"
] | wolfe_yuan@163.com |
0e6181bcc7f4022fa1bfb9215f66358fb1fbf947 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_snacked.py | 45d305aaad9053ae01e84d243abe6642be1e7899 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py |
#calss header
class _SNACKED():
def __init__(self,):
self.name = "SNACKED"
self.definitions = snack
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['snack']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
f8f265699c8e44ca88151cc21f3fb5cdbfdaeea1 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2631/60747/257584.py | beaeba029f2abe3d32e519c36820c90d7d1d5c23 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | s=input().split(" ")
N=int(s[0])
G=int(s[1])
milk=[G for l in range(N)]
num=[]
count=0
for i in range(N):
num.append(input().split(" "))
for j in range(N):
for k in range(3):
num[j][k]=int(num[j][k])
num.sort()
for d in range(N):
a=milk.count(max(milk))
best=milk.index(max(milk))
milk[num[d][1]-1]=milk[num[d][1]-1]+num[d][2]
if milk.count(max(milk))!=a:
count+=1
else:
if milk.index(max(milk))!=best:
count+=1
print(count,end="")
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
b44468d2b24bbe806eca686c36379f02c9a5acc2 | 800b5166148d4e3cd03825d7d20e2900fbc6c789 | /app_common/migrations/0015_feedbacks_about_object_type.py | 25814c6692de563fc266846ee0ac2cdd8381c69b | [] | no_license | JiSuPiaoYi/dawufupin | 4ffc979a93502eb576776673c98aaeb16021827e | 57756a501436fabe9b27ebca2e80e60932da30dc | refs/heads/master | 2020-04-07T11:37:35.728108 | 2018-11-20T09:09:50 | 2018-11-20T09:09:50 | 158,334,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2018-09-28 21:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_common', '0014_feedbacks_remarks'),
]
operations = [
migrations.AddField(
model_name='feedbacks',
name='about_object_type',
field=models.SmallIntegerField(default=0, verbose_name='涉及对象类型'),
),
]
| [
"360510132@qq.com"
] | 360510132@qq.com |
a2fb3c3846eb698405c231fc6512f0f11eb7f24b | 7ba672971ac7453d9026eb2f9cf4507bac134a94 | /st01.Python기초/0301수업/py11문자열/py11_09_format.py | 97c50eff88081faca5fc5bf8eae46374b72bbe06 | [] | no_license | parky83/python0209 | 5e806bc14679085ca1d1d4c5d59461abb4c8024e | 5159beb9c80f59b2c618eed040ffb4ffb69469c8 | refs/heads/master | 2021-01-01T10:34:39.047489 | 2020-08-15T03:41:41 | 2020-08-15T03:41:41 | 239,239,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # format 함수의 사용법을 익혀보자
# format() 함수로 숫자를 문자열로 변환하기
# format() 함수로 숫자를 문자열로 변환하기
# 정수
# 특정 칸에 출력하기
# 빈 칸을 0으로 채우기
# 기호와 함께 출력하기
# 조합하기
# 15칸 만들기
# 15칸에 부호 추가하기
# 15칸에 부호 추가하고 0으로 채우기
| [
"tt@gmail.com"
] | tt@gmail.com |
b4764ec5a518554f73560466e781d39533cba196 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /152/152.maximum-product-subarray.699806514.Accepted.leetcode.python3.py | ba0d37c0ac8d63dff9f8413a9708e951d3544b1e | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | class Solution(object):
def maxProduct(self, nums):
max_so_far, min_so_far, result = nums[0], nums[0], nums[0]
for index in range(1, len(nums)):
if nums[index] > 0:
max_so_far = max(max_so_far * nums[index], nums[index])
min_so_far = min(min_so_far * nums[index], nums[index])
else:
temp = max_so_far
max_so_far = max(min_so_far * nums[index], nums[index])
min_so_far = min(temp * nums[index], nums[index])
result = max(result, max_so_far)
return result
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.